2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * SPDX-License-Identifier: BSD-3-Clause
6 * Copyright (c) 2005, WHEEL Sp. z o.o.
7 * Copyright (c) 2005 Justin T. Gibbs.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * substantially similar to the "NO WARRANTY" disclaimer below
17 * ("Disclaimer") and any redistribution must be conditioned upon including
18 * a substantially similar Disclaimer requirement for further binary
20 * 3. Neither the names of the above listed copyright holders nor the names
21 * of any contributors may be used to endorse or promote products derived
22 * from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
34 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Some Breakage and Bug Fixing added later.
38 * Copyright (c) 2006, by Matthew Jacob
41 * Support from LSI-Logic has also gone a great deal toward making this a
42 * workable subsystem and is gratefully acknowledged.
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
48 #include <dev/mpt/mpt.h>
49 #include <dev/mpt/mpt_raid.h>
51 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
52 #include "dev/mpt/mpilib/mpi_raid.h"
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
60 #include <sys/callout.h>
61 #include <sys/kthread.h>
62 #include <sys/sysctl.h>
64 #include <machine/stdarg.h>
66 struct mpt_raid_action_result
69 MPI_RAID_VOL_INDICATOR indicator_struct;
70 uint32_t new_settings;
71 uint8_t phys_disk_num;
73 uint16_t action_status;
76 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
77 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
79 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
81 static mpt_probe_handler_t mpt_raid_probe;
82 static mpt_attach_handler_t mpt_raid_attach;
83 static mpt_enable_handler_t mpt_raid_enable;
84 static mpt_event_handler_t mpt_raid_event;
85 static mpt_shutdown_handler_t mpt_raid_shutdown;
86 static mpt_reset_handler_t mpt_raid_ioc_reset;
87 static mpt_detach_handler_t mpt_raid_detach;
89 static struct mpt_personality mpt_raid_personality =
92 .probe = mpt_raid_probe,
93 .attach = mpt_raid_attach,
94 .enable = mpt_raid_enable,
95 .event = mpt_raid_event,
96 .reset = mpt_raid_ioc_reset,
97 .shutdown = mpt_raid_shutdown,
98 .detach = mpt_raid_detach,
101 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
102 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
104 static mpt_reply_handler_t mpt_raid_reply_handler;
105 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
106 MSG_DEFAULT_REPLY *reply_frame);
107 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
108 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
109 static void mpt_raid_thread(void *arg);
110 static callout_func_t mpt_raid_timer;
112 static void mpt_enable_vol(struct mpt_softc *mpt,
113 struct mpt_raid_volume *mpt_vol, int enable);
115 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
116 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
118 static void mpt_raid_sysctl_attach(struct mpt_softc *);
120 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
121 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
122 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
123 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
124 const char *fmt, ...);
125 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
126 const char *fmt, ...);
128 static int mpt_issue_raid_req(struct mpt_softc *mpt,
129 struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
130 u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
131 int write, int wait);
133 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
134 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
136 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
139 mpt_vol_type(struct mpt_raid_volume *vol)
141 switch (vol->config_page->VolumeType) {
142 case MPI_RAID_VOL_TYPE_IS:
144 case MPI_RAID_VOL_TYPE_IME:
146 case MPI_RAID_VOL_TYPE_IM:
154 mpt_vol_state(struct mpt_raid_volume *vol)
156 switch (vol->config_page->VolumeStatus.State) {
157 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
159 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
161 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
169 mpt_disk_state(struct mpt_raid_disk *disk)
171 switch (disk->config_page.PhysDiskStatus.State) {
172 case MPI_PHYSDISK0_STATUS_ONLINE:
174 case MPI_PHYSDISK0_STATUS_MISSING:
176 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
177 return ("Incompatible");
178 case MPI_PHYSDISK0_STATUS_FAILED:
180 case MPI_PHYSDISK0_STATUS_INITIALIZING:
181 return ("Initializing");
182 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
183 return ("Offline Requested");
184 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
185 return ("Failed per Host Request");
186 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
194 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
195 const char *fmt, ...)
199 printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
200 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
201 vol->config_page->VolumeBus, vol->config_page->VolumeID);
208 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
209 const char *fmt, ...)
213 if (disk->volume != NULL) {
214 printf("(%s:vol%d:%d): ",
215 device_get_nameunit(mpt->dev),
216 disk->volume->config_page->VolumeID,
217 disk->member_number);
219 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
220 disk->config_page.PhysDiskBus,
221 disk->config_page.PhysDiskID);
229 mpt_raid_async(void *callback_arg, u_int32_t code,
230 struct cam_path *path, void *arg)
232 struct mpt_softc *mpt;
234 mpt = (struct mpt_softc*)callback_arg;
236 case AC_FOUND_DEVICE:
238 struct ccb_getdev *cgd;
239 struct mpt_raid_volume *mpt_vol;
241 cgd = (struct ccb_getdev *)arg;
246 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
247 cgd->ccb_h.target_id);
249 RAID_VOL_FOREACH(mpt, mpt_vol) {
250 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
253 if (mpt_vol->config_page->VolumeID
254 == cgd->ccb_h.target_id) {
255 mpt_adjust_queue_depth(mpt, mpt_vol, path);
266 mpt_raid_probe(struct mpt_softc *mpt)
269 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
276 mpt_raid_attach(struct mpt_softc *mpt)
278 struct ccb_setasync csa;
279 mpt_handler_t handler;
282 mpt_callout_init(mpt, &mpt->raid_timer);
284 error = mpt_spawn_raid_thread(mpt);
286 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
291 handler.reply_handler = mpt_raid_reply_handler;
292 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
295 mpt_prt(mpt, "Unable to register RAID haandler!\n");
299 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
300 csa.ccb_h.func_code = XPT_SASYNC_CB;
301 csa.event_enable = AC_FOUND_DEVICE;
302 csa.callback = mpt_raid_async;
303 csa.callback_arg = mpt;
304 xpt_action((union ccb *)&csa);
305 if (csa.ccb_h.status != CAM_REQ_CMP) {
306 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
307 "CAM async handler.\n");
311 mpt_raid_sysctl_attach(mpt);
315 mpt_raid_detach(mpt);
/* Personality enable hook; nothing additional is required for RAID. */
static int
mpt_raid_enable(struct mpt_softc *mpt)
{

	return (0);
}
327 mpt_raid_detach(struct mpt_softc *mpt)
329 struct ccb_setasync csa;
330 mpt_handler_t handler;
332 mpt_callout_drain(mpt, &mpt->raid_timer);
335 mpt_terminate_raid_thread(mpt);
336 handler.reply_handler = mpt_raid_reply_handler;
337 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
339 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
340 csa.ccb_h.func_code = XPT_SASYNC_CB;
341 csa.event_enable = 0;
342 csa.callback = mpt_raid_async;
343 csa.callback_arg = mpt;
344 xpt_action((union ccb *)&csa);
/* IOC reset hook; RAID state is rebuilt lazily by the refresh thread. */
static void
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
{

	/* Nothing to do yet. */
}
/*
 * Text for MPI_EVENT_INTEGRATED_RAID reason codes, indexed by
 * raid_event->ReasonCode (MPI_EVENT_RAID_RC_* values 0x00-0x0B).
 */
static const char *raid_event_txt[] =
{
	"Volume Created",
	"Volume Deleted",
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
};
372 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
373 MSG_EVENT_NOTIFY_REPLY *msg)
375 EVENT_DATA_RAID *raid_event;
376 struct mpt_raid_volume *mpt_vol;
377 struct mpt_raid_disk *mpt_disk;
378 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
382 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
386 raid_event = (EVENT_DATA_RAID *)&msg->Data;
390 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
391 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
392 mpt_vol = &mpt->raid_volumes[i];
393 vol_pg = mpt_vol->config_page;
395 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
398 if (vol_pg->VolumeID == raid_event->VolumeID
399 && vol_pg->VolumeBus == raid_event->VolumeBus)
402 if (i >= mpt->ioc_page2->MaxVolumes) {
409 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
410 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
411 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
417 switch(raid_event->ReasonCode) {
418 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
419 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
421 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
422 if (mpt_vol != NULL) {
423 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
424 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
427 * Coalesce status messages into one
428 * per background run of our RAID thread.
429 * This removes "spurious" status messages
436 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
437 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
439 if (mpt_vol != NULL) {
440 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
443 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
444 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
447 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
448 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
450 if (mpt_disk != NULL) {
451 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
454 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
457 case MPI_EVENT_RAID_RC_SMART_DATA:
458 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
463 if (mpt_disk != NULL) {
464 mpt_disk_prt(mpt, mpt_disk, "");
465 } else if (mpt_vol != NULL) {
466 mpt_vol_prt(mpt, mpt_vol, "");
468 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
469 raid_event->VolumeID);
471 if (raid_event->PhysDiskNum != 0xFF)
472 mpt_prtc(mpt, ":%d): ",
473 raid_event->PhysDiskNum);
475 mpt_prtc(mpt, "): ");
478 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
479 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
480 raid_event->ReasonCode);
482 mpt_prtc(mpt, "%s\n",
483 raid_event_txt[raid_event->ReasonCode]);
486 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
487 /* XXX Use CAM's print sense for this... */
488 if (mpt_disk != NULL)
489 mpt_disk_prt(mpt, mpt_disk, "");
491 mpt_prt(mpt, "Volume(%d:%d:%d: ",
492 raid_event->VolumeBus, raid_event->VolumeID,
493 raid_event->PhysDiskNum);
494 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
495 raid_event->ASC, raid_event->ASCQ);
498 mpt_raid_wakeup(mpt);
503 mpt_raid_shutdown(struct mpt_softc *mpt)
505 struct mpt_raid_volume *mpt_vol;
507 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
511 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
512 RAID_VOL_FOREACH(mpt, mpt_vol) {
513 mpt_verify_mwce(mpt, mpt_vol);
518 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
519 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
527 if (reply_frame != NULL)
528 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
530 else if (req->ccb != NULL) {
531 /* Complete Quiesce CCB with error... */
535 req->state &= ~REQ_STATE_QUEUED;
536 req->state |= REQ_STATE_DONE;
537 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
539 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
541 } else if (free_req) {
542 mpt_free_request(mpt, req);
549 * Parse additional completion information in the reply
550 * frame for RAID I/O requests.
553 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
554 MSG_DEFAULT_REPLY *reply_frame)
556 MSG_RAID_ACTION_REPLY *reply;
557 struct mpt_raid_action_result *action_result;
558 MSG_RAID_ACTION_REQUEST *rap;
560 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
561 req->IOCStatus = le16toh(reply->IOCStatus);
562 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
564 switch (rap->Action) {
565 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
566 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
568 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
569 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
574 action_result = REQ_TO_RAID_ACTION_RESULT(req);
575 memcpy(&action_result->action_data, &reply->ActionData,
576 sizeof(action_result->action_data));
577 action_result->action_status = le16toh(reply->ActionStatus);
582 * Utiltity routine to perform a RAID action command;
585 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
586 struct mpt_raid_disk *disk, request_t *req, u_int Action,
587 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
590 MSG_RAID_ACTION_REQUEST *rap;
594 memset(rap, 0, sizeof *rap);
595 rap->Action = Action;
596 rap->ActionDataWord = htole32(ActionDataWord);
597 rap->Function = MPI_FUNCTION_RAID_ACTION;
598 rap->VolumeID = vol->config_page->VolumeID;
599 rap->VolumeBus = vol->config_page->VolumeBus;
601 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
603 rap->PhysDiskNum = 0xFF;
604 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
605 se->Address = htole32(addr);
606 MPI_pSGE_SET_LENGTH(se, len);
607 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
608 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
609 MPI_SGE_FLAGS_END_OF_LIST |
610 (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
611 se->FlagsLength = htole32(se->FlagsLength);
612 rap->MsgContext = htole32(req->index | raid_handler_id);
614 mpt_check_doorbell(mpt);
615 mpt_send_cmd(mpt, req);
618 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
619 /*sleep_ok*/FALSE, /*time_ms*/2000));
625 /*************************** RAID Status Monitoring ***************************/
627 mpt_spawn_raid_thread(struct mpt_softc *mpt)
632 * Freeze out any CAM transactions until our thread
633 * is able to run at least once. We need to update
634 * our RAID pages before acception I/O or we may
635 * reject I/O to an ID we later determine is for a
639 xpt_freeze_simq(mpt->phydisk_sim, 1);
641 error = kproc_create(mpt_raid_thread, mpt,
642 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
643 "mpt_raid%d", mpt->unit);
646 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
653 mpt_terminate_raid_thread(struct mpt_softc *mpt)
656 if (mpt->raid_thread == NULL) {
659 mpt->shutdwn_raid = 1;
660 wakeup(&mpt->raid_volumes);
662 * Sleep on a slightly different location
663 * for this interlock just for added safety.
665 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
669 mpt_raid_thread(void *arg)
671 struct mpt_softc *mpt;
674 mpt = (struct mpt_softc *)arg;
677 while (mpt->shutdwn_raid == 0) {
678 if (mpt->raid_wakeup == 0) {
679 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
683 mpt->raid_wakeup = 0;
685 if (mpt_refresh_raid_data(mpt)) {
686 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
691 * Now that we have our first snapshot of RAID data,
692 * allow CAM to access our physical disk bus.
696 xpt_release_simq(mpt->phydisk_sim, TRUE);
699 if (mpt->raid_rescan != 0) {
703 mpt->raid_rescan = 0;
706 ccb = xpt_alloc_ccb();
709 error = xpt_create_path(&ccb->ccb_h.path, NULL,
710 cam_sim_path(mpt->phydisk_sim),
711 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
712 if (error != CAM_REQ_CMP) {
714 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
720 mpt->raid_thread = NULL;
721 wakeup(&mpt->raid_thread);
/*
 * NOTE(review): this region of the listing is truncated (missing lines,
 * stray line numbers embedded in the text) and in upstream sources this
 * quiesce support is dead/disabled code.  Left byte-identical; annotated
 * only.  Verify against the upstream driver before reviving.
 */
/* Timeout callback armed below; body appears to be a stub. */
728 mpt_raid_quiesce_timeout(void *arg)
731 /* Complete the CCB with error */
735 static timeout_t mpt_raid_quiesce_timeout;
/*
 * Quiesce physical I/O to a RAID member disk before pass-thru access:
 * freezes the device queue, issues MPI_RAID_ACTION_QUIESCE_PHYS_IO,
 * and arms a 5-second timeout.  Returns CAM status codes.
 */
737 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
/* Already quiesced: nothing to do. */
743 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
744 return (CAM_REQ_CMP);
746 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
749 mpt_disk->flags |= MPT_RDF_QUIESCING;
/* Hold off new I/O while the quiesce action is in flight. */
750 xpt_freeze_devq(ccb->ccb_h.path, 1);
752 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
753 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
754 /*ActionData*/0, /*addr*/0,
755 /*len*/0, /*write*/FALSE,
758 return (CAM_REQ_CMP_ERR);
/* Arm completion timeout: 5 seconds. */
760 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
762 if (rv == ETIMEDOUT) {
763 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
764 "Quiece Timed-out\n");
/* Undo the freeze on failure. */
765 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
766 return (CAM_REQ_CMP_ERR);
769 ar = REQ_TO_RAID_ACTION_RESULT(req);
771 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
772 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
773 mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
774 "%d:%x:%x\n", rv, req->IOCStatus,
776 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
777 return (CAM_REQ_CMP_ERR);
780 return (CAM_REQ_INPROG);
782 return (CAM_REQUEUE_REQ);
786 /* XXX Ignores that there may be multiple buses/IOCs involved. */
788 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
790 struct mpt_raid_disk *mpt_disk;
792 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
793 if (ccb->ccb_h.target_id < mpt->raid_max_disks
794 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
795 *tgt = mpt_disk->config_page.PhysDiskID;
798 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
799 ccb->ccb_h.target_id);
803 /* XXX Ignores that there may be multiple buses/IOCs involved. */
805 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
807 struct mpt_raid_disk *mpt_disk;
810 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
812 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
813 mpt_disk = &mpt->raid_disks[i];
814 if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
815 mpt_disk->config_page.PhysDiskID == tgt)
822 /* XXX Ignores that there may be multiple buses/IOCs involved. */
824 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
826 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
827 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
829 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
832 ioc_vol = mpt->ioc_page2->RaidVolume;
833 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
834 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
835 if (ioc_vol->VolumeID == tgt) {
844 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
848 struct mpt_raid_action_result *ar;
849 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
853 vol_pg = mpt_vol->config_page;
854 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
857 * If the setting matches the configuration,
858 * there is nothing to do.
860 if ((enabled && enable)
861 || (!enabled && !enable))
864 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
866 mpt_vol_prt(mpt, mpt_vol,
867 "mpt_enable_vol: Get request failed!\n");
871 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
872 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
873 : MPI_RAID_ACTION_DISABLE_VOLUME,
874 /*data*/0, /*addr*/0, /*len*/0,
875 /*write*/FALSE, /*wait*/TRUE);
876 if (rv == ETIMEDOUT) {
877 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
878 "%s Volume Timed-out\n",
879 enable ? "Enable" : "Disable");
882 ar = REQ_TO_RAID_ACTION_RESULT(req);
884 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
885 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
886 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
887 enable ? "Enable" : "Disable",
888 rv, req->IOCStatus, ar->action_status);
891 mpt_free_request(mpt, req);
896 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
899 struct mpt_raid_action_result *ar;
900 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
906 vol_pg = mpt_vol->config_page;
907 resyncing = vol_pg->VolumeStatus.Flags
908 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
909 mwce = vol_pg->VolumeSettings.Settings
910 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
913 * If the setting matches the configuration,
914 * there is nothing to do.
916 switch (mpt->raid_mwce_setting) {
917 case MPT_RAID_MWCE_REBUILD_ONLY:
918 if ((resyncing && mwce) || (!resyncing && !mwce)) {
921 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
922 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
924 * Wait one more status update to see if
925 * resyncing gets enabled. It gets disabled
926 * temporarilly when WCE is changed.
931 case MPT_RAID_MWCE_ON:
935 case MPT_RAID_MWCE_OFF:
939 case MPT_RAID_MWCE_NC:
943 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
945 mpt_vol_prt(mpt, mpt_vol,
946 "mpt_verify_mwce: Get request failed!\n");
950 vol_pg->VolumeSettings.Settings ^=
951 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
952 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
953 vol_pg->VolumeSettings.Settings ^=
954 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
955 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
956 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
957 data, /*addr*/0, /*len*/0,
958 /*write*/FALSE, /*wait*/TRUE);
959 if (rv == ETIMEDOUT) {
960 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
961 "Write Cache Enable Timed-out\n");
964 ar = REQ_TO_RAID_ACTION_RESULT(req);
966 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
967 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
968 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
969 "%d:%x:%x\n", rv, req->IOCStatus,
972 vol_pg->VolumeSettings.Settings ^=
973 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
975 mpt_free_request(mpt, req);
979 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
982 struct mpt_raid_action_result *ar;
983 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
987 vol_pg = mpt_vol->config_page;
989 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
993 * If the current RAID resync rate does not
994 * match our configured rate, update it.
996 prio = vol_pg->VolumeSettings.Settings
997 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
998 if (vol_pg->ResyncRate != 0
999 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1000 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1002 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1003 "Get request failed!\n");
1007 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1008 MPI_RAID_ACTION_SET_RESYNC_RATE,
1009 mpt->raid_resync_rate, /*addr*/0,
1010 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1011 if (rv == ETIMEDOUT) {
1012 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1013 "Resync Rate Setting Timed-out\n");
1017 ar = REQ_TO_RAID_ACTION_RESULT(req);
1019 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1020 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1021 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1022 "%d:%x:%x\n", rv, req->IOCStatus,
1025 vol_pg->ResyncRate = mpt->raid_resync_rate;
1026 mpt_free_request(mpt, req);
1027 } else if ((prio && mpt->raid_resync_rate < 128)
1028 || (!prio && mpt->raid_resync_rate >= 128)) {
1031 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1033 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1034 "Get request failed!\n");
1038 vol_pg->VolumeSettings.Settings ^=
1039 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1040 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1041 vol_pg->VolumeSettings.Settings ^=
1042 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1043 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1044 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1045 data, /*addr*/0, /*len*/0,
1046 /*write*/FALSE, /*wait*/TRUE);
1047 if (rv == ETIMEDOUT) {
1048 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1049 "Resync Rate Setting Timed-out\n");
1052 ar = REQ_TO_RAID_ACTION_RESULT(req);
1054 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1055 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1056 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1057 "%d:%x:%x\n", rv, req->IOCStatus,
1060 vol_pg->VolumeSettings.Settings ^=
1061 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1064 mpt_free_request(mpt, req);
1069 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1070 struct cam_path *path)
1072 struct ccb_relsim crs;
1074 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1075 crs.ccb_h.func_code = XPT_REL_SIMQ;
1076 crs.ccb_h.flags = CAM_DEV_QFREEZE;
1077 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1078 crs.openings = mpt->raid_queue_depth;
1079 xpt_action((union ccb *)&crs);
1080 if (crs.ccb_h.status != CAM_REQ_CMP)
1081 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1082 "with CAM status %#x\n", crs.ccb_h.status);
1086 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1088 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1091 vol_pg = mpt_vol->config_page;
1092 mpt_vol_prt(mpt, mpt_vol, "Settings (");
1093 for (i = 1; i <= 0x8000; i <<= 1) {
1094 switch (vol_pg->VolumeSettings.Settings & i) {
1095 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1096 mpt_prtc(mpt, " Member-WCE");
1098 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1099 mpt_prtc(mpt, " Offline-On-SMART-Err");
1101 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1102 mpt_prtc(mpt, " Hot-Plug-Spares");
1104 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1105 mpt_prtc(mpt, " High-Priority-ReSync");
1111 mpt_prtc(mpt, " )\n");
1112 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1113 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1114 powerof2(vol_pg->VolumeSettings.HotSparePool)
1116 for (i = 0; i < 8; i++) {
1120 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1122 mpt_prtc(mpt, " %d", i);
1124 mpt_prtc(mpt, "\n");
1126 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1127 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1128 struct mpt_raid_disk *mpt_disk;
1129 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1130 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1133 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1134 disk_pg = &mpt_disk->config_page;
1136 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1137 pt_bus, disk_pg->PhysDiskID);
1138 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1139 mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1140 "Primary" : "Secondary");
1142 mpt_prtc(mpt, "Stripe Position %d",
1143 mpt_disk->member_number);
1145 f = disk_pg->PhysDiskStatus.Flags;
1146 s = disk_pg->PhysDiskStatus.State;
1147 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1148 mpt_prtc(mpt, " Out of Sync");
1150 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1151 mpt_prtc(mpt, " Quiesced");
1153 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1154 mpt_prtc(mpt, " Inactive");
1156 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1157 mpt_prtc(mpt, " Was Optimal");
1159 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1160 mpt_prtc(mpt, " Was Non-Optimal");
1163 case MPI_PHYSDISK0_STATUS_ONLINE:
1164 mpt_prtc(mpt, " Online");
1166 case MPI_PHYSDISK0_STATUS_MISSING:
1167 mpt_prtc(mpt, " Missing");
1169 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1170 mpt_prtc(mpt, " Incompatible");
1172 case MPI_PHYSDISK0_STATUS_FAILED:
1173 mpt_prtc(mpt, " Failed");
1175 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1176 mpt_prtc(mpt, " Initializing");
1178 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1179 mpt_prtc(mpt, " Requested Offline");
1181 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1182 mpt_prtc(mpt, " Requested Failed");
1184 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1186 mpt_prtc(mpt, " Offline Other (%x)", s);
1189 mpt_prtc(mpt, "\n");
1194 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1196 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1197 int rd_bus = cam_sim_bus(mpt->sim);
1198 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1201 disk_pg = &mpt_disk->config_page;
1202 mpt_disk_prt(mpt, mpt_disk,
1203 "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1204 device_get_nameunit(mpt->dev), rd_bus,
1205 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1206 pt_bus, mpt_disk - mpt->raid_disks);
1207 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1209 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1210 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1212 for (i = 0; i < 8; i++) {
1216 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1218 mpt_prtc(mpt, " %d", i);
1220 mpt_prtc(mpt, "\n");
1224 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1225 IOC_3_PHYS_DISK *ioc_disk)
1229 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1230 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1231 &mpt_disk->config_page.Header,
1232 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1234 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1235 "Failed to read RAID Disk Hdr(%d)\n",
1236 ioc_disk->PhysDiskNum);
1239 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1240 &mpt_disk->config_page.Header,
1241 sizeof(mpt_disk->config_page),
1242 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1244 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1245 "Failed to read RAID Disk Page(%d)\n",
1246 ioc_disk->PhysDiskNum);
1247 mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
/*
 * Refresh the cached RAID Volume Page 0 for the volume referenced by
 * ioc_vol, and, when a resync is in progress, fetch the controller's
 * sync-progress indicator via a RAID action request.
 * NOTE(review): some original source lines (braces, error returns,
 * local declarations) are elided in this extract; the comments below
 * describe only the visible code.
 */
1251 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1252 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1254 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1255 struct mpt_raid_action_result *ar;
/* Mark the cached page stale until the re-read below completes. */
1260 vol_pg = mpt_vol->config_page;
1261 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
/* Fetch the config page header for this volume's page number first. */
1263 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1264 ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1266 mpt_vol_prt(mpt, mpt_vol,
1267 "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1268 ioc_vol->VolumePageNumber);
/* Then read the current page contents into the cached copy. */
1272 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1273 &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1275 mpt_vol_prt(mpt, mpt_vol,
1276 "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1277 ioc_vol->VolumePageNumber);
/* Convert the page from MPI wire format to host byte order. */
1280 mpt2host_config_page_raid_vol_0(vol_pg);
1282 mpt_vol->flags |= MPT_RVF_ACTIVE;
1284 /* Update disk entry array data. */
1285 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1286 struct mpt_raid_disk *mpt_disk;
1287 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1288 mpt_disk->volume = mpt_vol;
1289 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
/* IM (mirror) member maps start at 1; normalize to 0-based. */
1290 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1291 mpt_disk->member_number--;
/* Nothing further to do unless a resync is currently running. */
1295 if ((vol_pg->VolumeStatus.Flags
1296 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1299 req = mpt_get_request(mpt, TRUE);
1301 mpt_vol_prt(mpt, mpt_vol,
1302 "mpt_refresh_raid_vol: Get request failed!\n");
/* Ask the IOC for the resync progress indicator structure. */
1305 rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1306 MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
/* On timeout the request is still owned by us; release it. */
1307 if (rv == ETIMEDOUT) {
1308 mpt_vol_prt(mpt, mpt_vol,
1309 "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1310 mpt_free_request(mpt, req);
1314 ar = REQ_TO_RAID_ACTION_RESULT(req);
1316 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1317 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
/* Success: cache the progress data in host byte order. */
1318 memcpy(&mpt_vol->sync_progress,
1319 &ar->action_data.indicator_struct,
1320 sizeof(mpt_vol->sync_progress));
1321 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1323 mpt_vol_prt(mpt, mpt_vol,
1324 "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1326 mpt_free_request(mpt, req);
1330 * Update in-core information about RAID support. We update any entries
1331 * that didn't previously exist or have been marked as needing to
1332 * be updated by our event handler. Interesting changes are displayed
/*
 * Re-synchronize in-core RAID state with the controller: re-read IOC
 * Pages 2 and 3, refresh stale disk and volume pages, announce new or
 * departed members, report resync progress, and count non-optimal
 * volumes into mpt->raid_nonopt_volumes.
 * Fix over the original: newly announced disks are tagged with the
 * disk-namespace flag MPT_RDF_ANNOUNCED instead of the volume flag
 * MPT_RVF_ANNOUNCED, matching the MPT_RDF_ANNOUNCED tests above.
 * NOTE(review): some original lines (braces, returns, declarations)
 * are elided in this extract and are preserved as-is.
 */
1336 mpt_refresh_raid_data(struct mpt_softc *mpt)
1338 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1339 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1340 IOC_3_PHYS_DISK *ioc_disk;
1341 IOC_3_PHYS_DISK *ioc_last_disk;
1342 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1346 u_int nonopt_volumes;
/* RAID support requires both cached IOC pages. */
1348 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1353 * Mark all items as unreferenced by the configuration.
1354 * This allows us to find, report, and discard stale
1357 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1358 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1360 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1361 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1365 * Get Physical Disk information.
1367 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1368 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1369 &mpt->ioc_page3->Header, len,
1370 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1373 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1376 mpt2host_config_page_ioc3(mpt->ioc_page3);
/* Refresh any referenced disk whose cached page is inactive or stale. */
1378 ioc_disk = mpt->ioc_page3->PhysDisk;
1379 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1380 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1381 struct mpt_raid_disk *mpt_disk;
1383 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1384 mpt_disk->flags |= MPT_RDF_REFERENCED;
1385 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1386 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1387 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1389 mpt_disk->flags |= MPT_RDF_ACTIVE;
1394 * Refresh volume data.
1396 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1397 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1398 &mpt->ioc_page2->Header, len,
1399 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1401 mpt_prt(mpt, "mpt_refresh_raid_data: "
1402 "Failed to read IOC Page 2\n");
1405 mpt2host_config_page_ioc2(mpt->ioc_page2);
/* Re-read any volume that is stale or still resyncing. */
1407 ioc_vol = mpt->ioc_page2->RaidVolume;
1408 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1409 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1410 struct mpt_raid_volume *mpt_vol;
1412 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1413 mpt_vol->flags |= MPT_RVF_REFERENCED;
1414 vol_pg = mpt_vol->config_page;
1417 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1418 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1419 || (vol_pg->VolumeStatus.Flags
1420 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1421 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1423 mpt_vol->flags |= MPT_RVF_ACTIVE;
/* Report volume state changes and tally non-optimal volumes. */
1427 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1428 struct mpt_raid_volume *mpt_vol;
1434 mpt_vol = &mpt->raid_volumes[i];
1436 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1440 vol_pg = mpt_vol->config_page;
/* Announced but no longer referenced: the volume went away. */
1441 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1442 == MPT_RVF_ANNOUNCED) {
1443 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1448 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1449 mpt_announce_vol(mpt, mpt_vol);
1450 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1453 if (vol_pg->VolumeStatus.State !=
1454 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
/* Only report details for volumes that changed since last pass. */
1457 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1460 mpt_vol->flags |= MPT_RVF_UP2DATE;
1461 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1462 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1463 mpt_verify_mwce(mpt, mpt_vol);
1465 if (vol_pg->VolumeStatus.Flags == 0) {
/* Decode and print each set status flag bit (bits 0x01-0x80). */
1469 mpt_vol_prt(mpt, mpt_vol, "Status (");
1470 for (m = 1; m <= 0x80; m <<= 1) {
1471 switch (vol_pg->VolumeStatus.Flags & m) {
1472 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1473 mpt_prtc(mpt, " Enabled");
1475 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1476 mpt_prtc(mpt, " Quiesced");
1478 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1479 mpt_prtc(mpt, " Re-Syncing");
1481 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1482 mpt_prtc(mpt, " Inactive");
1488 mpt_prtc(mpt, " )\n");
1490 if ((vol_pg->VolumeStatus.Flags
1491 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1494 mpt_verify_resync_rate(mpt, mpt_vol);
1496 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1497 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1498 if (vol_pg->ResyncRate != 0) {
/* ResyncRate is 0-255; scale to thousandths of a percent. */
1499 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1500 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1501 prio / 1000, prio % 1000);
1503 prio = vol_pg->VolumeSettings.Settings
1504 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1505 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1506 prio ? "High" : "Low");
1508 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1509 "blocks remaining\n", (uintmax_t)left,
1512 /* Periodically report on sync progress. */
1513 mpt_schedule_raid_refresh(mpt);
/* Report disk state changes. */
1516 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1517 struct mpt_raid_disk *mpt_disk;
1518 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1521 mpt_disk = &mpt->raid_disks[i];
1522 disk_pg = &mpt_disk->config_page;
1524 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
/* Announced but no longer referenced: the disk went away. */
1527 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1528 == MPT_RDF_ANNOUNCED) {
1529 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1530 mpt_disk->flags = 0;
1535 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1536 mpt_announce_disk(mpt, mpt_disk);
/* BUGFIX: was MPT_RVF_ANNOUNCED (volume-flag namespace). */
1537 mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1540 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1543 mpt_disk->flags |= MPT_RDF_UP2DATE;
1544 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1545 if (disk_pg->PhysDiskStatus.Flags == 0)
/* Decode and print each set physical-disk status flag bit. */
1548 mpt_disk_prt(mpt, mpt_disk, "Status (");
1549 for (m = 1; m <= 0x80; m <<= 1) {
1550 switch (disk_pg->PhysDiskStatus.Flags & m) {
1551 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1552 mpt_prtc(mpt, " Out-Of-Sync");
1554 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1555 mpt_prtc(mpt, " Quiesced");
1561 mpt_prtc(mpt, " )\n");
1564 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * callout(9) handler armed by mpt_schedule_raid_refresh().  Runs with
 * the mpt lock held (asserted below) and signals the RAID logic via
 * mpt_raid_wakeup().
 */
1569 mpt_raid_timer(void *arg)
1571 struct mpt_softc *mpt;
1573 mpt = (struct mpt_softc *)arg;
1574 MPT_LOCK_ASSERT(mpt);
1575 mpt_raid_wakeup(mpt);
/*
 * (Re)arm the RAID refresh callout so mpt_raid_timer() fires again
 * after MPT_RAID_SYNC_REPORT_INTERVAL ticks.
 */
1579 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1582 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1583 mpt_raid_timer, mpt);
/*
 * Release all RAID-related allocations: each volume's cached config
 * page, the volume and disk arrays, and the cached IOC Pages 2 and 3.
 * Freed pointers are NULLed so a repeated call is harmless, and the
 * max-volume/max-disk counts are reset to zero.
 */
1587 mpt_raid_free_mem(struct mpt_softc *mpt)
1590 if (mpt->raid_volumes) {
1591 struct mpt_raid_volume *mpt_raid;
1593 for (i = 0; i < mpt->raid_max_volumes; i++) {
1594 mpt_raid = &mpt->raid_volumes[i];
1595 if (mpt_raid->config_page) {
1596 free(mpt_raid->config_page, M_DEVBUF);
1597 mpt_raid->config_page = NULL;
1600 free(mpt->raid_volumes, M_DEVBUF);
1601 mpt->raid_volumes = NULL;
1603 if (mpt->raid_disks) {
1604 free(mpt->raid_disks, M_DEVBUF);
1605 mpt->raid_disks = NULL;
1607 if (mpt->ioc_page2) {
1608 free(mpt->ioc_page2, M_DEVBUF);
1609 mpt->ioc_page2 = NULL;
1611 if (mpt->ioc_page3) {
1612 free(mpt->ioc_page3, M_DEVBUF);
1613 mpt->ioc_page3 = NULL;
1615 mpt->raid_max_volumes = 0;
1616 mpt->raid_max_disks = 0;
/*
 * Validate and record a new resync rate, then push it to every active
 * volume via mpt_verify_resync_rate().  Rates outside
 * [MPT_RAID_RESYNC_RATE_MIN, MPT_RAID_RESYNC_RATE_MAX] are rejected
 * unless the "no change" sentinel MPT_RAID_RESYNC_RATE_NC is given.
 */
1620 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1622 struct mpt_raid_volume *mpt_vol;
1624 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1625 || rate < MPT_RAID_RESYNC_RATE_MIN)
1626 && rate != MPT_RAID_RESYNC_RATE_NC)
1630 mpt->raid_resync_rate = rate;
/* Inactive volumes are skipped; only live ones get the new rate. */
1631 RAID_VOL_FOREACH(mpt, mpt_vol) {
1632 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1635 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * Set the default volume queue depth (valid range 1-255) and apply it
 * to each active volume by building a CAM path to the volume's target
 * and calling mpt_adjust_queue_depth().
 */
1642 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1644 struct mpt_raid_volume *mpt_vol;
1646 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1650 mpt->raid_queue_depth = vol_queue_depth;
1651 RAID_VOL_FOREACH(mpt, mpt_vol) {
1652 struct cam_path *path;
1655 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
/* NOTE(review): clears the adapter-wide rescan flag inside the
 * per-volume loop — confirm this is intentional. */
1658 mpt->raid_rescan = 0;
1660 error = xpt_create_path(&path, NULL,
1661 cam_sim_path(mpt->sim),
1662 mpt_vol->config_page->VolumeID,
1664 if (error != CAM_REQ_CMP) {
1665 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1668 mpt_adjust_queue_depth(mpt, mpt_vol, path);
/* Path was only needed for the adjustment call; release it. */
1669 xpt_free_path(path);
/*
 * Change the member write-cache-enable (MWCE) policy and re-verify the
 * cache setting on every active volume.  On the first invocation after
 * attach, detect an unsafe shutdown (resync in progress with member
 * write caching enabled) and warn that a full resync is advisable.
 */
1676 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1678 struct mpt_raid_volume *mpt_vol;
1679 int force_full_resync;
/* No-op if the requested policy already matches the current one. */
1682 if (mwce == mpt->raid_mwce_setting) {
1688 * Catch MWCE being left on due to a failed shutdown. Since
1689 * sysctls cannot be set by the loader, we treat the first
1690 * setting of this variable specially and force a full volume
1691 * resync if MWCE is enabled and a resync is in progress.
1693 force_full_resync = 0;
1694 if (mpt->raid_mwce_set == 0
1695 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1696 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1697 force_full_resync = 1;
1699 mpt->raid_mwce_setting = mwce;
1700 RAID_VOL_FOREACH(mpt, mpt_vol) {
1701 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1705 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1708 vol_pg = mpt_vol->config_page;
/* Read the volume's live resync and write-cache state. */
1709 resyncing = vol_pg->VolumeStatus.Flags
1710 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1711 mwce = vol_pg->VolumeSettings.Settings
1712 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1713 if (force_full_resync && resyncing && mwce) {
1715 * XXX disable/enable volume should force a resync,
1716 * but we'll need to quiesce, drain, and restart
1719 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1720 "detected. Suggest full resync.\n");
1722 mpt_verify_mwce(mpt, mpt_vol);
/* Remember that the policy has been set at least once. */
1724 mpt->raid_mwce_set = 1;
/*
 * sysctl string names for the MWCE policy values, indexed by the
 * mpt_raid_mwce_t numeric value.  NOTE(review): the other entries of
 * this array are elided from this extract.
 */
1729 static const char *mpt_vol_mwce_strs[] =
1733 "On-During-Rebuild",
/*
 * sysctl handler for "vol_member_wce".  Reports the current policy as
 * a string; on write, matches the supplied string against
 * mpt_vol_mwce_strs[] and applies the corresponding policy index via
 * mpt_raid_set_vol_mwce().
 */
1738 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1741 struct mpt_softc *mpt;
1749 mpt = (struct mpt_softc *)arg1;
1750 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
/* Emit the current value, including its NUL terminator. */
1751 error = SYSCTL_OUT(req, str, strlen(str) + 1);
/* Read-only access or output error: nothing more to do. */
1752 if (error || !req->newptr) {
1756 size = req->newlen - req->newidx;
/* Reject input that would not fit in the local buffer. */
1757 if (size >= sizeof(inbuf)) {
1761 error = SYSCTL_IN(req, inbuf, size);
/* Linear search for the matching policy name. */
1766 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1767 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1768 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * sysctl handler for "vol_resync_rate": let sysctl_handle_int() read
 * or modify a local copy, then validate and apply a new value through
 * mpt_raid_set_vol_resync_rate().
 */
1775 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1777 struct mpt_softc *mpt;
1778 u_int raid_resync_rate;
1783 mpt = (struct mpt_softc *)arg1;
1784 raid_resync_rate = mpt->raid_resync_rate;
1786 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
/* Read-only access or handler error: nothing to apply. */
1787 if (error || !req->newptr) {
1791 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * sysctl handler for "vol_queue_depth": let sysctl_handle_int() read
 * or modify a local copy, then validate and apply a new value through
 * mpt_raid_set_vol_queue_depth().
 */
1795 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1797 struct mpt_softc *mpt;
1798 u_int raid_queue_depth;
1803 mpt = (struct mpt_softc *)arg1;
1804 raid_queue_depth = mpt->raid_queue_depth;
1806 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
/* Read-only access or handler error: nothing to apply. */
1807 if (error || !req->newptr) {
1811 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
/*
 * Register the driver's RAID sysctl nodes under the device's sysctl
 * tree: vol_member_wce (string), vol_queue_depth (int),
 * vol_resync_rate (int), and the read-only nonoptimal_volumes counter.
 */
1815 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1817 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1818 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1820 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1821 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1822 mpt, 0, mpt_raid_sysctl_vol_member_wce, "A",
1823 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1825 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1826 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1827 mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I",
1828 "default volume queue depth");
1830 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1831 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
1832 mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I",
1833 "volume resync priority (0 == NC, 1 - 255)");
1834 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1835 "nonoptimal_volumes", CTLFLAG_RD,
1836 &mpt->raid_nonopt_volumes, 0,
1837 "number of nonoptimal volumes");