2 * Routines for handling the integrated RAID features of LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_periph.h>
56 #include <cam/cam_xpt_sim.h>
58 #if __FreeBSD_version < 500000
59 #include <sys/devicestat.h>
60 #define GIANT_REQUIRED
62 #include <cam/cam_periph.h>
64 #include <sys/callout.h>
65 #include <sys/kthread.h>
66 #include <sys/sysctl.h>
68 #include <machine/stdarg.h>
/*
 * Per-request result area for RAID ACTION commands.  It lives in the
 * request buffer immediately after the MSG_RAID_ACTION_REQUEST header
 * (see REQ_TO_RAID_ACTION_RESULT below).
 * NOTE(review): interior lines (e.g. the union wrapper around the action
 * data members) are missing from this extract -- confirm against the
 * full source.
 */
70 struct mpt_raid_action_result
73 	MPI_RAID_VOL_INDICATOR	indicator_struct;
74 	uint32_t		new_settings;
75 	uint8_t			phys_disk_num;
77 	uint16_t		action_status;
/* Locate the action-result area appended after the request message. */
80 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
81 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
/* Extract the IOC status code, masking off the log-info flag bits. */
83 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
86 static mpt_probe_handler_t mpt_raid_probe;
87 static mpt_attach_handler_t mpt_raid_attach;
88 static mpt_enable_handler_t mpt_raid_enable;
89 static mpt_event_handler_t mpt_raid_event;
90 static mpt_shutdown_handler_t mpt_raid_shutdown;
91 static mpt_reset_handler_t mpt_raid_ioc_reset;
92 static mpt_detach_handler_t mpt_raid_detach;
/*
 * Personality registration: hooks the RAID subsystem into the core mpt(4)
 * driver's probe/attach/event/reset/shutdown/detach life cycle.  Must be
 * registered after (and depends on) the CAM personality.
 */
94 static struct mpt_personality mpt_raid_personality =
97 	.probe = mpt_raid_probe,
98 	.attach = mpt_raid_attach,
99 	.enable = mpt_raid_enable,
100 	.event = mpt_raid_event,
101 	.reset = mpt_raid_ioc_reset,
102 	.shutdown = mpt_raid_shutdown,
103 	.detach = mpt_raid_detach,
106 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
107 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
109 static mpt_reply_handler_t mpt_raid_reply_handler;
110 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
111 MSG_DEFAULT_REPLY *reply_frame);
112 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
113 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
114 static void mpt_raid_thread(void *arg);
115 static timeout_t mpt_raid_timer;
117 static void mpt_enable_vol(struct mpt_softc *mpt,
118 struct mpt_raid_volume *mpt_vol, int enable);
120 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
121 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
123 #if __FreeBSD_version < 500000
124 #define mpt_raid_sysctl_attach(x) do { } while (0)
126 static void mpt_raid_sysctl_attach(struct mpt_softc *);
129 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Return a human-readable name for a volume's MPI RAID type.
 * NOTE(review): the per-case return statements and default are elided in
 * this extract -- presumably "RAID-0"/"RAID-1E"/"RAID-1"-style strings;
 * confirm against the full source.
 */
132 mpt_vol_type(struct mpt_raid_volume *vol)
134 	switch (vol->config_page->VolumeType) {
135 	case MPI_RAID_VOL_TYPE_IS:
137 	case MPI_RAID_VOL_TYPE_IME:
139 	case MPI_RAID_VOL_TYPE_IM:
/*
 * Return a human-readable name for a volume's current state
 * (Optimal/Degraded/Failed, per the MPI VolumeStatus field).
 * NOTE(review): return statements are elided in this extract.
 */
147 mpt_vol_state(struct mpt_raid_volume *vol)
149 	switch (vol->config_page->VolumeStatus.State) {
150 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
152 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
154 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/*
 * Return a human-readable name for a physical disk's current state,
 * taken from the cached RAID_PHYS_DISK page 0 status.
 * NOTE(review): several case return statements are elided in this extract.
 */
162 mpt_disk_state(struct mpt_raid_disk *disk)
164 	switch (disk->config_page.PhysDiskStatus.State) {
165 	case MPI_PHYSDISK0_STATUS_ONLINE:
167 	case MPI_PHYSDISK0_STATUS_MISSING:
169 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
170 		return ("Incompatible");
171 	case MPI_PHYSDISK0_STATUS_FAILED:
173 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
174 		return ("Initializing");
175 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
176 		return ("Offline Requested");
177 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
178 		return ("Failed per Host Request");
179 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * printf-style diagnostic prefixed with the volume's identity
 * (unit, volume index, bus:id).  The varargs handling (va_start/
 * vprintf/va_end) is elided in this extract.
 */
187 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
188 	    const char *fmt, ...)
192 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
193 	    (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
194 	    vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * printf-style diagnostic prefixed with the disk's identity.  If the
 * disk is a member of a volume, print volume id and member number;
 * otherwise print the disk's own bus:id.  Varargs handling is elided
 * in this extract.
 */
201 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
202 	     const char *fmt, ...)
206 	if (disk->volume != NULL) {
207 		printf("(%s:vol%d:%d): ",
208 		    device_get_nameunit(mpt->dev),
209 		    disk->volume->config_page->VolumeID,
210 		    disk->member_number);
212 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
213 		    disk->config_page.PhysDiskBus,
214 		    disk->config_page.PhysDiskID);
/*
 * CAM async-event callback.  On AC_FOUND_DEVICE, if the newly found
 * target is one of our active RAID volumes, adjust its CAM queue depth
 * to the driver's configured RAID depth.  The surrounding switch and
 * loop-exit lines are elided in this extract.
 */
222 mpt_raid_async(void *callback_arg, u_int32_t code,
223 	       struct cam_path *path, void *arg)
225 	struct mpt_softc *mpt;
227 	mpt = (struct mpt_softc*)callback_arg;
229 	case AC_FOUND_DEVICE:
231 		struct ccb_getdev *cgd;
232 		struct mpt_raid_volume *mpt_vol;
234 		cgd = (struct ccb_getdev *)arg;
239 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
240 			 cgd->ccb_h.target_id);
242 		RAID_VOL_FOREACH(mpt, mpt_vol) {
			/* Skip volumes not currently configured. */
243 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
246 			if (mpt_vol->config_page->VolumeID
247 			 == cgd->ccb_h.target_id) {
248 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
/*
 * Personality probe: claim the controller only if IOC page 2 reports
 * integrated-RAID support (a nonzero physical-disk capacity).  The
 * return statements are elided in this extract.
 */
259 mpt_raid_probe(struct mpt_softc *mpt)
261 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
/*
 * Personality attach: initialize the status-refresh timer, spawn the
 * RAID monitoring thread, register our reply handler, hook CAM's
 * AC_FOUND_DEVICE async events, and attach the sysctl knobs.  On any
 * failure the (elided) cleanup path calls mpt_raid_detach().
 * Fix: corrected misspelled error message "RAID haandler" -> "RAID handler".
 */
268 mpt_raid_attach(struct mpt_softc *mpt)
270 	struct ccb_setasync csa;
271 	mpt_handler_t	handler;
274 	mpt_callout_init(&mpt->raid_timer);
276 	error = mpt_spawn_raid_thread(mpt);
278 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
283 	handler.reply_handler = mpt_raid_reply_handler;
284 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
287 		mpt_prt(mpt, "Unable to register RAID handler!\n");
	/* Register for device-arrival notifications on our bus. */
291 	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
292 	csa.ccb_h.func_code = XPT_SASYNC_CB;
293 	csa.event_enable = AC_FOUND_DEVICE;
294 	csa.callback = mpt_raid_async;
295 	csa.callback_arg = mpt;
296 	xpt_action((union ccb *)&csa);
297 	if (csa.ccb_h.status != CAM_REQ_CMP) {
298 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
299 			"CAM async handler.\n");
303 	mpt_raid_sysctl_attach(mpt);
307 	mpt_raid_detach(mpt);
/*
 * Personality enable hook.  The body is elided in this extract;
 * presumably a trivial success return -- confirm against full source.
 */
312 mpt_raid_enable(struct mpt_softc *mpt)
/*
 * Personality detach: stop the refresh timer, terminate the RAID
 * thread, deregister our reply handler, and disable the CAM async
 * callback registered at attach time (event_enable = 0).
 */
318 mpt_raid_detach(struct mpt_softc *mpt)
320 	struct ccb_setasync csa;
321 	mpt_handler_t handler;
323 	callout_stop(&mpt->raid_timer);
325 	mpt_terminate_raid_thread(mpt);
327 	handler.reply_handler = mpt_raid_reply_handler;
328 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
330 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
331 	csa.ccb_h.func_code = XPT_SASYNC_CB;
332 	csa.event_enable = 0;
333 	csa.callback = mpt_raid_async;
334 	csa.callback_arg = mpt;
335 	xpt_action((union ccb *)&csa);
/* IOC-reset hook for the RAID personality; intentionally a no-op. */
340 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
342 	/* Nothing to do yet. */
/*
 * Text descriptions indexed by the ReasonCode of an
 * MPI_EVENT_INTEGRATED_RAID event (see mpt_raid_event()).  The first
 * entries (e.g. "Volume Created"/"Volume Deleted") are elided in this
 * extract.
 */
345 static const char *raid_event_txt[] =
349 	"Volume Settings Changed",
350 	"Volume Status Changed",
351 	"Volume Physical Disk Membership Changed",
352 	"Physical Disk Created",
353 	"Physical Disk Deleted",
354 	"Physical Disk Settings Changed",
355 	"Physical Disk Status Changed",
356 	"Domain Validation Required",
357 	"SMART Data Received",
358 	"Replace Action Started",
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications.  Locate the volume
 * and/or physical disk the event refers to, clear the relevant
 * UP2DATE flags so the RAID thread refreshes its cached configuration
 * pages, log a human-readable description, and finally wake the RAID
 * thread.  Returns early (elided) for events we don't handle.
 */
362 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
363 	       MSG_EVENT_NOTIFY_REPLY *msg)
365 	EVENT_DATA_RAID *raid_event;
366 	struct mpt_raid_volume *mpt_vol;
367 	struct mpt_raid_disk *mpt_disk;
368 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
372 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
376 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
	/* Find the active volume matching the event's bus/ID, if any. */
380 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
381 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
382 			mpt_vol = &mpt->raid_volumes[i];
383 			vol_pg = mpt_vol->config_page;
385 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
388 			if (vol_pg->VolumeID == raid_event->VolumeID
389 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
392 		if (i >= mpt->ioc_page2->MaxVolumes) {
	/* 0xFF means "no physical disk associated with this event". */
399 	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
400 		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
401 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
407 	switch(raid_event->ReasonCode) {
408 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
409 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
411 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
412 		if (mpt_vol != NULL) {
413 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
414 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
417 				 * Coalesce status messages into one
418 				 * per background run of our RAID thread.
419 				 * This removes "spurious" status messages
426 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
427 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
429 		if (mpt_vol != NULL) {
430 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
433 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
434 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
437 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
438 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
440 		if (mpt_disk != NULL) {
441 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
444 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
447 	case MPI_EVENT_RAID_RC_SMART_DATA:
448 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
	/* Emit a log line identifying the affected object... */
453 	if (mpt_disk != NULL) {
454 		mpt_disk_prt(mpt, mpt_disk, "");
455 	} else if (mpt_vol != NULL) {
456 		mpt_vol_prt(mpt, mpt_vol, "");
458 		mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
459 		    raid_event->VolumeID);
461 		if (raid_event->PhysDiskNum != 0xFF)
462 			mpt_prtc(mpt, ":%d): ",
463 			    raid_event->PhysDiskNum);
465 			mpt_prtc(mpt, "): ");
	/* ...followed by the event description (or raw code if unknown). */
468 	if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
469 		mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
470 		    raid_event->ReasonCode);
472 		mpt_prtc(mpt, "%s\n",
473 		    raid_event_txt[raid_event->ReasonCode]);
476 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
477 		/* XXX Use CAM's print sense for this... */
478 		if (mpt_disk != NULL)
479 			mpt_disk_prt(mpt, mpt_disk, "");
481 			mpt_prt(mpt, "Volume(%d:%d:%d: ",
482 			    raid_event->VolumeBus, raid_event->VolumeID,
483 			    raid_event->PhysDiskNum);
484 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
485 		    raid_event->ASC, raid_event->ASCQ);
	/* Kick the RAID thread to refresh configuration data. */
488 	mpt_raid_wakeup(mpt);
/*
 * Personality shutdown: force member write-cache off on every volume
 * (unless policy is rebuild-only) so the array is consistent across
 * the impending power-down.
 */
493 mpt_raid_shutdown(struct mpt_softc *mpt)
495 	struct mpt_raid_volume *mpt_vol;
497 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
501 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
502 	RAID_VOL_FOREACH(mpt, mpt_vol) {
503 		mpt_verify_mwce(mpt, mpt_vol);
/*
 * Reply dispatcher for RAID ACTION requests.  Parses the reply frame
 * (if any), marks the request done, removes it from the pending list,
 * then either wakes a sleeping waiter or frees the request.  The
 * branch that completes a quiesce CCB with an error is elided here.
 */
508 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
509 		       uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
517 	if (reply_frame != NULL)
518 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
520 	else if (req->ccb != NULL) {
521 		/* Complete Quiesce CCB with error... */
525 	req->state &= ~REQ_STATE_QUEUED;
526 	req->state |= REQ_STATE_DONE;
527 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
529 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
531 	} else if (free_req) {
532 		mpt_free_request(mpt, req);
/*
 * Parse additional completion information in the reply
 * frame for RAID I/O requests: record the IOC status on the request
 * and copy the action data/status into the per-request result area.
 * Fix: corrected misspelled log message "ENABLY PHYSIO DONE" ->
 * "ENABLE PHYSIO DONE".
 */
543 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
544 			     MSG_DEFAULT_REPLY *reply_frame)
546 	MSG_RAID_ACTION_REPLY *reply;
547 	struct mpt_raid_action_result *action_result;
548 	MSG_RAID_ACTION_REQUEST *rap;
550 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
551 	req->IOCStatus = le16toh(reply->IOCStatus);
552 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
554 	switch (rap->Action) {
555 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
556 		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
558 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
559 		mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
	/* Stash the action result where waiters can find it. */
564 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
565 	memcpy(&action_result->action_data, &reply->ActionData,
566 	    sizeof(action_result->action_data));
567 	action_result->action_status = le16toh(reply->ActionStatus);
/*
 * Utility routine to perform a RAID action command: build the
 * MSG_RAID_ACTION_REQUEST (volume/disk addressing plus a single simple
 * SGE describing the optional data buffer), submit it, and optionally
 * wait up to 2 seconds for completion.  PhysDiskNum 0xFF addresses the
 * volume rather than a member disk.
 */
575 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
576 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
577 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
580 	MSG_RAID_ACTION_REQUEST *rap;
584 	memset(rap, 0, sizeof *rap);
585 	rap->Action = Action;
586 	rap->ActionDataWord = htole32(ActionDataWord);
587 	rap->Function = MPI_FUNCTION_RAID_ACTION;
588 	rap->VolumeID = vol->config_page->VolumeID;
589 	rap->VolumeBus = vol->config_page->VolumeBus;
591 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
593 		rap->PhysDiskNum = 0xFF;
	/* Describe the data buffer with one terminal simple SGE. */
594 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
595 	se->Address = htole32(addr);
596 	MPI_pSGE_SET_LENGTH(se, len);
597 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
598 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
599 	    MPI_SGE_FLAGS_END_OF_LIST |
600 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
601 	se->FlagsLength = htole32(se->FlagsLength);
602 	rap->MsgContext = htole32(req->index | raid_handler_id);
604 	mpt_check_doorbell(mpt);
605 	mpt_send_cmd(mpt, req);
608 	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
609 			     /*sleep_ok*/FALSE, /*time_ms*/2000));
/*************************** RAID Status Monitoring ***************************/
/*
 * Create the per-controller RAID monitoring kthread.  The physical-disk
 * SIM queue is frozen first and released on failure (or by the thread
 * itself after its first data refresh).
 */
617 mpt_spawn_raid_thread(struct mpt_softc *mpt)
622 	 * Freeze out any CAM transactions until our thread
623 	 * is able to run at least once.  We need to update
624 	 * our RAID pages before accepting I/O or we may
625 	 * reject I/O to an ID we later determine is for a
629 	xpt_freeze_simq(mpt->phydisk_sim, 1);
631 	error = mpt_kthread_create(mpt_raid_thread, mpt,
632 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
633 	    "mpt_raid%d", mpt->unit);
	/* Creation failed: undo the freeze so CAM isn't wedged. */
636 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
/*
 * Ask the RAID thread to exit and wait for it to acknowledge.  The
 * shutdown flag is set, the thread is woken via its wait channel
 * (raid_volumes), and we sleep on &mpt->raid_thread until the thread
 * clears the pointer and wakes us.
 */
643 mpt_terminate_raid_thread(struct mpt_softc *mpt)
646 	if (mpt->raid_thread == NULL) {
649 	mpt->shutdwn_raid = 1;
650 	wakeup(mpt->raid_volumes);
652 	 * Sleep on a slightly different location
653 	 * for this interlock just for added safety.
655 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/* Completion callback for bus rescans: just release the scan path. */
659 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
661 	xpt_free_path(ccb->ccb_h.path);
/*
 * Main loop of the RAID monitoring kthread.  Sleeps until woken by an
 * event or timer, refreshes cached RAID configuration data, releases
 * the phydisk SIM queue after the first successful refresh, and issues
 * a wildcard bus rescan when new physical disks need discovery.  Exits
 * when shutdwn_raid is set, waking any thread in
 * mpt_terminate_raid_thread().
 */
665 mpt_raid_thread(void *arg)
667 	struct mpt_softc *mpt;
671 	mpt = (struct mpt_softc *)arg;
673 	ccb = xpt_alloc_ccb();
675 	while (mpt->shutdwn_raid == 0) {
677 		if (mpt->raid_wakeup == 0) {
678 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
682 		mpt->raid_wakeup = 0;
684 		if (mpt_refresh_raid_data(mpt)) {
685 			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
690 		 * Now that we have our first snapshot of RAID data,
691 		 * allow CAM to access our physical disk bus.
695 			MPTLOCK_2_CAMLOCK(mpt);
696 			xpt_release_simq(mpt->phydisk_sim, TRUE);
697 			CAMLOCK_2_MPTLOCK(mpt);
700 		if (mpt->raid_rescan != 0) {
701 			struct cam_path *path;
704 			mpt->raid_rescan = 0;
			/* Wildcard scan of the pass-through (phydisk) bus. */
706 			error = xpt_create_path(&path, xpt_periph,
707 			    cam_sim_path(mpt->phydisk_sim),
708 			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
709 			if (error != CAM_REQ_CMP) {
710 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
712 				xpt_setup_ccb(&ccb->ccb_h, path, 5);
713 				ccb->ccb_h.func_code = XPT_SCAN_BUS;
714 				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
715 				ccb->crcn.flags = CAM_FLAG_NONE;
716 				MPTLOCK_2_CAMLOCK(mpt);
718 				CAMLOCK_2_MPTLOCK(mpt);
	/* Signal mpt_terminate_raid_thread() that we are gone. */
723 	mpt->raid_thread = NULL;
724 	wakeup(&mpt->raid_thread);
/*
 * Timeout handler armed by mpt_raid_quiesce_disk(); the body (which
 * completes the quiesce CCB with an error) is elided in this extract.
 */
731 mpt_raid_quiesce_timeout(void *arg)
733 	/* Complete the CCB with error */
737 static timeout_t mpt_raid_quiesce_timeout;
/*
 * Quiesce physical I/O to a member disk: freeze its device queue and
 * issue MPI_RAID_ACTION_QUIESCE_PHYS_IO.  Returns CAM_REQ_CMP if the
 * disk is already quiesced, CAM_REQ_CMP_ERR on issue/timeout/status
 * failure (releasing the devq), CAM_REQ_INPROG while the quiesce is
 * pending, or CAM_REQUEUE_REQ.
 * Fix: corrected misspelled diagnostic messages "Quiece" -> "Quiesce".
 */
739 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
745 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
746 		return (CAM_REQ_CMP);
748 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
751 		mpt_disk->flags |= MPT_RDF_QUIESCING;
752 		xpt_freeze_devq(ccb->ccb_h.path, 1);
754 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
755 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
756 					/*ActionData*/0, /*addr*/0,
757 					/*len*/0, /*write*/FALSE,
760 			return (CAM_REQ_CMP_ERR);
762 		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
764 		if (rv == ETIMEDOUT) {
765 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
766 				     "Quiesce Timed-out\n");
767 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
768 			return (CAM_REQ_CMP_ERR);
771 		ar = REQ_TO_RAID_ACTION_RESULT(req);
773 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
774 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
775 			mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed"
776 				    "%d:%x:%x\n", rv, req->IOCStatus,
778 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
779 			return (CAM_REQ_CMP_ERR);
782 		return (CAM_REQ_INPROG);
784 	return (CAM_REQUEUE_REQ);
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Translate a CAM pass-through target id into the physical disk's real
 * SCSI ID.  NOTE(review): mpt_disk is computed from target_id before
 * the bounds check; the short-circuit in the 'if' protects the
 * dereference, but the out-of-range pointer arithmetic is technically
 * dubious -- confirm against the full source.
 */
790 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
792 	struct mpt_raid_disk *mpt_disk;
794 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
795 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
796 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
797 		*tgt = mpt_disk->config_page.PhysDiskID;
800 	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
801 		 ccb->ccb_h.target_id);
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Return whether target id 'tgt' corresponds to one of the active RAID
 * volumes listed in IOC page 2.  Return statements are elided in this
 * extract.
 */
807 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
809 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
810 	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
812 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
815 	ioc_vol = mpt->ioc_page2->RaidVolume;
816 	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
817 	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
818 		if (ioc_vol->VolumeID == tgt) {
/*
 * Enable or disable a volume via a RAID ACTION request, but only if the
 * requested state differs from the current VolumeStatus flags.  Logs a
 * diagnostic on timeout or failed action status; always frees the
 * request at the end.
 */
827 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
831 	struct mpt_raid_action_result *ar;
832 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
836 	vol_pg = mpt_vol->config_page;
837 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
840 	 * If the setting matches the configuration,
841 	 * there is nothing to do.
843 	if ((enabled && enable)
844 	 || (!enabled && !enable))
847 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
849 		mpt_vol_prt(mpt, mpt_vol,
850 			    "mpt_enable_vol: Get request failed!\n");
854 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
855 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
856 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
857 				/*data*/0, /*addr*/0, /*len*/0,
858 				/*write*/FALSE, /*wait*/TRUE);
859 	if (rv == ETIMEDOUT) {
860 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
861 			    "%s Volume Timed-out\n",
862 			    enable ? "Enable" : "Disable");
865 	ar = REQ_TO_RAID_ACTION_RESULT(req);
867 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
868 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
869 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
870 			    enable ? "Enable" : "Disable",
871 			    rv, req->IOCStatus, ar->action_status);
874 	mpt_free_request(mpt, req);
/*
 * Reconcile a volume's member-disk write-cache-enable (MWCE) setting
 * with the driver policy (rebuild-only / on / off / no-change).  When
 * a change is needed, the desired settings word is built by XOR-toggling
 * the WCE bit (and restoring the in-core copy), then sent via
 * MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS.  On failure the in-core
 * settings are toggled back.
 */
879 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
882 	struct mpt_raid_action_result *ar;
883 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
889 	vol_pg = mpt_vol->config_page;
890 	resyncing = vol_pg->VolumeStatus.Flags
891 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
892 	mwce = vol_pg->VolumeSettings.Settings
893 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
896 	 * If the setting matches the configuration,
897 	 * there is nothing to do.
899 	switch (mpt->raid_mwce_setting) {
900 	case MPT_RAID_MWCE_REBUILD_ONLY:
901 		if ((resyncing && mwce) || (!resyncing && !mwce)) {
904 			mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
905 			if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
907 				 * Wait one more status update to see if
908 				 * resyncing gets enabled.  It gets disabled
909 				 * temporarily when WCE is changed.
914 	case MPT_RAID_MWCE_ON:
918 	case MPT_RAID_MWCE_OFF:
922 	case MPT_RAID_MWCE_NC:
926 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
928 		mpt_vol_prt(mpt, mpt_vol,
929 			    "mpt_verify_mwce: Get request failed!\n");
	/* Toggle WCE to form the new settings word, then toggle back. */
933 	vol_pg->VolumeSettings.Settings ^=
934 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
935 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
936 	vol_pg->VolumeSettings.Settings ^=
937 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
938 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
939 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
940 				data, /*addr*/0, /*len*/0,
941 				/*write*/FALSE, /*wait*/TRUE);
942 	if (rv == ETIMEDOUT) {
943 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
944 			    "Write Cache Enable Timed-out\n");
947 	ar = REQ_TO_RAID_ACTION_RESULT(req);
949 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
950 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
951 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
952 			    "%d:%x:%x\n", rv, req->IOCStatus,
		/* Revert the in-core settings on failure. */
955 		vol_pg->VolumeSettings.Settings ^=
956 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
958 	mpt_free_request(mpt, req);
/*
 * Reconcile a volume's resync rate and resync-priority setting with the
 * driver's configured raid_resync_rate.  Two separate mechanisms:
 * SET_RESYNC_RATE when the page's ResyncRate differs, and a
 * CHANGE_VOLUME_SETTINGS toggling PRIORITY_RESYNC when the priority bit
 * disagrees with the rate threshold (>=128 means high priority).
 */
962 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
965 	struct mpt_raid_action_result *ar;
966 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
970 	vol_pg = mpt_vol->config_page;
972 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
976 	 * If the current RAID resync rate does not
977 	 * match our configured rate, update it.
979 	prio = vol_pg->VolumeSettings.Settings
980 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
981 	if (vol_pg->ResyncRate != 0
982 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
984 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
986 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
987 				    "Get request failed!\n");
991 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
992 					MPI_RAID_ACTION_SET_RESYNC_RATE,
993 					mpt->raid_resync_rate, /*addr*/0,
994 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
995 		if (rv == ETIMEDOUT) {
996 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
997 				    "Resync Rate Setting Timed-out\n");
1001 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1003 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1004 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1005 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1006 				    "%d:%x:%x\n", rv, req->IOCStatus,
1009 		vol_pg->ResyncRate = mpt->raid_resync_rate;
1010 		mpt_free_request(mpt, req);
1011 	} else if ((prio && mpt->raid_resync_rate < 128)
1012 		|| (!prio && mpt->raid_resync_rate >= 128)) {
1015 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1017 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1018 				    "Get request failed!\n");
		/* Toggle PRIORITY_RESYNC to form the new settings word. */
1022 		vol_pg->VolumeSettings.Settings ^=
1023 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1024 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1025 		vol_pg->VolumeSettings.Settings ^=
1026 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1027 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1028 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1029 					data, /*addr*/0, /*len*/0,
1030 					/*write*/FALSE, /*wait*/TRUE);
1031 		if (rv == ETIMEDOUT) {
1032 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1033 				    "Resync Rate Setting Timed-out\n");
1036 		ar = REQ_TO_RAID_ACTION_RESULT(req);
1038 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1039 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1040 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1041 				    "%d:%x:%x\n", rv, req->IOCStatus,
			/* Commit the toggle in-core on success. */
1044 			vol_pg->VolumeSettings.Settings ^=
1045 			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1048 		mpt_free_request(mpt, req);
/*
 * Set the CAM command-openings count for a RAID volume's device to the
 * driver's configured raid_queue_depth via an XPT_REL_SIMQ CCB.
 */
1053 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1054 		       struct cam_path *path)
1056 	struct ccb_relsim crs;
1058 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1059 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1060 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1061 	crs.openings = mpt->raid_queue_depth;
1062 	xpt_action((union ccb *)&crs);
1063 	if (crs.ccb_h.status != CAM_REQ_CMP)
1064 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1065 			    "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Log a full description of a RAID volume: settings bits, hot-spare
 * pool membership, and per-member disk identity/role/status flags.
 * Purely informational; no hardware access.
 */
1069 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1071 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1074 	vol_pg = mpt_vol->config_page;
1075 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit and name the ones that are set. */
1076 	for (i = 1; i <= 0x8000; i <<= 1) {
1077 		switch (vol_pg->VolumeSettings.Settings & i) {
1078 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1079 			mpt_prtc(mpt, " Member-WCE");
1081 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1082 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1084 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1085 			mpt_prtc(mpt, " Hot-Plug-Spares");
1087 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1088 			mpt_prtc(mpt, " High-Priority-ReSync");
1094 	mpt_prtc(mpt, " )\n");
1095 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1096 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1097 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1099 		for (i = 0; i < 8; i++) {
1103 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1105 			mpt_prtc(mpt, " %d", i);
1107 		mpt_prtc(mpt, "\n");
1109 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1110 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1111 		struct mpt_raid_disk *mpt_disk;
1112 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1113 		int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1116 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1117 		disk_pg = &mpt_disk->config_page;
1119 		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1120 		    pt_bus, disk_pg->PhysDiskID);
		/* RAID-1 (IM) members are Primary/Secondary; others striped. */
1121 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1122 			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1123 			    "Primary" : "Secondary");
1125 			mpt_prtc(mpt, "Stripe Position %d",
1126 			    mpt_disk->member_number);
1128 		f = disk_pg->PhysDiskStatus.Flags;
1129 		s = disk_pg->PhysDiskStatus.State;
1130 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1131 			mpt_prtc(mpt, " Out of Sync");
1133 		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1134 			mpt_prtc(mpt, " Quiesced");
1136 		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1137 			mpt_prtc(mpt, " Inactive");
1139 		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1140 			mpt_prtc(mpt, " Was Optimal");
1142 		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1143 			mpt_prtc(mpt, " Was Non-Optimal");
1146 		case MPI_PHYSDISK0_STATUS_ONLINE:
1147 			mpt_prtc(mpt, " Online");
1149 		case MPI_PHYSDISK0_STATUS_MISSING:
1150 			mpt_prtc(mpt, " Missing");
1152 		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1153 			mpt_prtc(mpt, " Incompatible");
1155 		case MPI_PHYSDISK0_STATUS_FAILED:
1156 			mpt_prtc(mpt, " Failed");
1158 		case MPI_PHYSDISK0_STATUS_INITIALIZING:
1159 			mpt_prtc(mpt, " Initializing");
1161 		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1162 			mpt_prtc(mpt, " Requested Offline");
1164 		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1165 			mpt_prtc(mpt, " Requested Failed");
1167 		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1169 			mpt_prtc(mpt, " Offline Other (%x)", s);
1172 		mpt_prtc(mpt, "\n");
/*
 * Log a physical disk's identity on both the RAID (logical) bus and the
 * pass-through bus, plus any hot-spare pool memberships.  Purely
 * informational.
 */
1177 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1179 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1180 	int rd_bus = cam_sim_bus(mpt->sim);
1181 	int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1184 	disk_pg = &mpt_disk->config_page;
1185 	mpt_disk_prt(mpt, mpt_disk,
1186 		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1187 		     device_get_nameunit(mpt->dev), rd_bus,
1188 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1189 		     pt_bus, mpt_disk - mpt->raid_disks);
1190 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1192 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1193 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1195 	for (i = 0; i < 8; i++) {
1199 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1201 		mpt_prtc(mpt, " %d", i);
1203 	mpt_prtc(mpt, "\n");
/*
 * Refresh the cached RAID_PHYS_DISK page 0 for one physical disk:
 * read the config page header, then the current page contents, and
 * byte-swap the result into host order.  Error returns are elided in
 * this extract.
 */
1207 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1208 		      IOC_3_PHYS_DISK *ioc_disk)
1212 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1213 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1214 				 &mpt_disk->config_page.Header,
1215 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1217 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1218 			"Failed to read RAID Disk Hdr(%d)\n",
1219 			ioc_disk->PhysDiskNum);
1222 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1223 				   &mpt_disk->config_page.Header,
1224 				   sizeof(mpt_disk->config_page),
1225 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1227 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1228 			"Failed to read RAID Disk Page(%d)\n",
1229 			ioc_disk->PhysDiskNum);
1230 	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
/*
 * Refresh the cached RAID_VOL page 0 for one volume: read the header
 * and current page, mark the volume active, update each member disk's
 * back-pointer and member number (RAID-1 member numbers are rebased to
 * 0), and -- if a resync is in progress -- fetch the progress indicator
 * via a RAID ACTION request.
 */
1234 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1235 		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1237 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1238 	struct mpt_raid_action_result *ar;
1243 	vol_pg = mpt_vol->config_page;
1244 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1246 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1247 	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1249 		mpt_vol_prt(mpt, mpt_vol,
1250 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1251 		    ioc_vol->VolumePageNumber);
1255 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1256 	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1258 		mpt_vol_prt(mpt, mpt_vol,
1259 		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1260 		    ioc_vol->VolumePageNumber);
1263 	mpt2host_config_page_raid_vol_0(vol_pg);
1265 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1267 	/* Update disk entry array data. */
1268 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1269 		struct mpt_raid_disk *mpt_disk;
1270 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1271 		mpt_disk->volume = mpt_vol;
1272 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1273 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1274 			mpt_disk->member_number--;
	/* No resync in progress: nothing further to fetch. */
1278 	if ((vol_pg->VolumeStatus.Flags
1279 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1282 	req = mpt_get_request(mpt, TRUE);
1284 		mpt_vol_prt(mpt, mpt_vol,
1285 		    "mpt_refresh_raid_vol: Get request failed!\n");
1288 	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1289 	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1290 	if (rv == ETIMEDOUT) {
1291 		mpt_vol_prt(mpt, mpt_vol,
1292 		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1293 		mpt_free_request(mpt, req);
1297 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1299 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1300 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1301 		memcpy(&mpt_vol->sync_progress,
1302 		    &ar->action_data.indicator_struct,
1303 		    sizeof(mpt_vol->sync_progress));
1304 		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1306 		mpt_vol_prt(mpt, mpt_vol,
1307 		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1309 	mpt_free_request(mpt, req);
1313 * Update in-core information about RAID support. We update any entries
1314 * that didn't previously exists or have been marked as needing to
1315 * be updated by our event handler. Interesting changes are displayed
1319 mpt_refresh_raid_data(struct mpt_softc *mpt)
1321 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1322 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1323 IOC_3_PHYS_DISK *ioc_disk;
1324 IOC_3_PHYS_DISK *ioc_last_disk;
1325 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1329 u_int nonopt_volumes;
1331 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1336 * Mark all items as unreferenced by the configuration.
1337 * This allows us to find, report, and discard stale
1340 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1341 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1343 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1344 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1348 * Get Physical Disk information.
1350 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1351 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1352 &mpt->ioc_page3->Header, len,
1353 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1356 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1359 mpt2host_config_page_ioc3(mpt->ioc_page3);
1361 ioc_disk = mpt->ioc_page3->PhysDisk;
1362 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1363 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1364 struct mpt_raid_disk *mpt_disk;
1366 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1367 mpt_disk->flags |= MPT_RDF_REFERENCED;
1368 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1369 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1371 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1374 mpt_disk->flags |= MPT_RDF_ACTIVE;
1379 * Refresh volume data.
1381 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1382 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1383 &mpt->ioc_page2->Header, len,
1384 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1386 mpt_prt(mpt, "mpt_refresh_raid_data: "
1387 "Failed to read IOC Page 2\n");
1390 mpt2host_config_page_ioc2(mpt->ioc_page2);
1392 ioc_vol = mpt->ioc_page2->RaidVolume;
1393 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1394 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1395 struct mpt_raid_volume *mpt_vol;
1397 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1398 mpt_vol->flags |= MPT_RVF_REFERENCED;
1399 vol_pg = mpt_vol->config_page;
1402 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1403 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1404 || (vol_pg->VolumeStatus.Flags
1405 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1407 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1409 mpt_vol->flags |= MPT_RVF_ACTIVE;
1413 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1414 struct mpt_raid_volume *mpt_vol;
1420 mpt_vol = &mpt->raid_volumes[i];
1422 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1426 vol_pg = mpt_vol->config_page;
1427 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1428 == MPT_RVF_ANNOUNCED) {
1429 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1434 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1435 mpt_announce_vol(mpt, mpt_vol);
1436 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1439 if (vol_pg->VolumeStatus.State !=
1440 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1443 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1446 mpt_vol->flags |= MPT_RVF_UP2DATE;
1447 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1448 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1449 mpt_verify_mwce(mpt, mpt_vol);
1451 if (vol_pg->VolumeStatus.Flags == 0) {
1455 mpt_vol_prt(mpt, mpt_vol, "Status (");
1456 for (m = 1; m <= 0x80; m <<= 1) {
1457 switch (vol_pg->VolumeStatus.Flags & m) {
1458 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1459 mpt_prtc(mpt, " Enabled");
1461 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1462 mpt_prtc(mpt, " Quiesced");
1464 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1465 mpt_prtc(mpt, " Re-Syncing");
1467 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1468 mpt_prtc(mpt, " Inactive");
1474 mpt_prtc(mpt, " )\n");
1476 if ((vol_pg->VolumeStatus.Flags
1477 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1480 mpt_verify_resync_rate(mpt, mpt_vol);
1482 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1483 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1484 if (vol_pg->ResyncRate != 0) {
1486 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1487 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1488 prio / 1000, prio % 1000);
1490 prio = vol_pg->VolumeSettings.Settings
1491 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1492 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1493 prio ? "High" : "Low");
1495 #if __FreeBSD_version >= 500000
1496 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1497 "blocks remaining\n", (uintmax_t)left,
1500 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1501 "blocks remaining\n", (uint64_t)left,
1505 /* Periodically report on sync progress. */
1506 mpt_schedule_raid_refresh(mpt);
1509 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1510 struct mpt_raid_disk *mpt_disk;
1511 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1514 mpt_disk = &mpt->raid_disks[i];
1515 disk_pg = &mpt_disk->config_page;
1517 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1520 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1521 == MPT_RDF_ANNOUNCED) {
1522 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1523 mpt_disk->flags = 0;
1528 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1530 mpt_announce_disk(mpt, mpt_disk);
1531 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1534 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1537 mpt_disk->flags |= MPT_RDF_UP2DATE;
1538 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1539 if (disk_pg->PhysDiskStatus.Flags == 0)
1542 mpt_disk_prt(mpt, mpt_disk, "Status (");
1543 for (m = 1; m <= 0x80; m <<= 1) {
1544 switch (disk_pg->PhysDiskStatus.Flags & m) {
1545 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1546 mpt_prtc(mpt, " Out-Of-Sync");
1548 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1549 mpt_prtc(mpt, " Quiesced");
1555 mpt_prtc(mpt, " )\n");
1558 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * Callout handler for the periodic RAID status timer: wake the RAID
 * monitoring thread so it can refresh state and report sync progress.
 * NOTE(review): one interior line of the original was lost in
 * extraction — possibly a lock assertion; confirm against upstream.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	mpt_raid_wakeup(mpt);
}
1574 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1576 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1577 mpt_raid_timer, mpt);
1581 mpt_raid_free_mem(struct mpt_softc *mpt)
1584 if (mpt->raid_volumes) {
1585 struct mpt_raid_volume *mpt_raid;
1587 for (i = 0; i < mpt->raid_max_volumes; i++) {
1588 mpt_raid = &mpt->raid_volumes[i];
1589 if (mpt_raid->config_page) {
1590 free(mpt_raid->config_page, M_DEVBUF);
1591 mpt_raid->config_page = NULL;
1594 free(mpt->raid_volumes, M_DEVBUF);
1595 mpt->raid_volumes = NULL;
1597 if (mpt->raid_disks) {
1598 free(mpt->raid_disks, M_DEVBUF);
1599 mpt->raid_disks = NULL;
1601 if (mpt->ioc_page2) {
1602 free(mpt->ioc_page2, M_DEVBUF);
1603 mpt->ioc_page2 = NULL;
1605 if (mpt->ioc_page3) {
1606 free(mpt->ioc_page3, M_DEVBUF);
1607 mpt->ioc_page3 = NULL;
1609 mpt->raid_max_volumes = 0;
1610 mpt->raid_max_disks = 0;
1613 #if __FreeBSD_version >= 500000
1615 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1617 struct mpt_raid_volume *mpt_vol;
1619 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1620 || rate < MPT_RAID_RESYNC_RATE_MIN)
1621 && rate != MPT_RAID_RESYNC_RATE_NC)
1625 mpt->raid_resync_rate = rate;
1626 RAID_VOL_FOREACH(mpt, mpt_vol) {
1627 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1630 mpt_verify_resync_rate(mpt, mpt_vol);
1637 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1639 struct mpt_raid_volume *mpt_vol;
1641 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1645 mpt->raid_queue_depth = vol_queue_depth;
1646 RAID_VOL_FOREACH(mpt, mpt_vol) {
1647 struct cam_path *path;
1650 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1653 mpt->raid_rescan = 0;
1655 MPTLOCK_2_CAMLOCK(mpt);
1656 error = xpt_create_path(&path, xpt_periph,
1657 cam_sim_path(mpt->sim),
1658 mpt_vol->config_page->VolumeID,
1660 if (error != CAM_REQ_CMP) {
1661 CAMLOCK_2_MPTLOCK(mpt);
1662 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1665 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1666 xpt_free_path(path);
1667 CAMLOCK_2_MPTLOCK(mpt);
1674 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1676 struct mpt_raid_volume *mpt_vol;
1677 int force_full_resync;
1680 if (mwce == mpt->raid_mwce_setting) {
1686 * Catch MWCE being left on due to a failed shutdown. Since
1687 * sysctls cannot be set by the loader, we treat the first
1688 * setting of this varible specially and force a full volume
1689 * resync if MWCE is enabled and a resync is in progress.
1691 force_full_resync = 0;
1692 if (mpt->raid_mwce_set == 0
1693 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1694 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1695 force_full_resync = 1;
1697 mpt->raid_mwce_setting = mwce;
1698 RAID_VOL_FOREACH(mpt, mpt_vol) {
1699 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1703 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1706 vol_pg = mpt_vol->config_page;
1707 resyncing = vol_pg->VolumeStatus.Flags
1708 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1709 mwce = vol_pg->VolumeSettings.Settings
1710 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1711 if (force_full_resync && resyncing && mwce) {
1714 * XXX disable/enable volume should force a resync,
1715 * but we'll need to queice, drain, and restart
1718 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1719 "detected. Suggest full resync.\n");
1721 mpt_verify_mwce(mpt, mpt_vol);
1723 mpt->raid_mwce_set = 1;
/*
 * Textual names for the mpt_raid_mwce_t settings, indexed by enum
 * value; consumed by the vol_member_wce sysctl handler.
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1736 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1739 struct mpt_softc *mpt;
1747 mpt = (struct mpt_softc *)arg1;
1748 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1749 error = SYSCTL_OUT(req, str, strlen(str) + 1);
1750 if (error || !req->newptr) {
1754 size = req->newlen - req->newidx;
1755 if (size >= sizeof(inbuf)) {
1759 error = SYSCTL_IN(req, inbuf, size);
1764 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1765 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1766 return (mpt_raid_set_vol_mwce(mpt, i));
1773 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1775 struct mpt_softc *mpt;
1776 u_int raid_resync_rate;
1781 mpt = (struct mpt_softc *)arg1;
1782 raid_resync_rate = mpt->raid_resync_rate;
1784 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1785 if (error || !req->newptr) {
1789 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1793 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1795 struct mpt_softc *mpt;
1796 u_int raid_queue_depth;
1801 mpt = (struct mpt_softc *)arg1;
1802 raid_queue_depth = mpt->raid_queue_depth;
1804 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1805 if (error || !req->newptr) {
1809 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1813 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1815 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1816 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1818 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1819 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1820 mpt_raid_sysctl_vol_member_wce, "A",
1821 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1823 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1824 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1825 mpt_raid_sysctl_vol_queue_depth, "I",
1826 "default volume queue depth");
1828 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1830 mpt_raid_sysctl_vol_resync_rate, "I",
1831 "volume resync priority (0 == NC, 1 - 255)");
1832 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1833 "nonoptimal_volumes", CTLFLAG_RD,
1834 &mpt->raid_nonopt_volumes, 0,
1835 "number of nonoptimal volumes");