2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <dev/mpt/mpt.h>
39 #include <dev/mpt/mpt_raid.h>
41 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
42 #include "dev/mpt/mpilib/mpi_raid.h"
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_sim.h>
47 #include <cam/cam_xpt_sim.h>
49 #include <cam/cam_periph.h>
51 #include <sys/callout.h>
52 #include <sys/kthread.h>
53 #include <sys/sysctl.h>
55 #include <machine/stdarg.h>
/*
 * Per-request result area for RAID action commands.  It lives in the
 * request buffer immediately after the MSG_RAID_ACTION_REQUEST (see
 * REQ_TO_RAID_ACTION_RESULT below) and is filled in by the reply handler.
 * NOTE(review): the embedded numbering jumps (57->60, 62->64), so the
 * struct braces and the union wrapper around action_data appear to have
 * been lost in this extraction -- confirm against the full source.
 */
57 struct mpt_raid_action_result
60 	MPI_RAID_VOL_INDICATOR indicator_struct;
61 	uint32_t new_settings;
62 	uint8_t phys_disk_num;
64 	uint16_t action_status;
/* Locate the action-result area that follows the request message in req_vbuf. */
67 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
68 	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
/* Strip status-flag bits, leaving only the IOC status code proper. */
70 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
/* Forward declarations for the personality's entry points. */
73 static mpt_probe_handler_t mpt_raid_probe;
74 static mpt_attach_handler_t mpt_raid_attach;
75 static mpt_event_handler_t mpt_raid_event;
76 static mpt_shutdown_handler_t mpt_raid_shutdown;
77 static mpt_reset_handler_t mpt_raid_ioc_reset;
78 static mpt_detach_handler_t mpt_raid_detach;
/*
 * The RAID "personality" registered with the core mpt(4) driver.
 * NOTE(review): the opening brace and the .name initializer appear to be
 * missing from this extraction (numbering jumps 80->83).
 */
80 static struct mpt_personality mpt_raid_personality =
83 	.probe = mpt_raid_probe,
84 	.attach = mpt_raid_attach,
85 	.event = mpt_raid_event,
86 	.reset = mpt_raid_ioc_reset,
87 	.shutdown = mpt_raid_shutdown,
88 	.detach = mpt_raid_detach,
/* Register after the SCSI personality; depend on mpt_cam being present. */
91 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
92 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
/* Reply handling and RAID-monitor thread helpers (definitions below). */
94 static mpt_reply_handler_t mpt_raid_reply_handler;
95 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
96 					MSG_DEFAULT_REPLY *reply_frame);
97 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
98 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
99 static void mpt_raid_thread(void *arg);
100 static timeout_t mpt_raid_timer;
101 static timeout_t mpt_raid_quiesce_timeout;
103 static void mpt_enable_vol(struct mpt_softc *mpt,
104 			   struct mpt_raid_volume *mpt_vol, int enable);
106 static void mpt_verify_mwce(struct mpt_softc *mpt,
107 			    struct mpt_raid_volume *mpt_vol);
108 static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
109 				   struct mpt_raid_volume *mpt_vol,
110 				   struct cam_path *path);
111 static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
/* Reply-handler id assigned by mpt_register_handler(); OR'd into MsgContext. */
113 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Map a volume's VolumeType to a human-readable name (IS/IME/IM).
 * NOTE(review): the return statements, default case, and closing braces
 * are missing from this extraction -- do not modify without full source.
 */
116 mpt_vol_type(struct mpt_raid_volume *vol)
118 	switch (vol->config_page->VolumeType) {
119 	case MPI_RAID_VOL_TYPE_IS:
121 	case MPI_RAID_VOL_TYPE_IME:
123 	case MPI_RAID_VOL_TYPE_IM:
/*
 * Map a volume's current status State field to a human-readable name.
 * NOTE(review): the per-case return statements and default case are
 * missing from this extraction.
 */
131 mpt_vol_state(struct mpt_raid_volume *vol)
133 	switch (vol->config_page->VolumeStatus.State) {
134 	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
136 	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
138 	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/*
 * Map a physical disk's status State field to a human-readable name.
 * NOTE(review): several case bodies (returns) are missing from this
 * extraction; the visible ones show the intended pattern.
 */
146 mpt_disk_state(struct mpt_raid_disk *disk)
148 	switch (disk->config_page.PhysDiskStatus.State) {
149 	case MPI_PHYSDISK0_STATUS_ONLINE:
151 	case MPI_PHYSDISK0_STATUS_MISSING:
153 	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
154 		return ("Incompatible");
155 	case MPI_PHYSDISK0_STATUS_FAILED:
157 	case MPI_PHYSDISK0_STATUS_INITIALIZING:
158 		return ("Initializing");
159 	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
160 		return ("Offline Requested");
161 	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
162 		return ("Failed per Host Request");
163 	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * printf-style message prefixed with the unit and volume identity
 * (index, bus, target).  NOTE(review): the va_list setup/vprintf/va_end
 * lines are missing from this extraction.
 */
171 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
172 	    const char *fmt, ...)
176 	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
177 	       (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
178 	       vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * printf-style message prefixed with the disk's identity: volume-relative
 * (vol:member) when the disk belongs to a volume, else its raw bus:id.
 * NOTE(review): the else keyword, va_list handling, and closing braces
 * are missing from this extraction.
 */
185 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
186 	     const char *fmt, ...)
190 	if (disk->volume != NULL) {
191 		printf("(%s:vol%d:%d): ",
192 		       device_get_nameunit(mpt->dev),
193 		       disk->volume->config_page->VolumeID,
194 		       disk->member_number);
196 		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
197 		       disk->config_page.PhysDiskBus,
198 		       disk->config_page.PhysDiskID);
/*
 * CAM async event callback.  On AC_FOUND_DEVICE for a target that matches
 * an active RAID volume, clamp the device queue depth to the driver's
 * RAID setting via mpt_adjust_queue_depth().
 * NOTE(review): the switch statement head, a NULL check on cgd, and
 * loop/brace closers are missing from this extraction.
 */
206 mpt_raid_async(void *callback_arg, u_int32_t code,
207 	       struct cam_path *path, void *arg)
209 	struct mpt_softc *mpt;
211 	mpt = (struct mpt_softc*)callback_arg;
213 	case AC_FOUND_DEVICE:
215 		struct ccb_getdev *cgd;
216 		struct mpt_raid_volume *mpt_vol;
218 		cgd = (struct ccb_getdev *)arg;
222 		mpt_lprt(mpt, MPT_PRT_DEBUG, " Callback for %d\n",
223 			 cgd->ccb_h.target_id);
225 		RAID_VOL_FOREACH(mpt, mpt_vol) {
226 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
229 			if (mpt_vol->config_page->VolumeID
230 			 == cgd->ccb_h.target_id) {
231 				mpt_adjust_queue_depth(mpt, mpt_vol, path);
/*
 * Personality probe: claim the controller only if IOC page 2 exists and
 * reports RAID physical-disk support.  NOTE(review): the return
 * statements (ENODEV / 0) are missing from this extraction.
 */
242 mpt_raid_probe(struct mpt_softc *mpt)
244 	if (mpt->ioc_page2 == NULL
245 	 || mpt->ioc_page2->MaxPhysDisks == 0)
/*
 * Personality attach: register the reply handler, spawn the RAID monitor
 * thread, subscribe to CAM AC_FOUND_DEVICE events, and hook up sysctls.
 * On any failure, falls through to mpt_raid_detach() for cleanup.
 * NOTE(review): the error checks between steps and the return statements
 * are missing from this extraction.
 */
251 mpt_raid_attach(struct mpt_softc *mpt)
253 	struct ccb_setasync csa;
254 	mpt_handler_t	 handler;
257 	mpt_callout_init(&mpt->raid_timer);
259 	handler.reply_handler = mpt_raid_reply_handler;
260 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
265 	error = mpt_spawn_raid_thread(mpt);
267 		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
271 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
272 	csa.ccb_h.func_code = XPT_SASYNC_CB;
273 	csa.event_enable = AC_FOUND_DEVICE;
274 	csa.callback = mpt_raid_async;
275 	csa.callback_arg = mpt;
276 	xpt_action((union ccb *)&csa);
277 	if (csa.ccb_h.status != CAM_REQ_CMP) {
278 		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
279 			"CAM async handler.\n");
282 	mpt_raid_sysctl_attach(mpt);
285 	mpt_raid_detach(mpt);
/*
 * Personality detach: undo everything mpt_raid_attach() did -- stop the
 * refresh timer, terminate the monitor thread, deregister the reply
 * handler, and cancel the CAM async subscription (event_enable = 0).
 */
290 mpt_raid_detach(struct mpt_softc *mpt)
292 	struct ccb_setasync csa;
293 	mpt_handler_t handler;
295 	callout_stop(&mpt->raid_timer);
296 	mpt_terminate_raid_thread(mpt);
298 	handler.reply_handler = mpt_raid_reply_handler;
299 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
301 	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
302 	csa.ccb_h.func_code = XPT_SASYNC_CB;
303 	csa.event_enable = 0;
304 	csa.callback = mpt_raid_async;
305 	csa.callback_arg = mpt;
306 	xpt_action((union ccb *)&csa);
/* IOC reset hook for the RAID personality; intentionally a no-op for now. */
310 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
312 	/* Nothing to do yet. */
/*
 * Text for MPI_EVENT_INTEGRATED_RAID ReasonCode values; indexed directly
 * by ReasonCode in mpt_raid_event().  NOTE(review): the opening brace and
 * the first entries ("Volume Created"/"Volume Deleted") appear to be
 * missing from this extraction (numbering jumps 315->319).
 */
315 static const char *raid_event_txt[] =
319 	"Volume Settings Changed",
320 	"Volume Status Changed",
321 	"Volume Physical Disk Membership Changed",
322 	"Physical Disk Created",
323 	"Physical Disk Deleted",
324 	"Physical Disk Settings Changed",
325 	"Physical Disk Status Changed",
326 	"Domain Validation Required",
327 	"SMART Data Received",
328 	"Replace Action Started",
/*
 * Handler for MPI_EVENT_INTEGRATED_RAID notifications.  Resolves the
 * event's volume/disk to our in-core records, clears the relevant
 * UP2DATE flags so the RAID thread will refresh them, logs the event,
 * and wakes the RAID thread.  Returns 0 if the event was not ours,
 * 1 when handled.
 * NOTE(review): many lines are missing from this extraction (loop
 * closers, NULL resets of mpt_vol/mpt_disk, break statements, else
 * branches); hold off on restructuring until the full source is in view.
 */
332 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
333 	       MSG_EVENT_NOTIFY_REPLY *msg)
335 	EVENT_DATA_RAID *raid_event;
336 	struct mpt_raid_volume *mpt_vol;
337 	struct mpt_raid_disk *mpt_disk;
338 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
342 	if (msg->Event != MPI_EVENT_INTEGRATED_RAID)
343 		return (/*handled*/0);
345 	raid_event = (EVENT_DATA_RAID *)&msg->Data;
/* Find the active in-core volume matching the event's bus/ID, if any. */
349 	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
350 		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
351 			mpt_vol = &mpt->raid_volumes[i];
352 			vol_pg = mpt_vol->config_page;
354 			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
357 			if (vol_pg->VolumeID == raid_event->VolumeID
358 			 && vol_pg->VolumeBus == raid_event->VolumeBus)
/* Loop ran off the end: no matching volume was found. */
361 		if (i >= mpt->ioc_page2->MaxVolumes) {
/* PhysDiskNum of 0xFF means "no disk associated with this event". */
368 	if (raid_event->PhysDiskNum != 0xFF
369 	 && mpt->raid_disks != NULL) {
370 		mpt_disk = mpt->raid_disks
371 			 + raid_event->PhysDiskNum;
372 		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
/* Mark the affected object stale; the RAID thread refreshes it later. */
377 	switch(raid_event->ReasonCode) {
378 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
379 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
381 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
382 		if (mpt_vol != NULL) {
383 			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
384 				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
387 				 * Coalesce status messages into one
388 				 * per background run of our RAID thread.
389 				 * This removes "spurious" status messages
396 	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
397 	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
400 			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
402 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
403 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
406 	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
407 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
409 		if (mpt_disk != NULL)
410 			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
412 	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
415 	case MPI_EVENT_RAID_RC_SMART_DATA:
416 	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
/* Emit a log prefix identifying the object the event refers to. */
421 	if (mpt_disk != NULL) {
422 		mpt_disk_prt(mpt, mpt_disk, "");
423 	} else if (mpt_vol != NULL) {
424 		mpt_vol_prt(mpt, mpt_vol, "");
426 		mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
427 			raid_event->VolumeID);
429 		if (raid_event->PhysDiskNum != 0xFF)
430 			mpt_prtc(mpt, ":%d): ",
431 				 raid_event->PhysDiskNum);
433 			mpt_prtc(mpt, "): ");
436 	if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
437 		mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
438 			 raid_event->ReasonCode);
440 		mpt_prtc(mpt, "%s\n",
441 			 raid_event_txt[raid_event->ReasonCode]);
444 	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
445 		/* XXX Use CAM's print sense for this... */
446 		if (mpt_disk != NULL)
447 			mpt_disk_prt(mpt, mpt_disk, "");
/*
 * NOTE(review): BUG -- the format string below has three %d conversions
 * but no variadic arguments (undefined behavior), and the literal is
 * missing its closing paren.  Likely intended:
 * mpt_prt(mpt, "Volume(%d:%d:%d): ", bus, id, disknum).  Fix upstream.
 */
449 			mpt_prt(mpt, "Volume(%d:%d:%d: ");
450 		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x\n",
451 			 raid_event->ASC, raid_event->ASCQ);
454 	mpt_raid_wakeup(mpt);
455 	return (/*handled*/1);
/*
 * Shutdown hook: if member-disk write caching was only enabled for
 * rebuilds, force it off on every volume so data is not left in member
 * caches across the shutdown.
 */
459 mpt_raid_shutdown(struct mpt_softc *mpt)
461 	struct mpt_raid_volume *mpt_vol;
463 	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY)
466 	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
467 	RAID_VOL_FOREACH(mpt, mpt_vol) {
469 		mpt_verify_mwce(mpt, mpt_vol);
/*
 * Core reply-handler entry for RAID action requests.  Parses the reply
 * frame (if any), marks the request done, removes it from the pending
 * list, and either wakes a sleeping waiter or frees the request.
 * Always tells the caller the reply frame itself may be freed.
 * NOTE(review): the free_req declaration/initialization and the wakeup()
 * call in the NEED_WAKEUP branch are missing from this extraction.
 */
474 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
475 		       MSG_DEFAULT_REPLY *reply_frame)
480 		return (/*free_reply*/TRUE);
483 	if (reply_frame != NULL)
484 		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
486 	else if (req->ccb != NULL) {
487 		/* Complete Quiesce CCB with error... */
491 	req->state &= ~REQ_STATE_QUEUED;
492 	req->state |= REQ_STATE_DONE;
493 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
495 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
497 	} else if (free_req) {
498 		mpt_free_request(mpt, req);
501 	return (/*free_reply*/TRUE);
505  * Parse additional completion information in the reply
506  * frame for RAID I/O requests.
/*
 * Copies IOCStatus and the action-specific result data out of the reply
 * frame into the request's result area (REQ_TO_RAID_ACTION_RESULT).
 * Returns TRUE to indicate the request may be freed by the caller.
 * NOTE(review): the QUIESCE/ENABLE cases currently fall through to the
 * common copy; their bodies are only TODO comments in the original.
 */
509 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
510 			     MSG_DEFAULT_REPLY *reply_frame)
512 	MSG_RAID_ACTION_REPLY *reply;
513 	struct mpt_raid_action_result *action_result;
514 	MSG_RAID_ACTION_REQUEST *rap;
516 	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
517 	req->IOCStatus = le16toh(reply->IOCStatus);
518 	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
520 	switch (rap->Action) {
521 	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
523 		 * Parse result, call mpt_start with ccb,
524 		 * release device queue.
528 	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
530 		 * Need additional state for transition to enabled to
531 		 * protect against attempts to disable??
535 	action_result = REQ_TO_RAID_ACTION_RESULT(req);
536 	memcpy(&action_result->action_data, &reply->ActionData,
537 	      sizeof(action_result->action_data));
538 	action_result->action_status = reply->ActionStatus;
542 	return (/*Free Request*/TRUE);
546  * Utiltity routine to perform a RAID action command;
/*
 * Build and submit a MSG_RAID_ACTION_REQUEST for the given volume (and
 * optional disk), with a single simple SGE describing addr/len.  When the
 * (not-visible-here) wait flag is set, blocks up to 2s for completion.
 * NOTE(review): this extraction drops lines (trailing params, the
 * disk NULL check, SET_ADDRESS on the SGE, the non-wait return path).
 */
549 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
550 		   struct mpt_raid_disk *disk, request_t *req, u_int Action,
551 		   uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
554 	MSG_RAID_ACTION_REQUEST *rap;
558 	memset(rap, 0, sizeof *rap);
559 	rap->Action = Action;
560 	rap->ActionDataWord = ActionDataWord;
561 	rap->Function = MPI_FUNCTION_RAID_ACTION;
562 	rap->VolumeID = vol->config_page->VolumeID;
563 	rap->VolumeBus = vol->config_page->VolumeBus;
565 		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
/* 0xFF = no physical disk associated with this action. */
567 		rap->PhysDiskNum = 0xFF;
568 	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
570 	MPI_pSGE_SET_LENGTH(se, len);
/*
 * NOTE(review): BUG -- '|' binds tighter than '?:', so this parses as
 * ((SIMPLE|LAST|EOB|EOL|write) ? HOST_TO_IOC : IOC_TO_HOST), discarding
 * all the element flags.  Intended: flags | (write ? HOST_TO_IOC :
 * IOC_TO_HOST).  Parenthesize the conditional upstream.
 */
571 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
572 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
573 	    MPI_SGE_FLAGS_END_OF_LIST |
574 	    write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
575 	rap->MsgContext = htole32(req->index | raid_handler_id);
577 	mpt_check_doorbell(mpt);
578 	mpt_send_cmd(mpt, req);
581 	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
582 			     /*sleep_ok*/FALSE, /*time_ms*/2000));
588 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the per-controller RAID monitor kthread.  The physical-disk SIM
 * queue is frozen until the thread's first data refresh so no I/O is
 * accepted for IDs we may later learn belong to RAID members; on spawn
 * failure the freeze is released here.
 */
590 mpt_spawn_raid_thread(struct mpt_softc *mpt)
595 	 * Freeze out any CAM transactions until our thread
596 	 * is able to run at least once.  We need to update
597 	 * our RAID pages before acception I/O or we may
598 	 * reject I/O to an ID we later determine is for a
601 	xpt_freeze_simq(mpt->phydisk_sim, 1);
602 	error = mpt_kthread_create(mpt_raid_thread, mpt,
603 	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
604 	    "mpt_raid%d", mpt->unit);
606 		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
611  * Lock is not held on entry.
/*
 * Ask the RAID thread to exit and wait for it.  Sets shutdwn_raid, wakes
 * the thread (it sleeps on raid_volumes), then interlocks on the
 * &mpt->raid_thread address until the thread clears it and wakes us.
 */
614 mpt_terminate_raid_thread(struct mpt_softc *mpt)
618 	if (mpt->raid_thread == NULL) {
622 	mpt->shutdwn_raid = 1;
623 	wakeup(mpt->raid_volumes);
625 	 * Sleep on a slightly different location
626 	 * for this interlock just for added safety.
628 	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/*
 * Completion callback for the bus-rescan CCB issued by the RAID thread;
 * frees the path created for the scan.  NOTE(review): the ccb free that
 * presumably follows is not visible in this extraction.
 */
633 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
635 	xpt_free_path(ccb->ccb_h.path);
/*
 * RAID monitor kthread main loop.  Sleeps until woken (events/timer),
 * refreshes RAID config data, releases the phys-disk SIM queue after the
 * first refresh, and kicks off a CAM bus rescan when raid_rescan is set.
 * On shutdown request, clears mpt->raid_thread and wakes the terminator.
 * NOTE(review): many lines (locking, firstrun flag, xpt_action of the
 * rescan ccb, kthread_exit) are missing from this extraction.
 */
640 mpt_raid_thread(void *arg)
642 	struct mpt_softc *mpt;
645 #if __FreeBSD_version >= 500000
648 	mpt = (struct mpt_softc *)arg;
651 	while (mpt->shutdwn_raid == 0) {
653 		if (mpt->raid_wakeup == 0) {
654 			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
658 		mpt->raid_wakeup = 0;
660 		mpt_refresh_raid_data(mpt);
663 		 * Now that we have our first snapshot of RAID data,
664 		 * allow CAM to access our physical disk bus.
668 			xpt_release_simq(mpt->phydisk_sim, /*run_queue*/TRUE);
671 		if (mpt->raid_rescan != 0) {
673 			struct cam_path *path;
676 			mpt->raid_rescan = 0;
678 			ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
679 			error = xpt_create_path(&path, xpt_periph,
680 			    cam_sim_path(mpt->phydisk_sim),
683 			if (error != CAM_REQ_CMP) {
685 				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
687 				xpt_setup_ccb(&ccb->ccb_h, path, /*priority*/5);
688 				ccb->ccb_h.func_code = XPT_SCAN_BUS;
689 				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
690 				ccb->crcn.flags = CAM_FLAG_NONE;
695 	mpt->raid_thread = NULL;
696 	wakeup(&mpt->raid_thread);
698 #if __FreeBSD_version >= 500000
/*
 * Quiesce physical I/O to a RAID member disk via a RAID action request,
 * freezing the device queue while the quiesce is outstanding.  Returns a
 * CAM status: CAM_REQ_CMP if already quiesced, CAM_REQ_INPROG once the
 * quiesce has been started, CAM_REQ_CMP_ERR on failure, CAM_REQUEUE_REQ
 * otherwise.  NOTE(review): parameter list tail, local declarations, and
 * several branch closers are missing from this extraction.  The "Quiece"
 * spelling in the log strings is a typo in the original runtime strings,
 * left untouched here.
 */
705 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
711 	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
712 		return (CAM_REQ_CMP);
714 	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
717 		mpt_disk->flags |= MPT_RDF_QUIESCING;
718 		xpt_freeze_devq(ccb->ccb_h.path, 1);
720 		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
721 					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
722 					/*ActionData*/0, /*addr*/0,
723 					/*len*/0, /*write*/FALSE,
726 			return (CAM_REQ_CMP_ERR);
/* Arm a 5 second watchdog on the quiesce completing. */
728 		ccb->ccb_h.timeout_ch =
729 		    timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
731 		if (rv == ETIMEDOUT) {
732 			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
733 				     "Quiece Timed-out\n");
734 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
735 			return (CAM_REQ_CMP_ERR);
738 		ar = REQ_TO_RAID_ACTION_RESULT(req);
740 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
741 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
742 			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
743 				    "%d:%x:%x\n", rv, req->IOCStatus,
745 			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
746 			return (CAM_REQ_CMP_ERR);
749 		return (CAM_REQ_INPROG);
751 	return (CAM_REQUEUE_REQ);
754 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Translate a CAM target id on the phys-disk bus into the controller's
 * PhysDiskID, storing it in *tgt.  Logs and (presumably) errors out when
 * the target is not an active RAID member disk.
 * NOTE(review): the pointer is computed from target_id BEFORE the bounds
 * check on the next line; the dereference only happens after the check
 * short-circuits, but the out-of-range pointer arithmetic itself is
 * technically UB -- consider reordering upstream.  Return statements are
 * missing from this extraction.
 */
756 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
758 	struct mpt_raid_disk *mpt_disk;
760 	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
761 	if (ccb->ccb_h.target_id < mpt->raid_max_disks
762 	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
764 		*tgt = mpt_disk->config_page.PhysDiskID;
767 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_map_physdisk(%d) - Not Active\n",
768 		 ccb->ccb_h.target_id);
/*
 * Bring a volume's enabled state in line with the requested 'enable'
 * flag by issuing MPI_RAID_ACTION_ENABLE_VOLUME / DISABLE_VOLUME.
 * No-op when the hardware state already matches.  Failures are logged
 * but not propagated (void in the original).
 * NOTE(review): the request_t declaration, early return, and NULL check
 * on req are missing from this extraction.
 */
774 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
778 	struct mpt_raid_action_result *ar;
779 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
783 	vol_pg = mpt_vol->config_page;
784 	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
787 	 * If the setting matches the configuration,
788 	 * there is nothing to do.
790 	if ((enabled && enable)
791 	 || (!enabled && !enable))
794 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
796 		mpt_vol_prt(mpt, mpt_vol,
797 			    "mpt_enable_vol: Get request failed!\n");
801 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
802 				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
803 				       : MPI_RAID_ACTION_DISABLE_VOLUME,
804 				/*data*/0, /*addr*/0, /*len*/0,
805 				/*write*/FALSE, /*wait*/TRUE);
806 	if (rv == ETIMEDOUT) {
807 		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
808 			    "%s Volume Timed-out\n",
809 			    enable ? "Enable" : "Disable");
812 	ar = REQ_TO_RAID_ACTION_RESULT(req);
814 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
815 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
816 		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
817 			    enable ? "Enable" : "Disable",
818 			    rv, req->IOCStatus, ar->action_status);
821 	mpt_free_request(mpt, req);
/*
 * Reconcile a volume's member-disk write-cache-enable (MWCE) setting
 * with the driver policy (raid_mwce_setting): always on, always off,
 * rebuild-only (track resync state), or no-change.  When a change is
 * needed, toggles the WCE bit via CHANGE_VOLUME_SETTINGS; on failure the
 * in-core copy of the bit is toggled back so it reflects hardware.
 * NOTE(review): local declarations, early returns in each policy case,
 * and the WCE_CHANGED toggling branch are partially missing from this
 * extraction -- the settings XOR before/after the memcpy is the original
 * "flip for the request, restore in-core" idiom, not a bug.
 */
826 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
829 	struct mpt_raid_action_result *ar;
830 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
836 	vol_pg = mpt_vol->config_page;
837 	resyncing = vol_pg->VolumeStatus.Flags
838 		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
839 	mwce = vol_pg->VolumeSettings.Settings
840 	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
843 	 * If the setting matches the configuration,
844 	 * there is nothing to do.
846 	switch (mpt->raid_mwce_setting) {
847 	case MPT_RAID_MWCE_REBUILD_ONLY:
848 		if ((resyncing && mwce)
849 		 || (!resyncing && !mwce))
852 		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
853 		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
855 			 * Wait one more status update to see if
856 			 * resyncing gets enabled.  It gets disabled
857 			 * temporarilly when WCE is changed.
862 	case MPT_RAID_MWCE_ON:
866 	case MPT_RAID_MWCE_OFF:
870 	case MPT_RAID_MWCE_NC:
874 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
876 		mpt_vol_prt(mpt, mpt_vol,
877 			    "mpt_verify_mwce: Get request failed!\n");
/* Flip the bit to build the desired settings word, then restore in-core. */
881 	vol_pg->VolumeSettings.Settings ^=
882 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
883 	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
884 	vol_pg->VolumeSettings.Settings ^=
885 	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
886 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
887 				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
888 				data, /*addr*/0, /*len*/0,
889 				/*write*/FALSE, /*wait*/TRUE);
890 	if (rv == ETIMEDOUT) {
891 		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
892 			    "Write Cache Enable Timed-out\n");
895 	ar = REQ_TO_RAID_ACTION_RESULT(req);
897 	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
898 	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
899 		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
900 			    "%d:%x:%x\n", rv, req->IOCStatus,
/* Failure: undo the optimistic in-core update so state matches hardware. */
903 		vol_pg->VolumeSettings.Settings ^=
904 		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
907 	mpt_free_request(mpt, req);
/*
 * Reconcile a volume's resync rate/priority with the driver's configured
 * raid_resync_rate.  Two paths: (1) the numeric ResyncRate differs --
 * issue SET_RESYNC_RATE; (2) only the high/low priority bit differs
 * (rate threshold 128) -- toggle PRIORITY_RESYNC via
 * CHANGE_VOLUME_SETTINGS.  No-op when policy is "no change".
 * NOTE(review): local declarations, NULL checks on req, and several
 * brace closers are missing from this extraction.  As in
 * mpt_verify_mwce, the XOR-before/after-memcpy builds the request copy
 * while keeping the in-core settings untouched until success.
 */
911 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
914 	struct mpt_raid_action_result *ar;
915 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
919 	vol_pg = mpt_vol->config_page;
921 	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
925 	 * If the current RAID resync rate does not
926 	 * match our configured rate, update it.
928 	prio = vol_pg->VolumeSettings.Settings
929 	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
930 	if (vol_pg->ResyncRate != 0
931 	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
933 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
935 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
936 				    "Get request failed!\n");
940 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
941 					MPI_RAID_ACTION_SET_RESYNC_RATE,
942 					mpt->raid_resync_rate, /*addr*/0,
943 					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
944 		if (rv == ETIMEDOUT) {
945 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
946 				    "Resync Rate Setting Timed-out\n");
950 		ar = REQ_TO_RAID_ACTION_RESULT(req);
952 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
953 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
954 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
955 				    "%d:%x:%x\n", rv, req->IOCStatus,
/* Success: mirror the new rate into our cached config page. */
958 		vol_pg->ResyncRate = mpt->raid_resync_rate;
959 		mpt_free_request(mpt, req);
960 	} else if ((prio && mpt->raid_resync_rate < 128)
961 		|| (!prio && mpt->raid_resync_rate >= 128)) {
964 		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
966 			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
967 				    "Get request failed!\n");
/* Flip the priority bit for the request copy, then restore in-core. */
971 		vol_pg->VolumeSettings.Settings ^=
972 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
973 		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
974 		vol_pg->VolumeSettings.Settings ^=
975 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
976 		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
977 					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
978 					data, /*addr*/0, /*len*/0,
979 					/*write*/FALSE, /*wait*/TRUE);
980 		if (rv == ETIMEDOUT) {
981 			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
982 				    "Resync Rate Setting Timed-out\n");
985 		ar = REQ_TO_RAID_ACTION_RESULT(req);
987 		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
988 		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
989 			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
990 				    "%d:%x:%x\n", rv, req->IOCStatus,
/* Success: commit the flipped priority bit to the cached settings. */
993 		vol_pg->VolumeSettings.Settings ^=
994 		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
997 		mpt_free_request(mpt, req);
/*
 * Clamp the CAM device queue depth for a RAID volume's target to the
 * driver's configured raid_queue_depth via an XPT_REL_SIMQ /
 * RELSIM_ADJUST_OPENINGS CCB; logs (but does not propagate) failure.
 */
1002 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1003 		       struct cam_path *path)
1005 	struct ccb_relsim crs;
1007 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1008 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1009 	crs.release_flags = RELSIM_ADJUST_OPENINGS;
1010 	crs.openings = mpt->raid_queue_depth;
1011 	xpt_action((union ccb *)&crs);
1012 	if (crs.ccb_h.status != CAM_REQ_CMP)
1013 		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1014 			    "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Print a one-time human-readable summary of a volume: its settings
 * bits, hot-spare pool membership, and each member disk (Primary /
 * Secondary for mirrors, stripe position otherwise).
 * NOTE(review): break statements in the settings switch, the mask
 * computation in the spare-pool loop, and some else branches are missing
 * from this extraction.
 */
1018 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1020 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1023 	vol_pg = mpt_vol->config_page;
1024 	mpt_vol_prt(mpt, mpt_vol, "Settings (");
/* Walk each of the 16 settings bits and name the ones we understand. */
1025 	for (i = 1; i <= 0x8000; i <<= 1) {
1026 		switch (vol_pg->VolumeSettings.Settings & i) {
1027 		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1028 			mpt_prtc(mpt, " Member-WCE");
1030 		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1031 			mpt_prtc(mpt, " Offline-On-SMART-Err");
1033 		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1034 			mpt_prtc(mpt, " Hot-Plug-Spares");
1036 		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1037 			mpt_prtc(mpt, " High-Priority-ReSync");
1043 	mpt_prtc(mpt, " )\n");
1044 	if (vol_pg->VolumeSettings.HotSparePool != 0) {
1045 		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1046 			    powerof2(vol_pg->VolumeSettings.HotSparePool)
1048 		for (i = 0; i < 8; i++) {
1052 			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1054 			mpt_prtc(mpt, " %d", i);
1056 		mpt_prtc(mpt, "\n");
1058 	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1059 	for (i = 0; i < vol_pg->NumPhysDisks; i++){
1060 		struct mpt_raid_disk *mpt_disk;
1061 		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1063 		mpt_disk = mpt->raid_disks
1064 			 + vol_pg->PhysDisk[i].PhysDiskNum;
1065 		disk_pg = &mpt_disk->config_page;
1067 		mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
1068 			 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
1069 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1070 			mpt_prtc(mpt, "%s\n",
1071 				 mpt_disk->member_number == 0
1072 			       ? "Primary" : "Secondary");
1074 			mpt_prtc(mpt, "Stripe Position %d\n",
1075 				 mpt_disk->member_number);
/*
 * Print a one-time summary of a physical member disk: its real bus/ID,
 * its pass-through identity (bus 1, index into raid_disks), and any
 * hot-spare pools it belongs to.  NOTE(review): the mask computation in
 * the pool loop and an early return are missing from this extraction.
 */
1080 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1082 	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1085 	disk_pg = &mpt_disk->config_page;
1086 	mpt_disk_prt(mpt, mpt_disk,
1087 		     "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1088 		     device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1089 		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1090 		     /*bus*/1, mpt_disk - mpt->raid_disks);
1092 	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1094 	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1095 		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1097 	for (i = 0; i < 8; i++) {
1101 		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1103 		mpt_prtc(mpt, " %d", i);
1105 	mpt_prtc(mpt, "\n");
/*
 * Refresh one physical disk's RAID_PHYSDISK page 0 from the controller:
 * first read the config page header, then the current page contents,
 * into mpt_disk->config_page.  Errors are logged; the return statements
 * between steps are missing from this extraction.
 */
1109 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1110 		      IOC_3_PHYS_DISK *ioc_disk)
1114 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1115 				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1116 				 &mpt_disk->config_page.Header,
1117 				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1119 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1120 			"Failed to read RAID Disk Hdr(%d)\n",
1121 			ioc_disk->PhysDiskNum);
1124 	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1125 				   &mpt_disk->config_page.Header,
1126 				   sizeof(mpt_disk->config_page),
1127 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1129 		mpt_prt(mpt, "mpt_refresh_raid_disk: "
1130 			"Failed to read RAID Disk Page(%d)\n",
1131 			ioc_disk->PhysDiskNum);
/*
 * Refresh one volume's RAID_VOL page 0 from the controller, mark it
 * active, update each member disk's back-pointer and member number
 * (mirrors use PhysDiskMap - 1), and -- when a resync is in progress --
 * fetch the sync-progress indicator via a RAID action request into
 * mpt_vol->sync_progress.
 * NOTE(review): local declarations, error-return statements, and loop
 * closers are missing from this extraction.
 */
1135 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1136 		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1138 	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1139 	struct mpt_raid_action_result *ar;
1144 	vol_pg = mpt_vol->config_page;
1145 	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1146 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
1147 				 /*PageNumber*/0, ioc_vol->VolumePageNumber,
1148 				 &vol_pg->Header, /*sleep_ok*/TRUE,
1149 				 /*timeout_ms*/5000);
1151 		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1152 			    "Failed to read RAID Vol Hdr(%d)\n",
1153 			    ioc_vol->VolumePageNumber);
1156 	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1157 				   &vol_pg->Header, mpt->raid_page0_len,
1158 				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1160 		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1161 			    "Failed to read RAID Vol Page(%d)\n",
1162 			    ioc_vol->VolumePageNumber);
1165 	mpt_vol->flags |= MPT_RVF_ACTIVE;
1167 	/* Update disk entry array data. */
1168 	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1169 		struct mpt_raid_disk *mpt_disk;
1171 		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1172 		mpt_disk->volume = mpt_vol;
1173 		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1174 		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1175 			mpt_disk->member_number--;
/* Only fetch sync progress while a resync is actually running. */
1178 	if ((vol_pg->VolumeStatus.Flags
1179 	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1182 	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1184 		mpt_vol_prt(mpt, mpt_vol,
1185 			    "mpt_refresh_raid_vol: Get request failed!\n");
1188 	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1189 				MPI_RAID_ACTION_INDICATOR_STRUCT,
1190 				/*ActionWord*/0, /*addr*/0, /*len*/0,
1191 				/*write*/FALSE, /*wait*/TRUE);
1192 	if (rv == ETIMEDOUT) {
1193 		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1194 			    "Progress indicator fetch timedout!\n");
1198 	ar = REQ_TO_RAID_ACTION_RESULT(req);
1200 	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1201 	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1202 		memcpy(&mpt_vol->sync_progress,
1203 		       &ar->action_data.indicator_struct,
1204 		       sizeof(mpt_vol->sync_progress));
1206 		mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1207 			    "Progress indicator fetch failed!\n");
1209 	mpt_free_request(mpt, req);
1213 * Update in-core information about RAID support. We update any entries
1214 * that didn't previously exists or have been marked as needing to
1215 * be updated by our event handler. Interesting changes are displayed
1219 mpt_refresh_raid_data(struct mpt_softc *mpt)
1221 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1222 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1223 IOC_3_PHYS_DISK *ioc_disk;
1224 IOC_3_PHYS_DISK *ioc_last_disk;
1225 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1230 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL)
1234 * Mark all items as unreferrened by the configuration.
1235 * This allows us to find, report, and discard stale
1238 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++)
1239 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1240 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++)
1241 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1244 * Get Physical Disk information.
1246 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1247 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1248 &mpt->ioc_page3->Header, len,
1249 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1251 mpt_prt(mpt, "mpt_refresh_raid_data: "
1252 "Failed to read IOC Page 3\n");
1256 ioc_disk = mpt->ioc_page3->PhysDisk;
1257 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1258 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1259 struct mpt_raid_disk *mpt_disk;
1261 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1262 mpt_disk->flags |= MPT_RDF_REFERENCED;
1263 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1264 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1266 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1269 mpt_disk->flags |= MPT_RDF_ACTIVE;
1274 * Refresh volume data.
1276 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1277 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1278 &mpt->ioc_page2->Header, len,
1279 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1281 mpt_prt(mpt, "mpt_refresh_raid_data: "
1282 "Failed to read IOC Page 2\n");
1286 ioc_vol = mpt->ioc_page2->RaidVolume;
1287 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1288 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1289 struct mpt_raid_volume *mpt_vol;
1291 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1292 mpt_vol->flags |= MPT_RVF_REFERENCED;
1293 vol_pg = mpt_vol->config_page;
1296 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1297 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1298 || (vol_pg->VolumeStatus.Flags
1299 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1301 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1303 mpt_vol->flags |= MPT_RVF_ACTIVE;
1306 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1307 struct mpt_raid_volume *mpt_vol;
1313 mpt_vol = &mpt->raid_volumes[i];
1315 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1318 vol_pg = mpt_vol->config_page;
1319 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1320 == MPT_RVF_ANNOUNCED) {
1321 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1326 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1328 mpt_announce_vol(mpt, mpt_vol);
1329 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1332 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1335 mpt_vol->flags |= MPT_RVF_UP2DATE;
1336 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1337 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1338 mpt_verify_mwce(mpt, mpt_vol);
1340 if (vol_pg->VolumeStatus.Flags == 0)
1343 mpt_vol_prt(mpt, mpt_vol, "Status (");
1344 for (m = 1; m <= 0x80; m <<= 1) {
1345 switch (vol_pg->VolumeStatus.Flags & m) {
1346 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1347 mpt_prtc(mpt, " Enabled");
1349 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1350 mpt_prtc(mpt, " Quiesced");
1352 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1353 mpt_prtc(mpt, " Re-Syncing");
1355 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1356 mpt_prtc(mpt, " Inactive");
1362 mpt_prtc(mpt, " )\n");
1364 if ((vol_pg->VolumeStatus.Flags
1365 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1368 mpt_verify_resync_rate(mpt, mpt_vol);
1370 left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1371 total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1372 if (vol_pg->ResyncRate != 0) {
1374 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1375 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1376 prio / 1000, prio % 1000);
1378 prio = vol_pg->VolumeSettings.Settings
1379 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1380 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1381 prio ? "High" : "Low");
1383 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1384 "blocks remaining\n", (uintmax_t)left,
1387 /* Periodically report on sync progress. */
1388 mpt_schedule_raid_refresh(mpt);
1391 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1392 struct mpt_raid_disk *mpt_disk;
1393 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1396 mpt_disk = &mpt->raid_disks[i];
1397 disk_pg = &mpt_disk->config_page;
1399 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1402 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1403 == MPT_RDF_ANNOUNCED) {
1404 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1405 mpt_disk->flags = 0;
1410 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1412 mpt_announce_disk(mpt, mpt_disk);
1413 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1416 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1419 mpt_disk->flags |= MPT_RDF_UP2DATE;
1420 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1421 if (disk_pg->PhysDiskStatus.Flags == 0)
1424 mpt_disk_prt(mpt, mpt_disk, "Status (");
1425 for (m = 1; m <= 0x80; m <<= 1) {
1426 switch (disk_pg->PhysDiskStatus.Flags & m) {
1427 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1428 mpt_prtc(mpt, " Out-Of-Sync");
1430 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1431 mpt_prtc(mpt, " Quiesced");
1437 mpt_prtc(mpt, " )\n");
/*
 * Periodic RAID status callout handler.  Fires from the callout armed by
 * mpt_schedule_raid_refresh() and wakes the RAID monitoring thread so it
 * can re-read volume/disk state and report sync progress.
 */
static void
mpt_raid_timer(void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	mpt_raid_wakeup(mpt);
}
/*
 * Timeout handler for a volume quiesce request that did not complete in
 * time.
 *
 * NOTE(review): body is a stub — it should complete the pending CCB with
 * an error status, but that is not implemented yet.
 */
static void
mpt_raid_quiesce_timeout(void *arg)
{

	/* Complete the CCB with error */
	/* XXX TODO: not yet implemented. */
}
1460 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1462 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1463 mpt_raid_timer, mpt);
1467 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1469 struct mpt_raid_volume *mpt_vol;
1471 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1472 || rate < MPT_RAID_RESYNC_RATE_MIN)
1473 && rate != MPT_RAID_RESYNC_RATE_NC)
1477 mpt->raid_resync_rate = rate;
1478 RAID_VOL_FOREACH(mpt, mpt_vol) {
1479 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1481 mpt_verify_resync_rate(mpt, mpt_vol);
1488 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1490 struct mpt_raid_volume *mpt_vol;
1492 if (vol_queue_depth > 255
1493 || vol_queue_depth < 1)
1497 mpt->raid_queue_depth = vol_queue_depth;
1498 RAID_VOL_FOREACH(mpt, mpt_vol) {
1499 struct cam_path *path;
1502 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1505 mpt->raid_rescan = 0;
1507 error = xpt_create_path(&path, xpt_periph,
1508 cam_sim_path(mpt->sim),
1509 mpt_vol->config_page->VolumeID,
1511 if (error != CAM_REQ_CMP) {
1512 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1515 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1516 xpt_free_path(path);
1523 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1525 struct mpt_raid_volume *mpt_vol;
1526 int force_full_resync;
1529 if (mwce == mpt->raid_mwce_setting) {
1535 * Catch MWCE being left on due to a failed shutdown. Since
1536 * sysctls cannot be set by the loader, we treat the first
1537 * setting of this varible specially and force a full volume
1538 * resync if MWCE is enabled and a resync is in progress.
1540 force_full_resync = 0;
1541 if (mpt->raid_mwce_set == 0
1542 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1543 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1544 force_full_resync = 1;
1546 mpt->raid_mwce_setting = mwce;
1547 RAID_VOL_FOREACH(mpt, mpt_vol) {
1548 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1552 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1555 vol_pg = mpt_vol->config_page;
1556 resyncing = vol_pg->VolumeStatus.Flags
1557 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1558 mwce = vol_pg->VolumeSettings.Settings
1559 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1560 if (force_full_resync && resyncing && mwce) {
1563 * XXX disable/enable volume should force a resync,
1564 * but we'll need to queice, drain, and restart
1567 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1568 "detected. Suggest full resync.\n");
1570 mpt_verify_mwce(mpt, mpt_vol);
1572 mpt->raid_mwce_set = 1;
/*
 * Human-readable names for the mpt_raid_mwce_t settings, indexed by enum
 * value.  Order must match the enum: the vol_member_wce sysctl handler
 * maps a matching string's index directly to the setting.
 */
const char *mpt_vol_mwce_strs[] =
{
	"On",
	"Off",
	"On-During-Rebuild",
	"NC"
};
1586 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1589 struct mpt_softc *mpt;
1596 mpt = (struct mpt_softc *)arg1;
1597 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1598 error = SYSCTL_OUT(req, str, strlen(str) + 1);
1599 if (error || !req->newptr)
1602 size = req->newlen - req->newidx;
1603 if (size >= sizeof(inbuf))
1606 error = SYSCTL_IN(req, inbuf, size);
1610 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1612 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0)
1613 return (mpt_raid_set_vol_mwce(mpt, i));
1619 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1621 struct mpt_softc *mpt;
1622 u_int raid_resync_rate;
1626 mpt = (struct mpt_softc *)arg1;
1627 raid_resync_rate = mpt->raid_resync_rate;
1629 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1630 if (error || !req->newptr)
1633 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1637 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1639 struct mpt_softc *mpt;
1640 u_int raid_queue_depth;
1644 mpt = (struct mpt_softc *)arg1;
1645 raid_queue_depth = mpt->raid_queue_depth;
1647 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1648 if (error || !req->newptr)
1651 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1655 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1657 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1658 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1660 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1661 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1662 mpt_raid_sysctl_vol_member_wce, "A",
1663 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1665 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1666 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1667 mpt_raid_sysctl_vol_queue_depth, "I",
1668 "default volume queue depth");
1670 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1671 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1672 mpt_raid_sysctl_vol_resync_rate, "I",
1673 "volume resync priority (0 == NC, 1 - 255)");