2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define GIANT_REQUIRED
61 #include <cam/cam_periph.h>
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
67 #include <machine/stdarg.h>
/*
 * Per-request result of a RAID ACTION command.  NOTE(review): this chunk
 * is decimated -- the file's own line numbers are embedded in the text and
 * many lines are missing; comments below are additions only.
 */
69 struct mpt_raid_action_result
72 MPI_RAID_VOL_INDICATOR indicator_struct;
73 uint32_t new_settings;
74 uint8_t phys_disk_num;
76 uint16_t action_status;
/* The result area lives immediately after the request frame in req_vbuf. */
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
/* Strip status-flag bits, leaving only the bare IOC status code. */
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
/* Personality entry points registered with the core mpt(4) driver. */
85 static mpt_probe_handler_t mpt_raid_probe;
86 static mpt_attach_handler_t mpt_raid_attach;
87 static mpt_enable_handler_t mpt_raid_enable;
88 static mpt_event_handler_t mpt_raid_event;
89 static mpt_shutdown_handler_t mpt_raid_shutdown;
90 static mpt_reset_handler_t mpt_raid_ioc_reset;
91 static mpt_detach_handler_t mpt_raid_detach;
/* RAID personality descriptor; hooked in via DECLARE_MPT_PERSONALITY. */
93 static struct mpt_personality mpt_raid_personality =
96 .probe = mpt_raid_probe,
97 .attach = mpt_raid_attach,
98 .enable = mpt_raid_enable,
99 .event = mpt_raid_event,
100 .reset = mpt_raid_ioc_reset,
101 .shutdown = mpt_raid_shutdown,
102 .detach = mpt_raid_detach,
/* RAID support layers on top of the CAM personality (mpt_cam). */
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110 MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117 struct mpt_raid_volume *mpt_vol, int enable);
119 static void mpt_verify_mwce(struct mpt_softc *mpt,
120 struct mpt_raid_volume *mpt_vol);
121 static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
122 struct mpt_raid_volume *mpt_vol,
123 struct cam_path *path);
124 static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
/* Reply-handler id assigned by mpt_register_handler() at attach time. */
126 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Map the volume's MPI RAID type to a human-readable name.
 * NOTE(review): the per-case return statements are among the lines
 * missing from this decimated chunk.
 */
129 mpt_vol_type(struct mpt_raid_volume *vol)
131 switch (vol->config_page->VolumeType) {
132 case MPI_RAID_VOL_TYPE_IS:
134 case MPI_RAID_VOL_TYPE_IME:
136 case MPI_RAID_VOL_TYPE_IM:
/* Map the volume's MPI state code to a human-readable name. */
144 mpt_vol_state(struct mpt_raid_volume *vol)
146 switch (vol->config_page->VolumeStatus.State) {
147 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
149 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
151 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/* Map a physical disk's MPI status code to a human-readable name. */
159 mpt_disk_state(struct mpt_raid_disk *disk)
161 switch (disk->config_page.PhysDiskStatus.State) {
162 case MPI_PHYSDISK0_STATUS_ONLINE:
164 case MPI_PHYSDISK0_STATUS_MISSING:
166 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
167 return ("Incompatible");
168 case MPI_PHYSDISK0_STATUS_FAILED:
170 case MPI_PHYSDISK0_STATUS_INITIALIZING:
171 return ("Initializing");
172 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
173 return ("Offline Requested");
174 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
175 return ("Failed per Host Request");
176 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * printf-style diagnostic prefixed with the unit and volume identity
 * (volume index, bus, and ID).  Varargs body is not visible in this chunk.
 */
184 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
185 const char *fmt, ...)
189 printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
190 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
191 vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * printf-style diagnostic prefixed with the disk's identity: by volume
 * membership when the disk belongs to a volume, otherwise by bus/ID.
 */
198 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
199 const char *fmt, ...)
203 if (disk->volume != NULL) {
204 printf("(%s:vol%d:%d): ",
205 device_get_nameunit(mpt->dev),
206 disk->volume->config_page->VolumeID,
207 disk->member_number);
209 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
210 disk->config_page.PhysDiskBus,
211 disk->config_page.PhysDiskID);
/*
 * CAM async-event callback.  On AC_FOUND_DEVICE, if the new device's
 * target id matches an active RAID volume, adjust that path's queue
 * depth to the driver's configured RAID depth.
 */
219 mpt_raid_async(void *callback_arg, u_int32_t code,
220 struct cam_path *path, void *arg)
222 struct mpt_softc *mpt;
224 mpt = (struct mpt_softc*)callback_arg;
226 case AC_FOUND_DEVICE:
228 struct ccb_getdev *cgd;
229 struct mpt_raid_volume *mpt_vol;
231 cgd = (struct ccb_getdev *)arg;
236 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
237 cgd->ccb_h.target_id);
/* Skip inactive volumes; only live arrays get the depth tweak. */
239 RAID_VOL_FOREACH(mpt, mpt_vol) {
240 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
243 if (mpt_vol->config_page->VolumeID
244 == cgd->ccb_h.target_id) {
245 mpt_adjust_queue_depth(mpt, mpt_vol, path);
/* Decline to attach when the IOC reports no RAID physical-disk support. */
256 mpt_raid_probe(struct mpt_softc *mpt)
258 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
/*
 * Attach the RAID personality: register our reply handler, spawn the
 * RAID monitoring thread, register for CAM AC_FOUND_DEVICE async events,
 * and hook up sysctl knobs.  On failure falls through to detach cleanup.
 * Fix(review): corrected "haandler" typo in the registration error message.
 */
265 mpt_raid_attach(struct mpt_softc *mpt)
267 struct ccb_setasync csa;
268 mpt_handler_t handler;
271 mpt_callout_init(&mpt->raid_timer);
273 handler.reply_handler = mpt_raid_reply_handler;
274 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
277 mpt_prt(mpt, "Unable to register RAID handler!\n");
281 error = mpt_spawn_raid_thread(mpt);
283 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
287 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
288 csa.ccb_h.func_code = XPT_SASYNC_CB;
289 csa.event_enable = AC_FOUND_DEVICE;
290 csa.callback = mpt_raid_async;
291 csa.callback_arg = mpt;
/* xpt_action must be issued under the CAM lock. */
292 MPTLOCK_2_CAMLOCK(mpt);
293 xpt_action((union ccb *)&csa);
294 CAMLOCK_2_MPTLOCK(mpt);
295 if (csa.ccb_h.status != CAM_REQ_CMP) {
296 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
297 "CAM async handler.\n");
300 mpt_raid_sysctl_attach(mpt);
303 mpt_raid_detach(mpt);
/* Personality enable hook; body not visible in this decimated chunk. */
308 mpt_raid_enable(struct mpt_softc *mpt)
/*
 * Undo mpt_raid_attach: stop the status timer, terminate the RAID
 * thread, deregister the reply handler, and disable our CAM async
 * callback (event_enable = 0).
 */
314 mpt_raid_detach(struct mpt_softc *mpt)
316 struct ccb_setasync csa;
317 mpt_handler_t handler;
319 callout_stop(&mpt->raid_timer);
320 mpt_terminate_raid_thread(mpt);
322 handler.reply_handler = mpt_raid_reply_handler;
323 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
325 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
326 csa.ccb_h.func_code = XPT_SASYNC_CB;
327 csa.event_enable = 0;
328 csa.callback = mpt_raid_async;
329 csa.callback_arg = mpt;
330 MPTLOCK_2_CAMLOCK(mpt);
331 xpt_action((union ccb *)&csa);
332 CAMLOCK_2_MPTLOCK(mpt);
/* IOC reset hook; intentionally a no-op for now. */
336 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
338 /* Nothing to do yet. */
/*
 * Text for RAID event reason codes, indexed by MPI_EVENT_RAID_RC_* value.
 * Order must match the MPI reason-code numbering; mpt_raid_event bounds-
 * checks against NUM_ELEMENTS(raid_event_txt) before indexing.
 */
341 static const char *raid_event_txt[] =
345 "Volume Settings Changed",
346 "Volume Status Changed",
347 "Volume Physical Disk Membership Changed",
348 "Physical Disk Created",
349 "Physical Disk Deleted",
350 "Physical Disk Settings Changed",
351 "Physical Disk Status Changed",
352 "Domain Validation Required",
353 "SMART Data Received",
354 "Replace Action Started",
/*
 * Handle an MPI_EVENT_INTEGRATED_RAID notification: locate the affected
 * volume and/or physical disk, mark their cached config pages stale
 * (clear UP2DATE/ANNOUNCED flags) as appropriate for the reason code,
 * log the event, and wake the RAID thread to refresh state.
 */
358 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
359 MSG_EVENT_NOTIFY_REPLY *msg)
361 EVENT_DATA_RAID *raid_event;
362 struct mpt_raid_volume *mpt_vol;
363 struct mpt_raid_disk *mpt_disk;
364 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
368 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
372 raid_event = (EVENT_DATA_RAID *)&msg->Data;
/* Find the active volume matching the event's bus/ID, if any. */
376 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
377 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
378 mpt_vol = &mpt->raid_volumes[i];
379 vol_pg = mpt_vol->config_page;
381 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
384 if (vol_pg->VolumeID == raid_event->VolumeID
385 && vol_pg->VolumeBus == raid_event->VolumeBus)
388 if (i >= mpt->ioc_page2->MaxVolumes) {
/* PhysDiskNum 0xFF means "no physical disk" in this event. */
395 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
396 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
397 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
403 switch(raid_event->ReasonCode) {
404 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
405 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
407 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
408 if (mpt_vol != NULL) {
409 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
410 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
413 * Coalesce status messages into one
414 * per background run of our RAID thread.
415 * This removes "spurious" status messages
422 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
423 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
425 if (mpt_vol != NULL) {
426 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
429 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
430 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
433 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
434 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
436 if (mpt_disk != NULL) {
437 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
440 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
443 case MPI_EVENT_RAID_RC_SMART_DATA:
444 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
/* Log with the most specific prefix we can: disk, volume, or raw IDs. */
449 if (mpt_disk != NULL) {
450 mpt_disk_prt(mpt, mpt_disk, "");
451 } else if (mpt_vol != NULL) {
452 mpt_vol_prt(mpt, mpt_vol, "");
454 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
455 raid_event->VolumeID);
457 if (raid_event->PhysDiskNum != 0xFF)
458 mpt_prtc(mpt, ":%d): ",
459 raid_event->PhysDiskNum);
461 mpt_prtc(mpt, "): ");
464 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
465 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
466 raid_event->ReasonCode);
468 mpt_prtc(mpt, "%s\n",
469 raid_event_txt[raid_event->ReasonCode]);
472 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
473 /* XXX Use CAM's print sense for this... */
474 if (mpt_disk != NULL)
475 mpt_disk_prt(mpt, mpt_disk, "");
477 mpt_prt(mpt, "Volume(%d:%d:%d: ",
478 raid_event->VolumeBus, raid_event->VolumeID,
479 raid_event->PhysDiskNum);
480 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
481 raid_event->ASC, raid_event->ASCQ);
/* Kick the RAID thread so it refreshes config pages. */
484 mpt_raid_wakeup(mpt);
/*
 * Shutdown hook: force member write-cache off (unless policy is
 * REBUILD_ONLY and nothing needs changing) so volumes are consistent
 * across the shutdown.
 */
489 mpt_raid_shutdown(struct mpt_softc *mpt)
491 struct mpt_raid_volume *mpt_vol;
493 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
497 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
498 RAID_VOL_FOREACH(mpt, mpt_vol) {
499 mpt_verify_mwce(mpt, mpt_vol);
/*
 * Reply-handler entry for RAID action requests.  Parses the reply frame
 * when present, marks the request DONE, dequeues it, and either wakes a
 * sleeping waiter or frees the request.
 */
504 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
505 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
513 if (reply_frame != NULL)
514 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
516 else if (req->ccb != NULL) {
517 /* Complete Quiesce CCB with error... */
521 req->state &= ~REQ_STATE_QUEUED;
522 req->state |= REQ_STATE_DONE;
523 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
/* A waiter owns the request; only free when nobody is sleeping on it. */
525 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
527 } else if (free_req) {
528 mpt_free_request(mpt, req);
535 * Parse additional completion information in the reply
536 * frame for RAID I/O requests.
/*
 * Copies IOCStatus and the reply's ActionData/ActionStatus into the
 * per-request result area (REQ_TO_RAID_ACTION_RESULT).
 * Fix(review): corrected "ENABLY" typo in the enable-physio log message.
 */
539 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
540 MSG_DEFAULT_REPLY *reply_frame)
542 MSG_RAID_ACTION_REPLY *reply;
543 struct mpt_raid_action_result *action_result;
544 MSG_RAID_ACTION_REQUEST *rap;
546 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
547 req->IOCStatus = le16toh(reply->IOCStatus);
548 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
550 switch (rap->Action) {
551 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
552 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
554 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
555 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
560 action_result = REQ_TO_RAID_ACTION_RESULT(req);
561 memcpy(&action_result->action_data, &reply->ActionData,
562 sizeof(action_result->action_data));
563 action_result->action_status = reply->ActionStatus;
568 * Utility routine to perform a RAID action command;
/*
 * Build and submit a RAID ACTION request (optionally targeting a
 * physical disk), then wait up to 2s for completion.
 *
 * Fix(review): the SGE flags expression was
 *     A | B | C | D | write ? HOST_TO_IOC : IOC_TO_HOST
 * which C parses as ((A|B|C|D|write) ? HOST_TO_IOC : IOC_TO_HOST) --
 * the simple/last/end-of-buffer/end-of-list bits were discarded and
 * only the direction flag was ever set.  Parenthesize the conditional
 * so the direction bit is OR'd with the other flags as intended.
 */
571 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
572 struct mpt_raid_disk *disk, request_t *req, u_int Action,
573 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
576 MSG_RAID_ACTION_REQUEST *rap;
580 memset(rap, 0, sizeof *rap);
581 rap->Action = Action;
582 rap->ActionDataWord = ActionDataWord;
583 rap->Function = MPI_FUNCTION_RAID_ACTION;
584 rap->VolumeID = vol->config_page->VolumeID;
585 rap->VolumeBus = vol->config_page->VolumeBus;
587 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
589 rap->PhysDiskNum = 0xFF;
590 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
592 MPI_pSGE_SET_LENGTH(se, len);
593 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
594 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
595 MPI_SGE_FLAGS_END_OF_LIST |
596 (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
597 rap->MsgContext = htole32(req->index | raid_handler_id);
599 mpt_check_doorbell(mpt);
600 mpt_send_cmd(mpt, req);
603 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
604 /*sleep_ok*/FALSE, /*time_ms*/2000));
610 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID monitoring kthread.  The physical-disk SIM queue is
 * frozen until the thread has refreshed RAID data once, so CAM cannot
 * issue I/O to IDs we have not yet classified; on spawn failure the
 * queue is released immediately.
 */
612 mpt_spawn_raid_thread(struct mpt_softc *mpt)
617 * Freeze out any CAM transactions until our thread
618 * is able to run at least once. We need to update
619 * our RAID pages before acception I/O or we may
620 * reject I/O to an ID we later determine is for a
623 xpt_freeze_simq(mpt->phydisk_sim, 1);
624 error = mpt_kthread_create(mpt_raid_thread, mpt,
625 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
626 "mpt_raid%d", mpt->unit);
628 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
/*
 * Ask the RAID thread to exit (shutdwn_raid flag + wakeup) and wait for
 * it to clear mpt->raid_thread before returning.
 */
633 mpt_terminate_raid_thread(struct mpt_softc *mpt)
636 if (mpt->raid_thread == NULL) {
639 mpt->shutdwn_raid = 1;
640 wakeup(mpt->raid_volumes);
642 * Sleep on a slightly different location
643 * for this interlock just for added safety.
645 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/* Rescan-CCB completion: release the path created for the scan. */
649 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
651 xpt_free_path(ccb->ccb_h.path);
/*
 * RAID monitoring kthread main loop: sleep until woken, refresh RAID
 * data (rescheduling on failure), release the phys-disk SIM queue after
 * the first successful refresh, and issue a wildcard bus rescan when
 * requested.  Exits when shutdwn_raid is set, signalling the terminator.
 */
656 mpt_raid_thread(void *arg)
658 struct mpt_softc *mpt;
661 #if __FreeBSD_version >= 500000
664 mpt = (struct mpt_softc *)arg;
667 while (mpt->shutdwn_raid == 0) {
669 if (mpt->raid_wakeup == 0) {
670 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
674 mpt->raid_wakeup = 0;
676 if (mpt_refresh_raid_data(mpt)) {
677 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
682 * Now that we have our first snapshot of RAID data,
683 * allow CAM to access our physical disk bus.
687 MPTLOCK_2_CAMLOCK(mpt);
688 xpt_release_simq(mpt->phydisk_sim, TRUE);
689 CAMLOCK_2_MPTLOCK(mpt);
692 if (mpt->raid_rescan != 0) {
694 struct cam_path *path;
697 mpt->raid_rescan = 0;
/* NOTE(review): M_WAITOK malloc -- cannot fail, no NULL check needed. */
699 ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
700 error = xpt_create_path(&path, xpt_periph,
701 cam_sim_path(mpt->phydisk_sim),
702 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
703 if (error != CAM_REQ_CMP) {
705 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
707 xpt_setup_ccb(&ccb->ccb_h, path, 5);
708 ccb->ccb_h.func_code = XPT_SCAN_BUS;
709 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
710 ccb->crcn.flags = CAM_FLAG_NONE;
711 MPTLOCK_2_CAMLOCK(mpt);
713 CAMLOCK_2_MPTLOCK(mpt);
/* Signal mpt_terminate_raid_thread that we are gone. */
717 mpt->raid_thread = NULL;
718 wakeup(&mpt->raid_thread);
720 #if __FreeBSD_version >= 500000
/* Timeout handler armed by mpt_raid_quiesce_disk; body mostly elided. */
728 mpt_raid_quiesce_timeout(void *arg)
730 /* Complete the CCB with error */
734 static timeout_t mpt_raid_quiesce_timeout;
/*
 * Quiesce physical I/O to a RAID member disk: freeze its devq, issue a
 * QUIESCE_PHYS_IO RAID action, and arm a 5s timeout.  Returns a CAM
 * status (CMP when already quiesced, INPROG/REQUEUE/CMP_ERR otherwise).
 * Fix(review): corrected "Quiece" typos and the missing separator in the
 * concatenated "Quiesce Failed" diagnostic.
 */
736 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
742 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
743 return (CAM_REQ_CMP);
745 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
748 mpt_disk->flags |= MPT_RDF_QUIESCING;
749 xpt_freeze_devq(ccb->ccb_h.path, 1);
751 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
752 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
753 /*ActionData*/0, /*addr*/0,
754 /*len*/0, /*write*/FALSE,
757 return (CAM_REQ_CMP_ERR);
759 ccb->ccb_h.timeout_ch =
760 timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
762 if (rv == ETIMEDOUT) {
763 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
764 "Quiesce Timed-out\n");
765 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
766 return (CAM_REQ_CMP_ERR);
769 ar = REQ_TO_RAID_ACTION_RESULT(req);
771 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
772 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
773 mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
774 "%d:%x:%x\n", rv, req->IOCStatus,
776 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
777 return (CAM_REQ_CMP_ERR);
780 return (CAM_REQ_INPROG);
782 return (CAM_REQUEUE_REQ);
786 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Translate a pass-thru target id to the underlying physical disk ID.
 * NOTE(review): the pointer into raid_disks is computed before the
 * bounds check; the dereference is short-circuit guarded by the range
 * test, but the out-of-range pointer arithmetic itself is technically UB.
 */
788 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
790 struct mpt_raid_disk *mpt_disk;
792 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
793 if (ccb->ccb_h.target_id < mpt->raid_max_disks
794 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
796 *tgt = mpt_disk->config_page.PhysDiskID;
799 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
800 ccb->ccb_h.target_id);
804 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/* Return whether target id 'tgt' matches an active RAID volume's ID. */
806 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
808 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
809 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
811 ioc_vol = mpt->ioc_page2->RaidVolume;
812 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
813 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
814 if (ioc_vol->VolumeID == tgt) {
/*
 * Bring the volume's enabled state in line with 'enable' by issuing an
 * ENABLE_VOLUME or DISABLE_VOLUME RAID action; no-op when the hardware
 * state already matches.
 */
823 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
827 struct mpt_raid_action_result *ar;
828 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
832 vol_pg = mpt_vol->config_page;
833 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
836 * If the setting matches the configuration,
837 * there is nothing to do.
839 if ((enabled && enable)
840 || (!enabled && !enable))
843 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
845 mpt_vol_prt(mpt, mpt_vol,
846 "mpt_enable_vol: Get request failed!\n");
850 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
851 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
852 : MPI_RAID_ACTION_DISABLE_VOLUME,
853 /*data*/0, /*addr*/0, /*len*/0,
854 /*write*/FALSE, /*wait*/TRUE);
855 if (rv == ETIMEDOUT) {
856 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
857 "%s Volume Timed-out\n",
858 enable ? "Enable" : "Disable");
861 ar = REQ_TO_RAID_ACTION_RESULT(req);
863 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
864 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
865 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
866 enable ? "Enable" : "Disable",
867 rv, req->IOCStatus, ar->action_status);
870 mpt_free_request(mpt, req);
/*
 * Reconcile the volume's member write-cache-enable (MWCE) setting with
 * the driver policy (REBUILD_ONLY/ON/OFF/NC) by toggling the WCE bit via
 * a CHANGE_VOLUME_SETTINGS RAID action when they disagree.  The local
 * config page's setting is toggled back on command failure so cached
 * state stays in sync with the hardware.
 */
875 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
878 struct mpt_raid_action_result *ar;
879 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
885 vol_pg = mpt_vol->config_page;
886 resyncing = vol_pg->VolumeStatus.Flags
887 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
888 mwce = vol_pg->VolumeSettings.Settings
889 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
892 * If the setting matches the configuration,
893 * there is nothing to do.
895 switch (mpt->raid_mwce_setting) {
896 case MPT_RAID_MWCE_REBUILD_ONLY:
897 if ((resyncing && mwce) || (!resyncing && !mwce)) {
900 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
901 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
903 * Wait one more status update to see if
904 * resyncing gets enabled. It gets disabled
905 * temporarilly when WCE is changed.
910 case MPT_RAID_MWCE_ON:
914 case MPT_RAID_MWCE_OFF:
918 case MPT_RAID_MWCE_NC:
922 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
924 mpt_vol_prt(mpt, mpt_vol,
925 "mpt_verify_mwce: Get request failed!\n");
/* Toggle WCE to build the new settings word, then restore local copy. */
929 vol_pg->VolumeSettings.Settings ^=
930 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
931 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
932 vol_pg->VolumeSettings.Settings ^=
933 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
934 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
935 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
936 data, /*addr*/0, /*len*/0,
937 /*write*/FALSE, /*wait*/TRUE);
938 if (rv == ETIMEDOUT) {
939 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
940 "Write Cache Enable Timed-out\n");
943 ar = REQ_TO_RAID_ACTION_RESULT(req);
945 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
946 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
947 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
948 "%d:%x:%x\n", rv, req->IOCStatus,
951 vol_pg->VolumeSettings.Settings ^=
952 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
954 mpt_free_request(mpt, req);
/*
 * Reconcile the volume's resync rate / resync priority with the driver's
 * configured raid_resync_rate: first via SET_RESYNC_RATE when the raw
 * rate differs, otherwise by toggling the PRIORITY_RESYNC settings bit
 * (rate >= 128 means high priority) via CHANGE_VOLUME_SETTINGS.
 * No-op when the policy is MPT_RAID_RESYNC_RATE_NC.
 */
958 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
961 struct mpt_raid_action_result *ar;
962 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
966 vol_pg = mpt_vol->config_page;
968 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
972 * If the current RAID resync rate does not
973 * match our configured rate, update it.
975 prio = vol_pg->VolumeSettings.Settings
976 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
977 if (vol_pg->ResyncRate != 0
978 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
980 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
982 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
983 "Get request failed!\n");
987 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
988 MPI_RAID_ACTION_SET_RESYNC_RATE,
989 mpt->raid_resync_rate, /*addr*/0,
990 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
991 if (rv == ETIMEDOUT) {
992 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
993 "Resync Rate Setting Timed-out\n");
997 ar = REQ_TO_RAID_ACTION_RESULT(req);
999 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1000 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1001 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1002 "%d:%x:%x\n", rv, req->IOCStatus,
1005 vol_pg->ResyncRate = mpt->raid_resync_rate;
1006 mpt_free_request(mpt, req);
1007 } else if ((prio && mpt->raid_resync_rate < 128)
1008 || (!prio && mpt->raid_resync_rate >= 128)) {
1011 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1013 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1014 "Get request failed!\n");
/* Toggle priority bit to build new settings; restore local copy after. */
1018 vol_pg->VolumeSettings.Settings ^=
1019 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1020 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1021 vol_pg->VolumeSettings.Settings ^=
1022 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1023 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1024 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1025 data, /*addr*/0, /*len*/0,
1026 /*write*/FALSE, /*wait*/TRUE);
1027 if (rv == ETIMEDOUT) {
1028 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1029 "Resync Rate Setting Timed-out\n");
1032 ar = REQ_TO_RAID_ACTION_RESULT(req);
1034 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1035 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1036 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1037 "%d:%x:%x\n", rv, req->IOCStatus,
1040 vol_pg->VolumeSettings.Settings ^=
1041 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1044 mpt_free_request(mpt, req);
/*
 * Set the device queue depth on 'path' to mpt->raid_queue_depth via an
 * XPT_REL_SIMQ / RELSIM_ADJUST_OPENINGS CCB; log on failure.
 */
1049 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1050 struct cam_path *path)
1052 struct ccb_relsim crs;
1054 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1055 crs.ccb_h.func_code = XPT_REL_SIMQ;
1056 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1057 crs.openings = mpt->raid_queue_depth;
1058 xpt_action((union ccb *)&crs);
1059 if (crs.ccb_h.status != CAM_REQ_CMP)
1060 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1061 "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Log a volume's settings bits, hot-spare pool membership, and member
 * roster (primary/secondary for mirrors, stripe position otherwise).
 */
1065 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1067 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1070 vol_pg = mpt_vol->config_page;
1071 mpt_vol_prt(mpt, mpt_vol, "Settings (");
/* Walk each settings bit, printing a name for those we recognize. */
1072 for (i = 1; i <= 0x8000; i <<= 1) {
1073 switch (vol_pg->VolumeSettings.Settings & i) {
1074 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1075 mpt_prtc(mpt, " Member-WCE");
1077 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1078 mpt_prtc(mpt, " Offline-On-SMART-Err");
1080 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1081 mpt_prtc(mpt, " Hot-Plug-Spares");
1083 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1084 mpt_prtc(mpt, " High-Priority-ReSync");
1090 mpt_prtc(mpt, " )\n");
1091 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1092 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1093 powerof2(vol_pg->VolumeSettings.HotSparePool)
1095 for (i = 0; i < 8; i++) {
1099 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1101 mpt_prtc(mpt, " %d", i);
1103 mpt_prtc(mpt, "\n");
1105 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1106 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1107 struct mpt_raid_disk *mpt_disk;
1108 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1110 mpt_disk = mpt->raid_disks
1111 + vol_pg->PhysDisk[i].PhysDiskNum;
1112 disk_pg = &mpt_disk->config_page;
1114 mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
1115 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
1116 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1117 mpt_prtc(mpt, "%s\n",
1118 mpt_disk->member_number == 0
1119 ? "Primary" : "Secondary");
1121 mpt_prtc(mpt, "Stripe Position %d\n",
1122 mpt_disk->member_number);
/*
 * Log a physical disk's real and pass-thru addresses plus any hot-spare
 * pool membership bits.
 */
1127 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1129 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1132 disk_pg = &mpt_disk->config_page;
1133 mpt_disk_prt(mpt, mpt_disk,
1134 "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1135 device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1136 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1137 /*bus*/1, mpt_disk - mpt->raid_disks);
1139 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1141 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1142 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1144 for (i = 0; i < 8; i++) {
1148 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1150 mpt_prtc(mpt, " %d", i);
1152 mpt_prtc(mpt, "\n");
/*
 * Re-read a physical disk's RAID_PHYSDISK page 0 (header, then current
 * page) into the cached config_page; logs and bails on either failure.
 */
1156 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1157 IOC_3_PHYS_DISK *ioc_disk)
1161 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1162 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1163 &mpt_disk->config_page.Header,
1164 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1166 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1167 "Failed to read RAID Disk Hdr(%d)\n",
1168 ioc_disk->PhysDiskNum);
1171 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1172 &mpt_disk->config_page.Header,
1173 sizeof(mpt_disk->config_page),
1174 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1176 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1177 "Failed to read RAID Disk Page(%d)\n",
1178 ioc_disk->PhysDiskNum);
/*
 * Re-read a volume's RAID_VOLUME page 0, mark the volume active, refresh
 * each member disk's volume/member_number linkage, and, when a resync is
 * in progress, fetch the progress indicator via an INDICATOR_STRUCT
 * RAID action into mpt_vol->sync_progress.
 */
1182 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1183 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1185 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1186 struct mpt_raid_action_result *ar;
1191 vol_pg = mpt_vol->config_page;
1192 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1193 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
1194 /*PageNumber*/0, ioc_vol->VolumePageNumber,
1195 &vol_pg->Header, /*sleep_ok*/TRUE,
1196 /*timeout_ms*/5000);
1198 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1199 "Failed to read RAID Vol Hdr(%d)\n",
1200 ioc_vol->VolumePageNumber);
1203 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1204 &vol_pg->Header, mpt->raid_page0_len,
1205 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1207 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1208 "Failed to read RAID Vol Page(%d)\n",
1209 ioc_vol->VolumePageNumber);
1212 mpt_vol->flags |= MPT_RVF_ACTIVE;
1214 /* Update disk entry array data. */
1215 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1216 struct mpt_raid_disk *mpt_disk;
1218 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1219 mpt_disk->volume = mpt_vol;
1220 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
/* Mirrors map member 0/1 via PhysDiskMap offset by one. */
1221 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1222 mpt_disk->member_number--;
1225 if ((vol_pg->VolumeStatus.Flags
1226 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1229 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1231 mpt_vol_prt(mpt, mpt_vol,
1232 "mpt_refresh_raid_vol: Get request failed!\n");
1235 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1236 MPI_RAID_ACTION_INDICATOR_STRUCT,
1237 /*ActionWord*/0, /*addr*/0, /*len*/0,
1238 /*write*/FALSE, /*wait*/TRUE);
1239 if (rv == ETIMEDOUT) {
1240 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1241 "Progress indicator fetch timedout!\n");
1245 ar = REQ_TO_RAID_ACTION_RESULT(req);
1247 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1248 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1249 memcpy(&mpt_vol->sync_progress,
1250 &ar->action_data.indicator_struct,
1251 sizeof(mpt_vol->sync_progress));
1253 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1254 "Progress indicator fetch failed!\n");
1256 mpt_free_request(mpt, req);
1260 * Update in-core information about RAID support. We update any entries
1261 * that didn't previously exists or have been marked as needing to
1262 * be updated by our event handler. Interesting changes are displayed
/*
 * Re-read IOC Page 3 (physical disks) and IOC Page 2 (volumes) from the
 * controller and resynchronize the driver's in-core RAID state with the
 * firmware's view: refresh stale entries, report objects that vanished,
 * announce new ones, print volume/disk status, and report re-sync
 * progress.  (Several control-flow lines are elided in this view.)
 */
1266 mpt_refresh_raid_data(struct mpt_softc *mpt)
1268 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1269 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1270 IOC_3_PHYS_DISK *ioc_disk;
1271 IOC_3_PHYS_DISK *ioc_last_disk;
1272 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1276 u_int nonopt_volumes;
/* Nothing to do if the IOC configuration pages were never fetched. */
1278 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1283 * Mark all items as unreferenced by the configuration.
1284 * This allows us to find, report, and discard stale
1287 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1288 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1290 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1291 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1295 * Get Physical Disk information.
1297 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1298 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1299 &mpt->ioc_page3->Header, len,
1300 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1303 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
/*
 * Walk the disks the firmware reports; refresh any entry that is not
 * both active and up to date, and mark everything seen as referenced.
 */
1307 ioc_disk = mpt->ioc_page3->PhysDisk;
1308 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1309 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1310 struct mpt_raid_disk *mpt_disk;
1312 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1313 mpt_disk->flags |= MPT_RDF_REFERENCED;
1314 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1315 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1317 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1320 mpt_disk->flags |= MPT_RDF_ACTIVE;
1325 * Refresh volume data.
1327 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1328 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1329 &mpt->ioc_page2->Header, len,
1330 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1332 mpt_prt(mpt, "mpt_refresh_raid_data: "
1333 "Failed to read IOC Page 2\n");
/* Refresh volumes that are new, stale, or mid-resync. */
1337 ioc_vol = mpt->ioc_page2->RaidVolume;
1338 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1339 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1340 struct mpt_raid_volume *mpt_vol;
1342 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1343 mpt_vol->flags |= MPT_RVF_REFERENCED;
1344 vol_pg = mpt_vol->config_page;
1347 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1348 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1349 || (vol_pg->VolumeStatus.Flags
1350 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1352 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1354 mpt_vol->flags |= MPT_RVF_ACTIVE;
/*
 * Second pass over all volume slots: report volumes that disappeared,
 * announce new ones, and print state/status/progress information.
 */
1358 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1359 struct mpt_raid_volume *mpt_vol;
1365 mpt_vol = &mpt->raid_volumes[i];
1367 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1370 vol_pg = mpt_vol->config_page;
/* Previously announced but no longer referenced by firmware: stale. */
1371 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1372 == MPT_RVF_ANNOUNCED) {
1373 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1378 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1380 mpt_announce_vol(mpt, mpt_vol);
1381 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
/* nonopt_volumes is presumably accumulated here in elided lines. */
1384 if (vol_pg->VolumeStatus.State !=
1385 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1388 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1391 mpt_vol->flags |= MPT_RVF_UP2DATE;
1392 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1393 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1394 mpt_verify_mwce(mpt, mpt_vol);
1396 if (vol_pg->VolumeStatus.Flags == 0)
/* Decode the individual status bits (low 8 bits). */
1399 mpt_vol_prt(mpt, mpt_vol, "Status (");
1400 for (m = 1; m <= 0x80; m <<= 1) {
1401 switch (vol_pg->VolumeStatus.Flags & m) {
1402 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1403 mpt_prtc(mpt, " Enabled");
1405 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1406 mpt_prtc(mpt, " Quiesced");
1408 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1409 mpt_prtc(mpt, " Re-Syncing");
1411 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1412 mpt_prtc(mpt, " Inactive");
1418 mpt_prtc(mpt, " )\n");
1420 if ((vol_pg->VolumeStatus.Flags
1421 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
/* Re-sync in progress: report rate/priority and remaining blocks. */
1424 mpt_verify_resync_rate(mpt, mpt_vol);
1426 left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1427 total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1428 if (vol_pg->ResyncRate != 0) {
/* Scale 1..255 to a percentage with three decimal digits. */
1430 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1431 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1432 prio / 1000, prio % 1000);
1434 prio = vol_pg->VolumeSettings.Settings
1435 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1436 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1437 prio ? "High" : "Low");
1439 #if __FreeBSD_version >= 500000
1440 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1441 "blocks remaining\n", (uintmax_t)left,
1444 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1445 "blocks remaining\n", (uint64_t)left,
1449 /* Periodically report on sync progress. */
1450 mpt_schedule_raid_refresh(mpt);
/* Same disappeared/announce/update pass for the physical disks. */
1453 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1454 struct mpt_raid_disk *mpt_disk;
1455 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1458 mpt_disk = &mpt->raid_disks[i];
1459 disk_pg = &mpt_disk->config_page;
1461 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1464 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1465 == MPT_RDF_ANNOUNCED) {
1466 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1467 mpt_disk->flags = 0;
1472 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1474 mpt_announce_disk(mpt, mpt_disk);
/*
 * Fix: mark the disk with the disk-flag constant MPT_RDF_ANNOUNCED.
 * The original set MPT_RVF_ANNOUNCED (a volume flag) in a disk flag
 * word that is tested with MPT_RDF_ANNOUNCED above; unless the two
 * constants happen to share a bit value, the disk would never be
 * seen as announced and would be re-announced on every refresh.
 */
1475 mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1478 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1481 mpt_disk->flags |= MPT_RDF_UP2DATE;
1482 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1483 if (disk_pg->PhysDiskStatus.Flags == 0)
/* Decode the per-disk status bits. */
1486 mpt_disk_prt(mpt, mpt_disk, "Status (");
1487 for (m = 1; m <= 0x80; m <<= 1) {
1488 switch (disk_pg->PhysDiskStatus.Flags & m) {
1489 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1490 mpt_prtc(mpt, " Out-Of-Sync");
1492 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1493 mpt_prtc(mpt, " Quiesced");
1499 mpt_prtc(mpt, " )\n");
/* Publish the non-optimal volume count for the sysctl. */
1502 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * Callout handler for the periodic RAID refresh timer: wake the RAID
 * monitoring thread so it re-runs mpt_refresh_raid_data().  The callout
 * is (re)armed by mpt_schedule_raid_refresh().
 */
1507 mpt_raid_timer(void *arg)
1509 struct mpt_softc *mpt;
/* The callout argument is the controller softc (see callout_reset below). */
1511 mpt = (struct mpt_softc *)arg;
1513 mpt_raid_wakeup(mpt);
/*
 * (Re)arm the RAID refresh callout; mpt_raid_timer() fires after
 * MPT_RAID_SYNC_REPORT_INTERVAL and wakes the RAID thread.
 */
1518 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1520 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1521 mpt_raid_timer, mpt);
/*
 * Release all RAID-related allocations on the softc: per-volume config
 * pages, the volume and disk arrays, and the cached IOC pages 2 and 3.
 * Pointers are NULLed after freeing so the routine is safe to call
 * more than once; the max counts are reset to match.
 */
1525 mpt_raid_free_mem(struct mpt_softc *mpt)
1528 if (mpt->raid_volumes) {
1529 struct mpt_raid_volume *mpt_raid;
/* Free each volume's RAID_VOL_0 config page before the array itself. */
1531 for (i = 0; i < mpt->raid_max_volumes; i++) {
1532 mpt_raid = &mpt->raid_volumes[i];
1533 if (mpt_raid->config_page) {
1534 free(mpt_raid->config_page, M_DEVBUF);
1535 mpt_raid->config_page = NULL;
1538 free(mpt->raid_volumes, M_DEVBUF);
1539 mpt->raid_volumes = NULL;
1541 if (mpt->raid_disks) {
1542 free(mpt->raid_disks, M_DEVBUF);
1543 mpt->raid_disks = NULL;
1545 if (mpt->ioc_page2) {
1546 free(mpt->ioc_page2, M_DEVBUF);
1547 mpt->ioc_page2 = NULL;
1549 if (mpt->ioc_page3) {
1550 free(mpt->ioc_page3, M_DEVBUF);
1551 mpt->ioc_page3 = NULL;
/* Reset limits so stale counts can't be used against freed arrays. */
1553 mpt->raid_max_volumes = 0;
1554 mpt->raid_max_disks = 0;
/*
 * Set the global resync rate and push it to every active volume via
 * mpt_verify_resync_rate().  Valid rates are
 * MPT_RAID_RESYNC_RATE_MIN..MPT_RAID_RESYNC_RATE_MAX, or
 * MPT_RAID_RESYNC_RATE_NC ("no change"); anything else is rejected
 * (error return is in an elided line).
 */
1558 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1560 struct mpt_raid_volume *mpt_vol;
1562 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1563 || rate < MPT_RAID_RESYNC_RATE_MIN)
1564 && rate != MPT_RAID_RESYNC_RATE_NC)
1568 mpt->raid_resync_rate = rate;
1569 RAID_VOL_FOREACH(mpt, mpt_vol) {
/* Skip volume slots not currently in use. */
1570 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1573 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * Set the default volume queue depth (1..255) and apply it to every
 * active volume by building a CAM path to the volume's target and
 * calling mpt_adjust_queue_depth().
 */
1580 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1582 struct mpt_raid_volume *mpt_vol;
1584 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1588 mpt->raid_queue_depth = vol_queue_depth;
1589 RAID_VOL_FOREACH(mpt, mpt_vol) {
1590 struct cam_path *path;
1593 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
/* A rescan is no longer needed once depths are set directly. */
1596 mpt->raid_rescan = 0;
1598 error = xpt_create_path(&path, xpt_periph,
1599 cam_sim_path(mpt->sim),
1600 mpt_vol->config_page->VolumeID,
1602 if (error != CAM_REQ_CMP) {
1603 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1606 mpt_adjust_queue_depth(mpt, mpt_vol, path);
/* Path is only needed for the adjust call; release it immediately. */
1607 xpt_free_path(path);
/*
 * Set the member-disk write-cache-enable (MWCE) policy and apply it to
 * every active volume via mpt_verify_mwce().  On the very first
 * explicit setting, detect the unsafe-shutdown case (MWCE on while a
 * resync is in progress) and warn that a full resync is advisable.
 */
1614 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1616 struct mpt_raid_volume *mpt_vol;
1617 int force_full_resync;
/* No-op when the requested setting matches the current one. */
1620 if (mwce == mpt->raid_mwce_setting) {
1626 * Catch MWCE being left on due to a failed shutdown. Since
1627 * sysctls cannot be set by the loader, we treat the first
1628 * setting of this variable specially and force a full volume
1629 * resync if MWCE is enabled and a resync is in progress.
1631 force_full_resync = 0;
1632 if (mpt->raid_mwce_set == 0
1633 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1634 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1635 force_full_resync = 1;
1637 mpt->raid_mwce_setting = mwce;
1638 RAID_VOL_FOREACH(mpt, mpt_vol) {
1639 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1643 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1646 vol_pg = mpt_vol->config_page;
1647 resyncing = vol_pg->VolumeStatus.Flags
1648 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
/* Note: reuses the 'mwce' parameter to hold this volume's setting. */
1649 mwce = vol_pg->VolumeSettings.Settings
1650 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1651 if (force_full_resync && resyncing && mwce) {
1654 * XXX disable/enable volume should force a resync,
1655 * but we'll need to quiesce, drain, and restart
1658 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1659 "detected. Suggest full resync.\n");
1661 mpt_verify_mwce(mpt, mpt_vol);
/* Remember that the setting has been made explicitly at least once. */
1663 mpt->raid_mwce_set = 1;
/*
 * Human-readable names for the mpt_raid_mwce_t settings; the order must
 * match the enum since mpt_raid_sysctl_vol_member_wce() indexes by it.
 * (Other entries of this array are elided in this view.)
 */
1668 const char *mpt_vol_mwce_strs[] =
1672 "On-During-Rebuild",
/*
 * Sysctl handler for "vol_member_wce".  Reads report the current
 * setting as a string; writes must match one of mpt_vol_mwce_strs and
 * are applied through mpt_raid_set_vol_mwce() using the string's index
 * as the mpt_raid_mwce_t value.
 */
1677 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1680 struct mpt_softc *mpt;
1688 mpt = (struct mpt_softc *)arg1;
1689 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
/* Emit the current value (including its NUL terminator). */
1690 error = SYSCTL_OUT(req, str, strlen(str) + 1);
/* Read-only request, or output error: nothing further to do. */
1691 if (error || !req->newptr) {
1695 size = req->newlen - req->newidx;
/* Reject strings too long for the local buffer. */
1696 if (size >= sizeof(inbuf)) {
1700 error = SYSCTL_IN(req, inbuf, size);
/* Translate the written string to its enum index. */
1705 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1706 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1707 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * Sysctl handler for "vol_resync_rate": reads report the current rate;
 * writes are validated and applied by mpt_raid_set_vol_resync_rate().
 */
1714 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1716 struct mpt_softc *mpt;
1717 u_int raid_resync_rate;
1722 mpt = (struct mpt_softc *)arg1;
/* Work on a copy; only commit through the setter below. */
1723 raid_resync_rate = mpt->raid_resync_rate;
1725 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
/* Read-only request or handler error: nothing to apply. */
1726 if (error || !req->newptr) {
1730 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * Sysctl handler for "vol_queue_depth": reads report the current depth;
 * writes are validated and applied by mpt_raid_set_vol_queue_depth().
 */
1734 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1736 struct mpt_softc *mpt;
1737 u_int raid_queue_depth;
1742 mpt = (struct mpt_softc *)arg1;
/* Work on a copy; only commit through the setter below. */
1743 raid_queue_depth = mpt->raid_queue_depth;
1745 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
/* Read-only request or handler error: nothing to apply. */
1746 if (error || !req->newptr) {
1750 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1754 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1756 #if __FreeBSD_version >= 500000
1757 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1758 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1760 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1761 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1762 mpt_raid_sysctl_vol_member_wce, "A",
1763 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1765 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1766 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1767 mpt_raid_sysctl_vol_queue_depth, "I",
1768 "default volume queue depth");
1770 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1771 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1772 mpt_raid_sysctl_vol_resync_rate, "I",
1773 "volume resync priority (0 == NC, 1 - 255)");
1774 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1775 "nonoptimal_volumes", CTLFLAG_RD,
1776 &mpt->raid_nonopt_volumes, 0,
1777 "number of nonoptimal volumes");