2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <dev/mpt/mpt.h>
39 #include <dev/mpt/mpt_raid.h>
41 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
42 #include "dev/mpt/mpilib/mpi_raid.h"
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_sim.h>
47 #include <cam/cam_xpt_sim.h>
49 #if __FreeBSD_version < 500000
50 #include <sys/devicestat.h>
51 #define GIANT_REQUIRED
53 #include <cam/cam_periph.h>
55 #include <sys/callout.h>
56 #include <sys/kthread.h>
57 #include <sys/sysctl.h>
59 #include <machine/stdarg.h>
/*
 * Scratch area describing the outcome of a RAID action request.  It is
 * overlaid immediately after the MSG_RAID_ACTION_REQUEST in the request's
 * virtual buffer (see REQ_TO_RAID_ACTION_RESULT below).
 * NOTE(review): the extraction has gaps here — the three fields below are
 * presumably members of a union named 'action_data' (REQ_TO_RAID_ACTION_RESULT
 * users reference ar->action_data); confirm against the full source.
 */
61 struct mpt_raid_action_result
64 MPI_RAID_VOL_INDICATOR indicator_struct;
65 uint32_t new_settings;
66 uint8_t phys_disk_num;
68 uint16_t action_status;
/* Locate the action result area that follows the request message. */
71 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
72 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
/* IOC status with the log-info flag bits masked off. */
74 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
/* Personality entry points implemented by this file. */
77 static mpt_probe_handler_t mpt_raid_probe;
78 static mpt_attach_handler_t mpt_raid_attach;
79 static mpt_enable_handler_t mpt_raid_enable;
80 static mpt_event_handler_t mpt_raid_event;
81 static mpt_shutdown_handler_t mpt_raid_shutdown;
82 static mpt_reset_handler_t mpt_raid_ioc_reset;
83 static mpt_detach_handler_t mpt_raid_detach;
/*
 * Registration record binding the handlers above into the mpt(4)
 * personality framework.
 */
85 static struct mpt_personality mpt_raid_personality =
88 .probe = mpt_raid_probe,
89 .attach = mpt_raid_attach,
90 .enable = mpt_raid_enable,
91 .event = mpt_raid_event,
92 .reset = mpt_raid_ioc_reset,
93 .shutdown = mpt_raid_shutdown,
94 .detach = mpt_raid_detach,
/* Register the personality and declare its dependency on mpt_cam. */
97 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
98 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
/* Forward declarations for internal helpers. */
100 static mpt_reply_handler_t mpt_raid_reply_handler;
101 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
102 MSG_DEFAULT_REPLY *reply_frame);
103 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
104 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
105 static void mpt_raid_thread(void *arg);
106 static timeout_t mpt_raid_timer;
107 static timeout_t mpt_raid_quiesce_timeout;
109 static void mpt_enable_vol(struct mpt_softc *mpt,
110 struct mpt_raid_volume *mpt_vol, int enable);
112 static void mpt_verify_mwce(struct mpt_softc *mpt,
113 struct mpt_raid_volume *mpt_vol);
114 static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
115 struct mpt_raid_volume *mpt_vol,
116 struct cam_path *path);
117 static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
/* Reply-handler cookie assigned at attach time by mpt_register_handler(). */
119 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Return a printable name for a volume's RAID type.
 * NOTE(review): extraction gaps — the return statements and default case
 * are not visible in this chunk.
 */
122 mpt_vol_type(struct mpt_raid_volume *vol)
124 switch (vol->config_page->VolumeType) {
125 case MPI_RAID_VOL_TYPE_IS:
127 case MPI_RAID_VOL_TYPE_IME:
129 case MPI_RAID_VOL_TYPE_IM:
/*
 * Return a printable name for a volume's current state.
 * NOTE(review): extraction gaps — return statements/default not visible.
 */
137 mpt_vol_state(struct mpt_raid_volume *vol)
139 switch (vol->config_page->VolumeStatus.State) {
140 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
142 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
144 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/*
 * Return a printable name for a physical disk's current state.
 * NOTE(review): extraction gaps — several return statements are missing
 * between the visible case labels.
 */
152 mpt_disk_state(struct mpt_raid_disk *disk)
154 switch (disk->config_page.PhysDiskStatus.State) {
155 case MPI_PHYSDISK0_STATUS_ONLINE:
157 case MPI_PHYSDISK0_STATUS_MISSING:
159 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
160 return ("Incompatible");
161 case MPI_PHYSDISK0_STATUS_FAILED:
163 case MPI_PHYSDISK0_STATUS_INITIALIZING:
164 return ("Initializing");
165 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
166 return ("Offline Requested");
167 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
168 return ("Failed per Host Request");
169 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * printf-style diagnostic prefixed with "<unit>:vol<N>(<unit>:<bus>:<id>): "
 * identifying the volume.  The varargs body (va_start/vprintf/va_end) is
 * not visible in this extract.
 */
177 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
178 const char *fmt, ...)
182 printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
183 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
184 vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * printf-style diagnostic for a physical disk.  When the disk belongs
 * to a volume the prefix names the volume and member number; otherwise
 * it names the disk's bus/target.  Varargs tail not visible in extract.
 */
191 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
192 const char *fmt, ...)
196 if (disk->volume != NULL) {
197 printf("(%s:vol%d:%d): ",
198 device_get_nameunit(mpt->dev),
199 disk->volume->config_page->VolumeID,
200 disk->member_number);
202 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
203 disk->config_page.PhysDiskBus,
204 disk->config_page.PhysDiskID);
/*
 * CAM async-event callback (registered in mpt_raid_attach for
 * AC_FOUND_DEVICE).  When a newly found device's target id matches an
 * active RAID volume, adjust that path's queue depth to the driver's
 * configured RAID depth.
 */
212 mpt_raid_async(void *callback_arg, u_int32_t code,
213 struct cam_path *path, void *arg)
215 struct mpt_softc *mpt;
217 mpt = (struct mpt_softc*)callback_arg;
219 case AC_FOUND_DEVICE:
221 struct ccb_getdev *cgd;
222 struct mpt_raid_volume *mpt_vol;
224 cgd = (struct ccb_getdev *)arg;
229 mpt_lprt(mpt, MPT_PRT_DEBUG, " Callback for %d\n",
230 cgd->ccb_h.target_id);
/* Only active volumes are eligible for queue-depth adjustment. */
232 RAID_VOL_FOREACH(mpt, mpt_vol) {
233 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
236 if (mpt_vol->config_page->VolumeID
237 == cgd->ccb_h.target_id) {
238 mpt_adjust_queue_depth(mpt, mpt_vol, path);
/*
 * Personality probe: decline when the IOC reports no integrated RAID
 * support (no IOC page 2 or zero physical disks).  Return statements
 * are not visible in this extract.
 */
249 mpt_raid_probe(struct mpt_softc *mpt)
251 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
258 mpt_raid_attach(struct mpt_softc *mpt)
260 struct ccb_setasync csa;
261 mpt_handler_t handler;
264 mpt_callout_init(&mpt->raid_timer);
266 handler.reply_handler = mpt_raid_reply_handler;
267 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
270 mpt_prt(mpt, "Unable to register RAID haandler!\n");
274 error = mpt_spawn_raid_thread(mpt);
276 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
280 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
281 csa.ccb_h.func_code = XPT_SASYNC_CB;
282 csa.event_enable = AC_FOUND_DEVICE;
283 csa.callback = mpt_raid_async;
284 csa.callback_arg = mpt;
285 MPTLOCK_2_CAMLOCK(mpt);
286 xpt_action((union ccb *)&csa);
287 CAMLOCK_2_MPTLOCK(mpt);
288 if (csa.ccb_h.status != CAM_REQ_CMP) {
289 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
290 "CAM async handler.\n");
293 mpt_raid_sysctl_attach(mpt);
296 mpt_raid_detach(mpt);
/*
 * Personality enable hook.  Body not visible in this extract; presumably
 * a trivial success return — confirm against the full source.
 */
301 mpt_raid_enable(struct mpt_softc *mpt)
/*
 * Tear down everything mpt_raid_attach set up: stop the refresh timer,
 * terminate the RAID thread, deregister our reply handler, and disable
 * the CAM async callback (event_enable = 0).
 */
307 mpt_raid_detach(struct mpt_softc *mpt)
309 struct ccb_setasync csa;
310 mpt_handler_t handler;
312 callout_stop(&mpt->raid_timer);
313 mpt_terminate_raid_thread(mpt);
315 handler.reply_handler = mpt_raid_reply_handler;
316 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
/* Re-issue the async registration with no events to unhook ourselves. */
318 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
319 csa.ccb_h.func_code = XPT_SASYNC_CB;
320 csa.event_enable = 0;
321 csa.callback = mpt_raid_async;
322 csa.callback_arg = mpt;
323 MPTLOCK_2_CAMLOCK(mpt);
324 xpt_action((union ccb *)&csa);
325 CAMLOCK_2_MPTLOCK(mpt);
/* IOC reset hook for the RAID personality — currently a no-op. */
329 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
331 /* Nothing to do yet. */
/*
 * Human-readable names for MPI_EVENT_RAID_RC_* reason codes, indexed by
 * reason code (see mpt_raid_event).  The first entries (volume created/
 * deleted) are not visible in this extract.
 */
334 static const char *raid_event_txt[] =
338 "Volume Settings Changed",
339 "Volume Status Changed",
340 "Volume Physical Disk Membership Changed",
341 "Physical Disk Created",
342 "Physical Disk Deleted",
343 "Physical Disk Settings Changed",
344 "Physical Disk Status Changed",
345 "Domain Validation Required",
346 "SMART Data Received",
347 "Replace Action Started",
/*
 * Personality event hook.  Decodes MPI_EVENT_INTEGRATED_RAID
 * notifications: locates the affected volume and/or physical disk,
 * clears their UP2DATE (and possibly ANNOUNCED) flags so the RAID
 * thread refreshes them, logs the event, and wakes the RAID thread.
 */
351 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
352 MSG_EVENT_NOTIFY_REPLY *msg)
354 EVENT_DATA_RAID *raid_event;
355 struct mpt_raid_volume *mpt_vol;
356 struct mpt_raid_disk *mpt_disk;
357 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
/* Ignore everything except integrated-RAID events. */
361 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
365 raid_event = (EVENT_DATA_RAID *)&msg->Data;
/* Find the active volume matching the event's bus/ID, if any. */
369 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
370 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
371 mpt_vol = &mpt->raid_volumes[i];
372 vol_pg = mpt_vol->config_page;
374 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
377 if (vol_pg->VolumeID == raid_event->VolumeID
378 && vol_pg->VolumeBus == raid_event->VolumeBus)
381 if (i >= mpt->ioc_page2->MaxVolumes) {
/* 0xFF means "no physical disk associated with this event". */
388 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
389 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
390 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
/* Mark stale state per reason code so the RAID thread refreshes it. */
396 switch(raid_event->ReasonCode) {
397 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
398 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
400 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
401 if (mpt_vol != NULL) {
402 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
403 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
406 * Coalesce status messages into one
407 * per background run of our RAID thread.
408 * This removes "spurious" status messages
415 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
416 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
418 if (mpt_vol != NULL) {
419 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
422 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
423 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
426 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
427 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
429 if (mpt_disk != NULL) {
430 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
433 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
436 case MPI_EVENT_RAID_RC_SMART_DATA:
437 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
/* Log the event with the most specific prefix available. */
442 if (mpt_disk != NULL) {
443 mpt_disk_prt(mpt, mpt_disk, "");
444 } else if (mpt_vol != NULL) {
445 mpt_vol_prt(mpt, mpt_vol, "");
447 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
448 raid_event->VolumeID);
450 if (raid_event->PhysDiskNum != 0xFF)
451 mpt_prtc(mpt, ":%d): ",
452 raid_event->PhysDiskNum);
454 mpt_prtc(mpt, "): ");
457 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
458 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
459 raid_event->ReasonCode);
461 mpt_prtc(mpt, "%s\n",
462 raid_event_txt[raid_event->ReasonCode]);
/* SMART events carry SCSI sense-style ASC/ASCQ data worth printing. */
465 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
466 /* XXX Use CAM's print sense for this... */
467 if (mpt_disk != NULL)
468 mpt_disk_prt(mpt, mpt_disk, "")
470 mpt_prt(mpt, "Volume(%d:%d:%d: ",
471 raid_event->VolumeBus, raid_event->VolumeID,
472 raid_event->PhysDiskNum);
473 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
474 raid_event->ASC, raid_event->ASCQ);
/* Kick the RAID thread to refresh whatever we invalidated above. */
477 mpt_raid_wakeup(mpt);
/*
 * Shutdown hook: unless member write caching is configured
 * rebuild-only, force it off on every volume so no dirty member
 * cache survives the shutdown.
 */
482 mpt_raid_shutdown(struct mpt_softc *mpt)
484 struct mpt_raid_volume *mpt_vol;
486 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
490 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
491 RAID_VOL_FOREACH(mpt, mpt_vol) {
492 mpt_verify_mwce(mpt, mpt_vol);
/*
 * Reply dispatch for RAID action requests.  With a reply frame,
 * defer to mpt_raid_reply_frame_handler (which decides whether the
 * request may be freed); without one, fail any attached quiesce CCB.
 * Completed requests are marked DONE and either hand back to a
 * waiter or freed here.
 */
497 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
498 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
506 if (reply_frame != NULL)
507 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
509 else if (req->ccb != NULL) {
510 /* Complete Quiesce CCB with error... */
514 req->state &= ~REQ_STATE_QUEUED;
515 req->state |= REQ_STATE_DONE;
516 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
/* A sleeper owns the request; wake it instead of freeing. */
518 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
520 } else if (free_req) {
521 mpt_free_request(mpt, req);
528 * Parse additional completion information in the reply
529 * frame for RAID I/O requests.
532 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
533 MSG_DEFAULT_REPLY *reply_frame)
535 MSG_RAID_ACTION_REPLY *reply;
536 struct mpt_raid_action_result *action_result;
537 MSG_RAID_ACTION_REQUEST *rap;
539 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
540 req->IOCStatus = le16toh(reply->IOCStatus);
541 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
543 switch (rap->Action) {
544 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
545 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
547 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
548 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
553 action_result = REQ_TO_RAID_ACTION_RESULT(req);
554 memcpy(&action_result->action_data, &reply->ActionData,
555 sizeof(action_result->action_data));
556 action_result->action_status = reply->ActionStatus;
561 * Utiltity routine to perform a RAID action command;
564 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
565 struct mpt_raid_disk *disk, request_t *req, u_int Action,
566 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
569 MSG_RAID_ACTION_REQUEST *rap;
573 memset(rap, 0, sizeof *rap);
574 rap->Action = Action;
575 rap->ActionDataWord = ActionDataWord;
576 rap->Function = MPI_FUNCTION_RAID_ACTION;
577 rap->VolumeID = vol->config_page->VolumeID;
578 rap->VolumeBus = vol->config_page->VolumeBus;
580 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
582 rap->PhysDiskNum = 0xFF;
583 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
585 MPI_pSGE_SET_LENGTH(se, len);
586 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
587 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
588 MPI_SGE_FLAGS_END_OF_LIST |
589 write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
590 rap->MsgContext = htole32(req->index | raid_handler_id);
592 mpt_check_doorbell(mpt);
593 mpt_send_cmd(mpt, req);
596 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
597 /*sleep_ok*/FALSE, /*time_ms*/2000));
603 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the kernel thread that refreshes RAID state in the background.
 * Returns the mpt_kthread_create() error code; on failure the SIM queue
 * frozen below is released again.
 */
605 mpt_spawn_raid_thread(struct mpt_softc *mpt)
610 * Freeze out any CAM transactions until our thread
611 * is able to run at least once. We need to update
612 * our RAID pages before accepting I/O or we may
613 * reject I/O to an ID we later determine is for a
616 xpt_freeze_simq(mpt->phydisk_sim, 1);
617 error = mpt_kthread_create(mpt_raid_thread, mpt,
618 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
619 "mpt_raid%d", mpt->unit);
621 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
/*
 * Ask the RAID thread to exit and wait for it to acknowledge.  The
 * thread sleeps on mpt->raid_volumes; the exit interlock sleeps on
 * &mpt->raid_thread, which the thread wakes as it terminates.
 */
626 mpt_terminate_raid_thread(struct mpt_softc *mpt)
629 if (mpt->raid_thread == NULL) {
632 mpt->shutdwn_raid = 1;
633 wakeup(mpt->raid_volumes);
635 * Sleep on a slightly different location
636 * for this interlock just for added safety.
638 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/* Completion callback for the bus rescan CCB: release the path it used. */
642 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
644 xpt_free_path(ccb->ccb_h.path);
/*
 * Background RAID monitoring thread.  Loops until shutdwn_raid is set:
 * sleeps until woken (mpt_raid_wakeup), refreshes RAID configuration
 * data, releases the physical-disk SIM queue after the first successful
 * snapshot, and triggers a CAM bus rescan when requested.
 */
649 mpt_raid_thread(void *arg)
651 struct mpt_softc *mpt;
654 #if __FreeBSD_version >= 500000
657 mpt = (struct mpt_softc *)arg;
660 while (mpt->shutdwn_raid == 0) {
662 if (mpt->raid_wakeup == 0) {
663 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
667 mpt->raid_wakeup = 0;
/* On refresh failure, retry later via the refresh timer. */
669 if (mpt_refresh_raid_data(mpt)) {
670 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
675 * Now that we have our first snapshot of RAID data,
676 * allow CAM to access our physical disk bus.
680 MPTLOCK_2_CAMLOCK(mpt);
681 xpt_release_simq(mpt->phydisk_sim, TRUE);
682 CAMLOCK_2_MPTLOCK(mpt);
685 if (mpt->raid_rescan != 0) {
687 struct cam_path *path;
690 mpt->raid_rescan = 0;
/* Wildcard rescan of the physical-disk SIM. */
692 ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
693 error = xpt_create_path(&path, xpt_periph,
694 cam_sim_path(mpt->phydisk_sim),
695 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
696 if (error != CAM_REQ_CMP) {
698 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
700 xpt_setup_ccb(&ccb->ccb_h, path, 5);
701 ccb->ccb_h.func_code = XPT_SCAN_BUS;
702 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
703 ccb->crcn.flags = CAM_FLAG_NONE;
704 MPTLOCK_2_CAMLOCK(mpt);
706 CAMLOCK_2_MPTLOCK(mpt);
/* Signal mpt_terminate_raid_thread that we are gone. */
710 mpt->raid_thread = NULL;
711 wakeup(&mpt->raid_thread);
713 #if __FreeBSD_version >= 500000
/*
 * Quiesce physical I/O to a member disk before a pass-through command.
 * First call freezes the devq and issues an asynchronous QUIESCE_PHYS_IO
 * action with a 5 second watchdog, returning CAM_REQ_INPROG; once the
 * QUIESCED flag is set, returns CAM_REQ_CMP.  Errors release the devq
 * and return CAM_REQ_CMP_ERR.
 */
720 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
726 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
727 return (CAM_REQ_CMP);
729 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
732 mpt_disk->flags |= MPT_RDF_QUIESCING;
733 xpt_freeze_devq(ccb->ccb_h.path, 1);
735 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
736 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
737 /*ActionData*/0, /*addr*/0,
738 /*len*/0, /*write*/FALSE,
741 return (CAM_REQ_CMP_ERR);
/* Arm a watchdog in case the quiesce never completes. */
743 ccb->ccb_h.timeout_ch =
744 timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
746 if (rv == ETIMEDOUT) {
747 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
748 "Quiece Timed-out\n");
749 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
750 return (CAM_REQ_CMP_ERR);
753 ar = REQ_TO_RAID_ACTION_RESULT(req);
755 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
756 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
757 mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
758 "%d:%x:%x\n", rv, req->IOCStatus,
760 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
761 return (CAM_REQ_CMP_ERR);
764 return (CAM_REQ_INPROG);
766 return (CAM_REQUEUE_REQ);
769 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Translate a CCB's target id into the IOC's physical disk id for
 * pass-through access.  Succeeds only for active disks within range.
 * NOTE(review): the bounds check follows the array indexing; the index
 * is only used after the check passes, but verify in the full source.
 */
771 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
773 struct mpt_raid_disk *mpt_disk;
775 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
776 if (ccb->ccb_h.target_id < mpt->raid_max_disks
777 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
779 *tgt = mpt_disk->config_page.PhysDiskID;
782 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_map_physdisk(%d) - Not Active\n",
783 ccb->ccb_h.target_id);
/*
 * Bring a volume's enabled state in line with 'enable' by issuing an
 * ENABLE_VOLUME or DISABLE_VOLUME RAID action.  No-op when the current
 * state already matches.
 */
789 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
793 struct mpt_raid_action_result *ar;
794 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
798 vol_pg = mpt_vol->config_page;
799 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
802 * If the setting matches the configuration,
803 * there is nothing to do.
805 if ((enabled && enable)
806 || (!enabled && !enable))
809 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
811 mpt_vol_prt(mpt, mpt_vol,
812 "mpt_enable_vol: Get request failed!\n");
816 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
817 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
818 : MPI_RAID_ACTION_DISABLE_VOLUME,
819 /*data*/0, /*addr*/0, /*len*/0,
820 /*write*/FALSE, /*wait*/TRUE);
821 if (rv == ETIMEDOUT) {
822 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
823 "%s Volume Timed-out\n",
824 enable ? "Enable" : "Disable");
/* Check both the transport result and the RAID action status. */
827 ar = REQ_TO_RAID_ACTION_RESULT(req);
829 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
830 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
831 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
832 enable ? "Enable" : "Disable",
833 rv, req->IOCStatus, ar->action_status);
836 mpt_free_request(mpt, req);
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with the
 * driver policy (mpt->raid_mwce_setting): always-on, always-off,
 * rebuild-only (cache enabled only while resyncing), or no-change.  When
 * a change is needed, toggles the WCE bit via CHANGE_VOLUME_SETTINGS and
 * reverts the in-core copy on failure.
 */
841 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
844 struct mpt_raid_action_result *ar;
845 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
851 vol_pg = mpt_vol->config_page;
852 resyncing = vol_pg->VolumeStatus.Flags
853 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
854 mwce = vol_pg->VolumeSettings.Settings
855 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
858 * If the setting matches the configuration,
859 * there is nothing to do.
861 switch (mpt->raid_mwce_setting) {
862 case MPT_RAID_MWCE_REBUILD_ONLY:
863 if ((resyncing && mwce) || (!resyncing && !mwce)) {
866 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
867 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
869 * Wait one more status update to see if
870 * resyncing gets enabled. It gets disabled
871 * temporarily when WCE is changed.
876 case MPT_RAID_MWCE_ON:
880 case MPT_RAID_MWCE_OFF:
884 case MPT_RAID_MWCE_NC:
888 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
890 mpt_vol_prt(mpt, mpt_vol,
891 "mpt_verify_mwce: Get request failed!\n");
/*
 * Toggle WCE, snapshot the new settings word for the request,
 * then toggle back; the in-core copy is committed only on success.
 */
895 vol_pg->VolumeSettings.Settings ^=
896 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
897 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
898 vol_pg->VolumeSettings.Settings ^=
899 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
900 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
901 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
902 data, /*addr*/0, /*len*/0,
903 /*write*/FALSE, /*wait*/TRUE);
904 if (rv == ETIMEDOUT) {
905 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
906 "Write Cache Enable Timed-out\n");
909 ar = REQ_TO_RAID_ACTION_RESULT(req);
911 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
912 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
913 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
914 "%d:%x:%x\n", rv, req->IOCStatus,
917 vol_pg->VolumeSettings.Settings ^=
918 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
920 mpt_free_request(mpt, req);
/*
 * Reconcile a volume's resync rate and resync-priority bit with the
 * driver setting (mpt->raid_resync_rate; rates >= 128 are treated as
 * high priority).  Uses SET_RESYNC_RATE to change the rate and
 * CHANGE_VOLUME_SETTINGS to flip the priority bit; no-op when the
 * setting is MPT_RAID_RESYNC_RATE_NC.
 */
924 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
927 struct mpt_raid_action_result *ar;
928 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
932 vol_pg = mpt_vol->config_page;
934 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
938 * If the current RAID resync rate does not
939 * match our configured rate, update it.
941 prio = vol_pg->VolumeSettings.Settings
942 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
943 if (vol_pg->ResyncRate != 0
944 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
946 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
948 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
949 "Get request failed!\n");
953 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
954 MPI_RAID_ACTION_SET_RESYNC_RATE,
955 mpt->raid_resync_rate, /*addr*/0,
956 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
957 if (rv == ETIMEDOUT) {
958 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
959 "Resync Rate Setting Timed-out\n");
963 ar = REQ_TO_RAID_ACTION_RESULT(req);
965 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
966 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
967 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
968 "%d:%x:%x\n", rv, req->IOCStatus,
/* Commit the new rate to the in-core page only on success. */
971 vol_pg->ResyncRate = mpt->raid_resync_rate;
972 mpt_free_request(mpt, req);
973 } else if ((prio && mpt->raid_resync_rate < 128)
974 || (!prio && mpt->raid_resync_rate >= 128)) {
977 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
979 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
980 "Get request failed!\n");
/* Same toggle/snapshot/toggle-back pattern as mpt_verify_mwce. */
984 vol_pg->VolumeSettings.Settings ^=
985 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
986 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
987 vol_pg->VolumeSettings.Settings ^=
988 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
989 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
990 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
991 data, /*addr*/0, /*len*/0,
992 /*write*/FALSE, /*wait*/TRUE);
993 if (rv == ETIMEDOUT) {
994 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
995 "Resync Rate Setting Timed-out\n");
998 ar = REQ_TO_RAID_ACTION_RESULT(req);
1000 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1001 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1002 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1003 "%d:%x:%x\n", rv, req->IOCStatus,
1006 vol_pg->VolumeSettings.Settings ^=
1007 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1010 mpt_free_request(mpt, req);
/*
 * Set the number of simultaneously open transactions (queue depth) on
 * 'path' to the driver's configured RAID depth via an XPT_REL_SIMQ CCB
 * with RELSIM_ADJUST_OPENINGS.  Failures are logged, not propagated.
 */
1015 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1016 struct cam_path *path)
1018 struct ccb_relsim crs;
1020 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1021 crs.ccb_h.func_code = XPT_REL_SIMQ;
1022 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1023 crs.openings = mpt->raid_queue_depth;
1024 xpt_action((union ccb *)&crs);
1025 if (crs.ccb_h.status != CAM_REQ_CMP)
1026 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1027 "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Log a one-time description of a volume: its settings bits, hot spare
 * pool membership, and member disks (primary/secondary for mirrors,
 * stripe position otherwise).
 */
1031 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1033 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1036 vol_pg = mpt_vol->config_page;
1037 mpt_vol_prt(mpt, mpt_vol, "Settings (");
/* Walk each settings bit and print the ones that are set. */
1038 for (i = 1; i <= 0x8000; i <<= 1) {
1039 switch (vol_pg->VolumeSettings.Settings & i) {
1040 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1041 mpt_prtc(mpt, " Member-WCE");
1043 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1044 mpt_prtc(mpt, " Offline-On-SMART-Err");
1046 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1047 mpt_prtc(mpt, " Hot-Plug-Spares");
1049 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1050 mpt_prtc(mpt, " High-Priority-ReSync");
1056 mpt_prtc(mpt, " )\n");
1057 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1058 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1059 powerof2(vol_pg->VolumeSettings.HotSparePool)
/* HotSparePool is a bitmask of up to 8 pools. */
1061 for (i = 0; i < 8; i++) {
1065 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1067 mpt_prtc(mpt, " %d", i);
1069 mpt_prtc(mpt, "\n");
1071 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1072 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1073 struct mpt_raid_disk *mpt_disk;
1074 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1076 mpt_disk = mpt->raid_disks
1077 + vol_pg->PhysDisk[i].PhysDiskNum;
1078 disk_pg = &mpt_disk->config_page;
1080 mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
1081 disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
1082 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1083 mpt_prtc(mpt, "%s\n",
1084 mpt_disk->member_number == 0
1085 ? "Primary" : "Secondary");
1087 mpt_prtc(mpt, "Stripe Position %d\n",
1088 mpt_disk->member_number);
/*
 * Log a one-time description of a physical disk: its native and
 * pass-through addresses, and any hot spare pool membership.
 */
1093 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1095 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1098 disk_pg = &mpt_disk->config_page;
1099 mpt_disk_prt(mpt, mpt_disk,
1100 "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1101 device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus,
1102 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1103 /*bus*/1, mpt_disk - mpt->raid_disks);
1105 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1107 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1108 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
/* HotSparePool is a bitmask of up to 8 pools. */
1110 for (i = 0; i < 8; i++) {
1114 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1116 mpt_prtc(mpt, " %d", i);
1118 mpt_prtc(mpt, "\n");
/*
 * Re-read a physical disk's RAID_PHYSDISK page 0 (header, then current
 * page contents) into mpt_disk->config_page.  Errors are logged; the
 * (not visible here) return value reports them to the caller.
 */
1122 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1123 IOC_3_PHYS_DISK *ioc_disk)
1127 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1128 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1129 &mpt_disk->config_page.Header,
1130 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1132 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1133 "Failed to read RAID Disk Hdr(%d)\n",
1134 ioc_disk->PhysDiskNum);
1137 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1138 &mpt_disk->config_page.Header,
1139 sizeof(mpt_disk->config_page),
1140 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1142 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1143 "Failed to read RAID Disk Page(%d)\n",
1144 ioc_disk->PhysDiskNum);
/*
 * Re-read a volume's RAID_VOLUME page 0, mark the volume active,
 * rebind its member disks (volume pointer and member number; mirror
 * member numbers are normalized so the primary is 0), and — when a
 * resync is in progress — fetch the progress indicator structure via
 * an INDICATOR_STRUCT RAID action into mpt_vol->sync_progress.
 */
1148 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1149 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1151 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1152 struct mpt_raid_action_result *ar;
1157 vol_pg = mpt_vol->config_page;
1158 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1159 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
1160 /*PageNumber*/0, ioc_vol->VolumePageNumber,
1161 &vol_pg->Header, /*sleep_ok*/TRUE,
1162 /*timeout_ms*/5000);
1164 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1165 "Failed to read RAID Vol Hdr(%d)\n",
1166 ioc_vol->VolumePageNumber);
1169 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1170 &vol_pg->Header, mpt->raid_page0_len,
1171 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1173 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1174 "Failed to read RAID Vol Page(%d)\n",
1175 ioc_vol->VolumePageNumber);
1178 mpt_vol->flags |= MPT_RVF_ACTIVE;
1180 /* Update disk entry array data. */
1181 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1182 struct mpt_raid_disk *mpt_disk;
1184 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1185 mpt_disk->volume = mpt_vol;
1186 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1187 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1188 mpt_disk->member_number--;
/* Only resyncing volumes have a progress indicator to fetch. */
1191 if ((vol_pg->VolumeStatus.Flags
1192 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1195 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1197 mpt_vol_prt(mpt, mpt_vol,
1198 "mpt_refresh_raid_vol: Get request failed!\n");
1201 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1202 MPI_RAID_ACTION_INDICATOR_STRUCT,
1203 /*ActionWord*/0, /*addr*/0, /*len*/0,
1204 /*write*/FALSE, /*wait*/TRUE);
1205 if (rv == ETIMEDOUT) {
1206 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1207 "Progress indicator fetch timedout!\n");
1211 ar = REQ_TO_RAID_ACTION_RESULT(req);
1213 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1214 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1215 memcpy(&mpt_vol->sync_progress,
1216 &ar->action_data.indicator_struct,
1217 sizeof(mpt_vol->sync_progress));
1219 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1220 "Progress indicator fetch failed!\n");
1222 mpt_free_request(mpt, req);
1226 * Update in-core information about RAID support. We update any entries
1227 * that didn't previously exists or have been marked as needing to
1228 * be updated by our event handler. Interesting changes are displayed
1232 mpt_refresh_raid_data(struct mpt_softc *mpt)
1234 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1235 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1236 IOC_3_PHYS_DISK *ioc_disk;
1237 IOC_3_PHYS_DISK *ioc_last_disk;
1238 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1242 u_int nonopt_volumes;
1244 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1249 * Mark all items as unreferenced by the configuration.
1250 * This allows us to find, report, and discard stale
1253 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1254 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1256 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1257 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1261 * Get Physical Disk information.
1263 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1264 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1265 &mpt->ioc_page3->Header, len,
1266 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1269 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1273 ioc_disk = mpt->ioc_page3->PhysDisk;
1274 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1275 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1276 struct mpt_raid_disk *mpt_disk;
1278 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1279 mpt_disk->flags |= MPT_RDF_REFERENCED;
1280 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1281 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1283 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1286 mpt_disk->flags |= MPT_RDF_ACTIVE;
1291 * Refresh volume data.
1293 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1294 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1295 &mpt->ioc_page2->Header, len,
1296 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1298 mpt_prt(mpt, "mpt_refresh_raid_data: "
1299 "Failed to read IOC Page 2\n");
1303 ioc_vol = mpt->ioc_page2->RaidVolume;
1304 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1305 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1306 struct mpt_raid_volume *mpt_vol;
1308 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1309 mpt_vol->flags |= MPT_RVF_REFERENCED;
1310 vol_pg = mpt_vol->config_page;
1313 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1314 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1315 || (vol_pg->VolumeStatus.Flags
1316 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1318 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1320 mpt_vol->flags |= MPT_RVF_ACTIVE;
1324 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1325 struct mpt_raid_volume *mpt_vol;
1331 mpt_vol = &mpt->raid_volumes[i];
1333 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1336 vol_pg = mpt_vol->config_page;
1337 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1338 == MPT_RVF_ANNOUNCED) {
1339 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1344 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1346 mpt_announce_vol(mpt, mpt_vol);
1347 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1350 if (vol_pg->VolumeStatus.State !=
1351 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1354 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1357 mpt_vol->flags |= MPT_RVF_UP2DATE;
1358 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1359 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1360 mpt_verify_mwce(mpt, mpt_vol);
1362 if (vol_pg->VolumeStatus.Flags == 0)
1365 mpt_vol_prt(mpt, mpt_vol, "Status (");
1366 for (m = 1; m <= 0x80; m <<= 1) {
1367 switch (vol_pg->VolumeStatus.Flags & m) {
1368 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1369 mpt_prtc(mpt, " Enabled");
1371 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1372 mpt_prtc(mpt, " Quiesced");
1374 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1375 mpt_prtc(mpt, " Re-Syncing");
1377 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1378 mpt_prtc(mpt, " Inactive");
1384 mpt_prtc(mpt, " )\n");
1386 if ((vol_pg->VolumeStatus.Flags
1387 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1390 mpt_verify_resync_rate(mpt, mpt_vol);
1392 left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1393 total = u64toh(mpt_vol->sync_progress.TotalBlocks);
1394 if (vol_pg->ResyncRate != 0) {
1396 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1397 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1398 prio / 1000, prio % 1000);
1400 prio = vol_pg->VolumeSettings.Settings
1401 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1402 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1403 prio ? "High" : "Low");
1405 #if __FreeBSD_version >= 500000
1406 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1407 "blocks remaining\n", (uintmax_t)left,
1410 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1411 "blocks remaining\n", (uint64_t)left,
1415 /* Periodically report on sync progress. */
1416 mpt_schedule_raid_refresh(mpt);
1419 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1420 struct mpt_raid_disk *mpt_disk;
1421 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1424 mpt_disk = &mpt->raid_disks[i];
1425 disk_pg = &mpt_disk->config_page;
1427 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1430 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1431 == MPT_RDF_ANNOUNCED) {
1432 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1433 mpt_disk->flags = 0;
1438 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1440 mpt_announce_disk(mpt, mpt_disk);
1441 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1444 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1447 mpt_disk->flags |= MPT_RDF_UP2DATE;
1448 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1449 if (disk_pg->PhysDiskStatus.Flags == 0)
1452 mpt_disk_prt(mpt, mpt_disk, "Status (");
1453 for (m = 1; m <= 0x80; m <<= 1) {
1454 switch (disk_pg->PhysDiskStatus.Flags & m) {
1455 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1456 mpt_prtc(mpt, " Out-Of-Sync");
1458 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1459 mpt_prtc(mpt, " Quiesced");
1465 mpt_prtc(mpt, " )\n");
1468 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * Callout handler armed by mpt_schedule_raid_refresh().
 * Presumably wakes the RAID monitoring thread via mpt_raid_wakeup() so it
 * re-reads RAID state — TODO confirm against mpt_raid_wakeup()'s definition.
 * NOTE(review): this excerpt omits several original lines (return type,
 * braces, and possibly locking) around the visible statements.
 */
1473 mpt_raid_timer(void *arg)
1475 struct mpt_softc *mpt;
1477 mpt = (struct mpt_softc *)arg;
1479 mpt_raid_wakeup(mpt);
/*
 * Quiesce timeout handler.  The visible body is only the original
 * placeholder comment; completing the CCB with an error status appears
 * to be unimplemented here — confirm against the full source.
 */
1484 mpt_raid_quiesce_timeout(void *arg)
1486 /* Complete the CCB with error */
/*
 * (Re)arm the RAID refresh callout: after MPT_RAID_SYNC_REPORT_INTERVAL
 * ticks, mpt_raid_timer() fires with this softc as its argument.  Used to
 * periodically report on sync progress.
 */
1491 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1493 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1494 mpt_raid_timer, mpt);
/*
 * Release all RAID-related allocations hanging off the softc:
 * each volume's cached config page, the volume and disk arrays, and the
 * cached IOC pages 2 and 3.  Every freed pointer is NULLed so the routine
 * is safe to call more than once, and the max counts are reset to 0.
 */
1498 mpt_raid_free_mem(struct mpt_softc *mpt)
1501 if (mpt->raid_volumes) {
1502 struct mpt_raid_volume *mpt_raid;
/* Free the per-volume RAID config page before the array itself. */
1504 for (i = 0; i < mpt->raid_max_volumes; i++) {
1505 mpt_raid = &mpt->raid_volumes[i];
1506 if (mpt_raid->config_page) {
1507 free(mpt_raid->config_page, M_DEVBUF);
1508 mpt_raid->config_page = NULL;
1511 free(mpt->raid_volumes, M_DEVBUF);
1512 mpt->raid_volumes = NULL;
1514 if (mpt->raid_disks) {
1515 free(mpt->raid_disks, M_DEVBUF);
1516 mpt->raid_disks = NULL;
1518 if (mpt->ioc_page2) {
1519 free(mpt->ioc_page2, M_DEVBUF);
1520 mpt->ioc_page2 = NULL;
1522 if (mpt->ioc_page3) {
1523 free(mpt->ioc_page3, M_DEVBUF);
1524 mpt->ioc_page3 = NULL;
/* Nothing is allocated any longer; reflect that in the limits. */
1526 mpt->raid_max_volumes = 0;
1527 mpt->raid_max_disks = 0;
/*
 * Set the global resync rate and push it to every active volume.
 * Rejects rates outside [MPT_RAID_RESYNC_RATE_MIN, MPT_RAID_RESYNC_RATE_MAX]
 * unless the "no change" sentinel MPT_RAID_RESYNC_RATE_NC is given.
 * NOTE(review): the return statements are among the lines omitted from this
 * excerpt; presumably EINVAL on the range check and 0 on success — confirm.
 */
1531 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1533 struct mpt_raid_volume *mpt_vol;
1535 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1536 || rate < MPT_RAID_RESYNC_RATE_MIN)
1537 && rate != MPT_RAID_RESYNC_RATE_NC)
1541 mpt->raid_resync_rate = rate;
/* Inactive volumes are skipped; active ones get the new rate verified. */
1542 RAID_VOL_FOREACH(mpt, mpt_vol) {
1543 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1546 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * Set the default volume queue depth and apply it to every active volume.
 * Valid range is 1..255.  For each active volume a temporary CAM path is
 * built to the volume's target ID so mpt_adjust_queue_depth() can inform
 * CAM, then the path is freed.
 * NOTE(review): returns and some statements (e.g. the xpt_create_path()
 * argument carrying the LUN) are omitted from this excerpt — confirm.
 */
1553 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1555 struct mpt_raid_volume *mpt_vol;
1557 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1561 mpt->raid_queue_depth = vol_queue_depth;
1562 RAID_VOL_FOREACH(mpt, mpt_vol) {
1563 struct cam_path *path;
1566 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
/* A successful update supersedes any pending rescan request. */
1569 mpt->raid_rescan = 0;
1571 error = xpt_create_path(&path, xpt_periph,
1572 cam_sim_path(mpt->sim),
1573 mpt_vol->config_page->VolumeID,
/* Path creation failure is reported but does not abort the loop. */
1575 if (error != CAM_REQ_CMP) {
1576 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1579 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1580 xpt_free_path(path);
/*
 * Set the Member Write Cache Enable (MWCE) policy for all volumes.
 * A no-op if the setting is unchanged.  On the very first call (raid_mwce_set
 * still 0) with the policy moving from "no change" to "rebuild only", a
 * volume found resyncing with MWCE on is assumed to have suffered an unsafe
 * shutdown and a full resync is recommended (warning only; see XXX below).
 */
1587 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1589 struct mpt_raid_volume *mpt_vol;
1590 int force_full_resync;
1593 if (mwce == mpt->raid_mwce_setting) {
1599 * Catch MWCE being left on due to a failed shutdown. Since
1600 * sysctls cannot be set by the loader, we treat the first
1601 * setting of this variable specially and force a full volume
1602 * resync if MWCE is enabled and a resync is in progress.
1604 force_full_resync = 0;
1605 if (mpt->raid_mwce_set == 0
1606 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1607 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1608 force_full_resync = 1;
1610 mpt->raid_mwce_setting = mwce;
1611 RAID_VOL_FOREACH(mpt, mpt_vol) {
1612 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1616 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1619 vol_pg = mpt_vol->config_page;
1620 resyncing = vol_pg->VolumeStatus.Flags
1621 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1622 mwce = vol_pg->VolumeSettings.Settings
1623 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1624 if (force_full_resync && resyncing && mwce) {
1627 * XXX disable/enable volume should force a resync,
1628 * but we'll need to quiesce, drain, and restart
1631 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1632 "detected. Suggest full resync.\n");
1634 mpt_verify_mwce(mpt, mpt_vol);
/* Remember that the policy has been set at least once (see above). */
1636 mpt->raid_mwce_set = 1;
/*
 * Human-readable names for the mpt_raid_mwce_t policy values, consumed by
 * the vol_member_wce sysctl handler (indexed by policy enum value).
 * NOTE(review): the other entries and the closing brace of this initializer
 * are omitted from this excerpt.
 */
1641 const char *mpt_vol_mwce_strs[] =
1645 "On-During-Rebuild",
/*
 * sysctl handler for "vol_member_wce".  Reports the current MWCE policy
 * as a string; on write, matches the new string against mpt_vol_mwce_strs[]
 * and applies the corresponding policy via mpt_raid_set_vol_mwce().
 * NOTE(review): the declarations of str/inbuf/size/i and several returns
 * (including the unmatched-string error path) are omitted from this excerpt.
 */
1650 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1653 struct mpt_softc *mpt;
1661 mpt = (struct mpt_softc *)arg1;
1662 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1663 error = SYSCTL_OUT(req, str, strlen(str) + 1);
/* Read-only request (or output error): nothing further to do. */
1664 if (error || !req->newptr) {
/* Reject writes too long to fit (with NUL) in the input buffer. */
1668 size = req->newlen - req->newidx;
1669 if (size >= sizeof(inbuf)) {
1673 error = SYSCTL_IN(req, inbuf, size);
/* Policy index == position of the matching name in the string table. */
1678 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1679 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1680 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * sysctl handler for "vol_resync_rate".  Standard read/modify pattern:
 * copy the current rate into a local, let sysctl_handle_int() report it
 * and (for writes) overwrite it, then validate/apply the new value with
 * mpt_raid_set_vol_resync_rate().
 */
1687 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1689 struct mpt_softc *mpt;
1690 u_int raid_resync_rate;
1695 mpt = (struct mpt_softc *)arg1;
1696 raid_resync_rate = mpt->raid_resync_rate;
1698 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
/* Read-only request or handler error: no state change. */
1699 if (error || !req->newptr) {
1703 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * sysctl handler for "vol_queue_depth".  Mirrors the resync-rate handler:
 * expose the current depth through sysctl_handle_int() and, on write,
 * validate/apply the new value with mpt_raid_set_vol_queue_depth().
 */
1707 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1709 struct mpt_softc *mpt;
1710 u_int raid_queue_depth;
1715 mpt = (struct mpt_softc *)arg1;
1716 raid_queue_depth = mpt->raid_queue_depth;
1718 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
/* Read-only request or handler error: no state change. */
1719 if (error || !req->newptr) {
1723 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1727 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1729 #if __FreeBSD_version >= 500000
1730 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1731 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1733 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1734 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1735 mpt_raid_sysctl_vol_member_wce, "A",
1736 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1738 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1739 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1740 mpt_raid_sysctl_vol_queue_depth, "I",
1741 "default volume queue depth");
1743 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1744 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1745 mpt_raid_sysctl_vol_resync_rate, "I",
1746 "volume resync priority (0 == NC, 1 - 255)");
1747 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1748 "nonoptimal_volumes", CTLFLAG_RD,
1749 &mpt->raid_nonopt_volumes, 0,
1750 "number of nonoptimal volumes");