2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define GIANT_REQUIRED
61 #include <cam/cam_periph.h>
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
67 #include <machine/stdarg.h>
/*
 * Per-request result of a RAID action command.  Lives immediately after
 * the MSG_RAID_ACTION_REQUEST in the request buffer (see
 * REQ_TO_RAID_ACTION_RESULT below).
 * NOTE(review): extraction gaps — the union wrapper around action_data
 * and the struct's braces are not visible in this chunk.
 */
69 struct mpt_raid_action_result
72 MPI_RAID_VOL_INDICATOR indicator_struct;
73 uint32_t new_settings;
74 uint8_t phys_disk_num;
76 uint16_t action_status;
/* Locate the action result stashed just past the request message. */
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
/* Strip status-bit flags, leaving only the IOC status code proper. */
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
/*
 * Driver "personality" hooks: the RAID layer registers itself with the
 * core mpt(4) driver through this vector of entry points.
 */
84 static mpt_probe_handler_t mpt_raid_probe;
85 static mpt_attach_handler_t mpt_raid_attach;
86 static mpt_enable_handler_t mpt_raid_enable;
87 static mpt_event_handler_t mpt_raid_event;
88 static mpt_shutdown_handler_t mpt_raid_shutdown;
89 static mpt_reset_handler_t mpt_raid_ioc_reset;
90 static mpt_detach_handler_t mpt_raid_detach;
92 static struct mpt_personality mpt_raid_personality =
95 .probe = mpt_raid_probe,
96 .attach = mpt_raid_attach,
97 .enable = mpt_raid_enable,
98 .event = mpt_raid_event,
99 .reset = mpt_raid_ioc_reset,
100 .shutdown = mpt_raid_shutdown,
101 .detach = mpt_raid_detach,
/* Register after core and CAM personalities; depends on mpt_cam. */
104 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
105 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
/* Forward declarations for the RAID monitoring thread and helpers. */
107 static mpt_reply_handler_t mpt_raid_reply_handler;
108 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
109 MSG_DEFAULT_REPLY *reply_frame);
110 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
111 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
112 static void mpt_raid_thread(void *arg);
113 static timeout_t mpt_raid_timer;
115 static void mpt_enable_vol(struct mpt_softc *mpt,
116 struct mpt_raid_volume *mpt_vol, int enable);
118 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
119 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
/* Sysctl attachment is only available on FreeBSD >= 5.x. */
121 #if __FreeBSD_version < 500000
122 #define mpt_raid_sysctl_attach(x) do { } while (0)
124 static void mpt_raid_sysctl_attach(struct mpt_softc *);
/* String-formatting helpers for volume/disk state reporting. */
127 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
128 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
129 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
130 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
131 const char *fmt, ...);
132 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
133 const char *fmt, ...);
135 static int mpt_issue_raid_req(struct mpt_softc *mpt,
136 struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
137 u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
138 int write, int wait);
140 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
141 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
/* Reply-handler id assigned at attach; NONE until registered. */
143 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * mpt_vol_type(): map a volume's firmware VolumeType code to a
 * human-readable string (IS/IME/IM RAID levels).
 * NOTE(review): extraction gaps — the return-string lines and default
 * case are not visible in this chunk.
 */
146 mpt_vol_type(struct mpt_raid_volume *vol)
148 switch (vol->config_page->VolumeType) {
149 case MPI_RAID_VOL_TYPE_IS:
151 case MPI_RAID_VOL_TYPE_IME:
153 case MPI_RAID_VOL_TYPE_IM:
/*
 * mpt_vol_state(): map a volume's status State field to a display
 * string (Optimal/Degraded/Failed).
 * NOTE(review): return-string lines and default case missing from view.
 */
161 mpt_vol_state(struct mpt_raid_volume *vol)
163 switch (vol->config_page->VolumeStatus.State) {
164 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
166 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
168 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/*
 * mpt_disk_state(): map a physical disk's PhysDiskStatus.State to a
 * display string.  Some return lines are not visible in this chunk.
 */
176 mpt_disk_state(struct mpt_raid_disk *disk)
178 switch (disk->config_page.PhysDiskStatus.State) {
179 case MPI_PHYSDISK0_STATUS_ONLINE:
181 case MPI_PHYSDISK0_STATUS_MISSING:
183 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
184 return ("Incompatible");
185 case MPI_PHYSDISK0_STATUS_FAILED:
187 case MPI_PHYSDISK0_STATUS_INITIALIZING:
188 return ("Initializing");
189 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
190 return ("Offline Requested");
191 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
192 return ("Failed per Host Request");
193 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * mpt_vol_prt(): printf-style console message prefixed with the unit
 * name and volume identity (index, bus, ID).  The varargs forwarding
 * body is not visible in this chunk.
 */
201 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
202 const char *fmt, ...)
206 printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
207 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
208 vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * mpt_disk_prt(): printf-style console message prefixed with the disk's
 * identity.  Uses volume ID + member number when the disk belongs to a
 * volume, otherwise its physical bus/target address.
 */
215 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
216 const char *fmt, ...)
220 if (disk->volume != NULL) {
221 printf("(%s:vol%d:%d): ",
222 device_get_nameunit(mpt->dev),
223 disk->volume->config_page->VolumeID,
224 disk->member_number);
226 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
227 disk->config_page.PhysDiskBus,
228 disk->config_page.PhysDiskID);
/*
 * mpt_raid_async(): CAM async-event callback.  On AC_FOUND_DEVICE for a
 * target matching an active RAID volume, clamp the device queue depth
 * to the RAID queue depth via mpt_adjust_queue_depth().
 * NOTE(review): the switch statement and some guard lines are missing
 * from this extraction.
 */
236 mpt_raid_async(void *callback_arg, u_int32_t code,
237 struct cam_path *path, void *arg)
239 struct mpt_softc *mpt;
241 mpt = (struct mpt_softc*)callback_arg;
243 case AC_FOUND_DEVICE:
245 struct ccb_getdev *cgd;
246 struct mpt_raid_volume *mpt_vol;
248 cgd = (struct ccb_getdev *)arg;
253 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
254 cgd->ccb_h.target_id);
/* Only active volumes are matched against the new device's target id. */
256 RAID_VOL_FOREACH(mpt, mpt_vol) {
257 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
260 if (mpt_vol->config_page->VolumeID
261 == cgd->ccb_h.target_id) {
262 mpt_adjust_queue_depth(mpt, mpt_vol, path);
/*
 * mpt_raid_probe(): decline to attach when the controller advertises no
 * RAID physical disks (no IOC page 2 or MaxPhysDisks == 0).
 * Return-value lines are not visible in this chunk.
 */
273 mpt_raid_probe(struct mpt_softc *mpt)
276 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
/*
 * mpt_raid_attach(): start the RAID monitoring thread, register the
 * reply handler, hook CAM AC_FOUND_DEVICE async events, and attach
 * sysctl knobs.  On failure paths (not all visible here) it unwinds via
 * mpt_raid_detach().
 * NOTE(review): "haandler" typo in the error message string below —
 * fix when the full file is editable.
 */
283 mpt_raid_attach(struct mpt_softc *mpt)
285 struct ccb_setasync csa;
286 mpt_handler_t handler;
289 mpt_callout_init(mpt, &mpt->raid_timer);
291 error = mpt_spawn_raid_thread(mpt);
293 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
298 handler.reply_handler = mpt_raid_reply_handler;
299 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
302 mpt_prt(mpt, "Unable to register RAID haandler!\n");
/* Subscribe to new-device async events so queue depths can be adjusted. */
306 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
307 csa.ccb_h.func_code = XPT_SASYNC_CB;
308 csa.event_enable = AC_FOUND_DEVICE;
309 csa.callback = mpt_raid_async;
310 csa.callback_arg = mpt;
311 xpt_action((union ccb *)&csa);
312 if (csa.ccb_h.status != CAM_REQ_CMP) {
313 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
314 "CAM async handler.\n");
318 mpt_raid_sysctl_attach(mpt);
/* Error unwind: common cleanup path. */
322 mpt_raid_detach(mpt);
/*
 * mpt_raid_enable(): personality enable hook.  Body not visible in this
 * extraction (presumably trivial — confirm against full source).
 */
327 mpt_raid_enable(struct mpt_softc *mpt)
/*
 * mpt_raid_detach(): tear down in reverse order of attach — drain the
 * refresh timer, stop the RAID thread, deregister the reply handler,
 * and disable the CAM async callback.
 */
334 mpt_raid_detach(struct mpt_softc *mpt)
336 struct ccb_setasync csa;
337 mpt_handler_t handler;
339 mpt_callout_drain(mpt, &mpt->raid_timer);
342 mpt_terminate_raid_thread(mpt);
343 handler.reply_handler = mpt_raid_reply_handler;
344 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
/* event_enable = 0 cancels the AC_FOUND_DEVICE subscription. */
346 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
347 csa.ccb_h.func_code = XPT_SASYNC_CB;
348 csa.event_enable = 0;
349 csa.callback = mpt_raid_async;
350 csa.callback_arg = mpt;
351 xpt_action((union ccb *)&csa);
/* mpt_raid_ioc_reset(): IOC reset hook — currently a no-op. */
356 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
359 /* Nothing to do yet. */
/*
 * Text descriptions indexed by MPI_EVENT_RAID_RC_* reason code.
 * NOTE(review): the first entries (e.g. "Volume Created"/"Deleted")
 * are not visible in this extraction — indices must line up with the
 * MPI reason-code values.
 */
362 static const char *raid_event_txt[] =
366 "Volume Settings Changed",
367 "Volume Status Changed",
368 "Volume Physical Disk Membership Changed",
369 "Physical Disk Created",
370 "Physical Disk Deleted",
371 "Physical Disk Settings Changed",
372 "Physical Disk Status Changed",
373 "Domain Validation Required",
374 "SMART Data Received",
375 "Replace Action Started",
/*
 * mpt_raid_event(): handle MPI_EVENT_INTEGRATED_RAID notifications.
 * Looks up the affected volume and/or physical disk, clears the
 * relevant UP2DATE flags so the RAID thread refreshes configuration
 * data, logs the event, and wakes the RAID thread.
 */
379 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
380 MSG_EVENT_NOTIFY_REPLY *msg)
382 EVENT_DATA_RAID *raid_event;
383 struct mpt_raid_volume *mpt_vol;
384 struct mpt_raid_disk *mpt_disk;
385 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
/* Ignore anything that is not an integrated-RAID event. */
389 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
393 raid_event = (EVENT_DATA_RAID *)&msg->Data;
/* Find the active volume matching the event's VolumeID/VolumeBus. */
397 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
398 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
399 mpt_vol = &mpt->raid_volumes[i];
400 vol_pg = mpt_vol->config_page;
402 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
405 if (vol_pg->VolumeID == raid_event->VolumeID
406 && vol_pg->VolumeBus == raid_event->VolumeBus)
409 if (i >= mpt->ioc_page2->MaxVolumes) {
/* PhysDiskNum 0xFF means "no physical disk involved". */
416 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
417 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
418 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
424 switch(raid_event->ReasonCode) {
425 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
426 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
428 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
429 if (mpt_vol != NULL) {
430 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
431 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
434 * Coalesce status messages into one
435 * per background run of our RAID thread.
436 * This removes "spurious" status messages
443 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
444 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
446 if (mpt_vol != NULL) {
447 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
450 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
451 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
454 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
455 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
457 if (mpt_disk != NULL) {
458 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
461 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
464 case MPI_EVENT_RAID_RC_SMART_DATA:
465 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
/* Log with the most specific identity available: disk > volume > raw ids. */
470 if (mpt_disk != NULL) {
471 mpt_disk_prt(mpt, mpt_disk, "");
472 } else if (mpt_vol != NULL) {
473 mpt_vol_prt(mpt, mpt_vol, "");
475 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
476 raid_event->VolumeID);
478 if (raid_event->PhysDiskNum != 0xFF)
479 mpt_prtc(mpt, ":%d): ",
480 raid_event->PhysDiskNum);
482 mpt_prtc(mpt, "): ");
/* Out-of-range reason codes get a raw hex dump instead of text. */
485 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
486 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
487 raid_event->ReasonCode);
489 mpt_prtc(mpt, "%s\n",
490 raid_event_txt[raid_event->ReasonCode]);
/* SMART events carry ASC/ASCQ sense data worth reporting. */
493 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
494 /* XXX Use CAM's print sense for this... */
495 if (mpt_disk != NULL)
496 mpt_disk_prt(mpt, mpt_disk, "");
498 mpt_prt(mpt, "Volume(%d:%d:%d: ",
499 raid_event->VolumeBus, raid_event->VolumeID,
500 raid_event->PhysDiskNum);
501 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
502 raid_event->ASC, raid_event->ASCQ);
/* Kick the RAID thread to refresh configuration pages. */
505 mpt_raid_wakeup(mpt);
/*
 * mpt_raid_shutdown(): at system shutdown, force member write-cache off
 * (unless policy is REBUILD_ONLY, where the early-return path — not
 * visible here — applies) so volumes are left in a safe state.
 */
510 mpt_raid_shutdown(struct mpt_softc *mpt)
512 struct mpt_raid_volume *mpt_vol;
514 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
518 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
519 RAID_VOL_FOREACH(mpt, mpt_vol) {
520 mpt_verify_mwce(mpt, mpt_vol);
/*
 * mpt_raid_reply_handler(): completion path for RAID action requests.
 * Parses the reply frame (if any), marks the request DONE, removes it
 * from the pending list, and either wakes a sleeping waiter or frees
 * the request.
 */
525 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
526 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
534 if (reply_frame != NULL)
535 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
537 else if (req->ccb != NULL) {
538 /* Complete Quiesce CCB with error... */
542 req->state &= ~REQ_STATE_QUEUED;
543 req->state |= REQ_STATE_DONE;
544 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
/* A waiter (mpt_wait_req) owns the request; otherwise free it here. */
546 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
548 } else if (free_req) {
549 mpt_free_request(mpt, req);
556 * Parse additional completion information in the reply
557 * frame for RAID I/O requests.
/*
 * Copies IOCStatus/ActionStatus/ActionData from the reply into the
 * request's mpt_raid_action_result area for the waiter to inspect.
 * NOTE(review): "ENABLY" typo in the message string below — fix when
 * the full file is editable.
 */
560 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
561 MSG_DEFAULT_REPLY *reply_frame)
563 MSG_RAID_ACTION_REPLY *reply;
564 struct mpt_raid_action_result *action_result;
565 MSG_RAID_ACTION_REQUEST *rap;
567 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
568 req->IOCStatus = le16toh(reply->IOCStatus);
569 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
571 switch (rap->Action) {
572 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
573 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
575 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
576 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
/* Stash the reply's action data where REQ_TO_RAID_ACTION_RESULT finds it. */
581 action_result = REQ_TO_RAID_ACTION_RESULT(req);
582 memcpy(&action_result->action_data, &reply->ActionData,
583 sizeof(action_result->action_data));
584 action_result->action_status = le16toh(reply->ActionStatus);
589 * Utiltity routine to perform a RAID action command;
/*
 * Builds a MSG_RAID_ACTION_REQUEST (volume id/bus, optional physical
 * disk, single simple SGE describing the action data buffer) and sends
 * it.  When `wait` is set (path below), blocks up to 2s for completion.
 * NOTE(review): in the MPI_pSGE_SET_FLAGS call, `|` binds tighter than
 * `?:`, so `A | B | write ? X : Y` parses as `(A|B|write) ? X : Y` —
 * verify against the full source whether this is the intended grouping.
 */
592 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
593 struct mpt_raid_disk *disk, request_t *req, u_int Action,
594 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
597 MSG_RAID_ACTION_REQUEST *rap;
601 memset(rap, 0, sizeof *rap);
602 rap->Action = Action;
603 rap->ActionDataWord = htole32(ActionDataWord);
604 rap->Function = MPI_FUNCTION_RAID_ACTION;
605 rap->VolumeID = vol->config_page->VolumeID;
606 rap->VolumeBus = vol->config_page->VolumeBus;
/* 0xFF = "no physical disk" sentinel, matching the event convention. */
608 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
610 rap->PhysDiskNum = 0xFF;
611 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
612 se->Address = htole32(addr);
613 MPI_pSGE_SET_LENGTH(se, len);
614 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
615 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
616 MPI_SGE_FLAGS_END_OF_LIST |
617 write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
618 se->FlagsLength = htole32(se->FlagsLength);
/* MsgContext routes the reply back to raid_handler_id. */
619 rap->MsgContext = htole32(req->index | raid_handler_id);
621 mpt_check_doorbell(mpt);
622 mpt_send_cmd(mpt, req);
625 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
626 /*sleep_ok*/FALSE, /*time_ms*/2000));
632 /*************************** RAID Status Monitoring ***************************/
/*
 * mpt_spawn_raid_thread(): create the background RAID monitoring
 * thread.  The physical-disk SIM queue is frozen until the thread has
 * refreshed RAID data at least once; on spawn failure it is released
 * immediately.
 */
634 mpt_spawn_raid_thread(struct mpt_softc *mpt)
639 * Freeze out any CAM transactions until our thread
640 * is able to run at least once. We need to update
641 * our RAID pages before acception I/O or we may
642 * reject I/O to an ID we later determine is for a
646 xpt_freeze_simq(mpt->phydisk_sim, 1);
648 error = mpt_kthread_create(mpt_raid_thread, mpt,
649 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
650 "mpt_raid%d", mpt->unit);
/* Spawn failed: undo the freeze so CAM is not wedged. */
653 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
/*
 * mpt_terminate_raid_thread(): request thread exit (shutdwn_raid flag),
 * wake it, then sleep on &mpt->raid_thread until the thread clears that
 * pointer and wakes us on its way out.
 */
660 mpt_terminate_raid_thread(struct mpt_softc *mpt)
663 if (mpt->raid_thread == NULL) {
666 mpt->shutdwn_raid = 1;
667 wakeup(&mpt->raid_volumes);
669 * Sleep on a slightly different location
670 * for this interlock just for added safety.
672 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/*
 * mpt_raid_thread(): background worker.  Sleeps until woken
 * (mpt_raid_wakeup), refreshes RAID configuration data, releases the
 * phydisk SIM queue after the first successful refresh, and performs
 * any requested bus rescan.  Exits when shutdwn_raid is set.
 */
676 mpt_raid_thread(void *arg)
678 struct mpt_softc *mpt;
681 mpt = (struct mpt_softc *)arg;
684 while (mpt->shutdwn_raid == 0) {
686 if (mpt->raid_wakeup == 0) {
687 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
691 mpt->raid_wakeup = 0;
/* On refresh failure, retry later via the timer. */
693 if (mpt_refresh_raid_data(mpt)) {
694 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
699 * Now that we have our first snapshot of RAID data,
700 * allow CAM to access our physical disk bus.
704 MPTLOCK_2_CAMLOCK(mpt);
705 xpt_release_simq(mpt->phydisk_sim, TRUE);
706 CAMLOCK_2_MPTLOCK(mpt);
709 if (mpt->raid_rescan != 0) {
713 mpt->raid_rescan = 0;
716 ccb = xpt_alloc_ccb();
719 error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
720 cam_sim_path(mpt->phydisk_sim),
721 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
722 if (error != CAM_REQ_CMP) {
724 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
/* Thread exit: clear the handle and wake mpt_terminate_raid_thread(). */
730 mpt->raid_thread = NULL;
731 wakeup(&mpt->raid_thread);
/*
 * mpt_raid_quiesce_timeout(): timeout handler armed by
 * mpt_raid_quiesce_disk(); body not visible in this extraction.
 */
738 mpt_raid_quiesce_timeout(void *arg)
741 /* Complete the CCB with error */
745 static timeout_t mpt_raid_quiesce_timeout;
/*
 * mpt_raid_quiesce_disk(): request quiescing of physical I/O to a RAID
 * member disk.  Freezes the device queue, issues
 * MPI_RAID_ACTION_QUIESCE_PHYS_IO, and arms a 5s timeout; returns CAM
 * status codes to the caller.
 * NOTE(review): "Quiece" typos in the two message strings below — fix
 * when the full file is editable.
 */
747 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
/* Already quiesced: nothing to do. */
753 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
754 return (CAM_REQ_CMP);
756 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
759 mpt_disk->flags |= MPT_RDF_QUIESCING;
760 xpt_freeze_devq(ccb->ccb_h.path, 1);
762 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
763 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
764 /*ActionData*/0, /*addr*/0,
765 /*len*/0, /*write*/FALSE,
768 return (CAM_REQ_CMP_ERR);
770 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
772 if (rv == ETIMEDOUT) {
773 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
774 "Quiece Timed-out\n");
775 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
776 return (CAM_REQ_CMP_ERR);
/* Any IOC- or action-level failure releases the devq and errors out. */
779 ar = REQ_TO_RAID_ACTION_RESULT(req);
781 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
782 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
783 mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
784 "%d:%x:%x\n", rv, req->IOCStatus,
786 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
787 return (CAM_REQ_CMP_ERR);
790 return (CAM_REQ_INPROG);
792 return (CAM_REQUEUE_REQ);
796 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * mpt_map_physdisk(): translate a pass-through CCB target id to the
 * physical disk's real PhysDiskID.
 * NOTE(review): mpt_disk is computed by indexing raid_disks with
 * target_id BEFORE the bounds check below; the pointer is only
 * dereferenced after the check, but confirm against the full source.
 */
798 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
800 struct mpt_raid_disk *mpt_disk;
802 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
803 if (ccb->ccb_h.target_id < mpt->raid_max_disks
804 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
805 *tgt = mpt_disk->config_page.PhysDiskID;
808 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
809 ccb->ccb_h.target_id);
813 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * mpt_is_raid_member(): true iff target id `tgt` matches the
 * PhysDiskID of any active RAID member disk.
 */
815 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
817 struct mpt_raid_disk *mpt_disk;
820 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
822 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
823 mpt_disk = &mpt->raid_disks[i];
824 if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
825 mpt_disk->config_page.PhysDiskID == tgt)
832 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * mpt_is_raid_volume(): true iff target id `tgt` matches the VolumeID
 * of any active volume listed in IOC page 2.
 */
834 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
836 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
837 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
839 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
842 ioc_vol = mpt->ioc_page2->RaidVolume;
843 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
844 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
845 if (ioc_vol->VolumeID == tgt) {
/*
 * mpt_enable_vol(): bring a volume's enabled state in line with
 * `enable` by issuing MPI_RAID_ACTION_ENABLE_VOLUME or
 * MPI_RAID_ACTION_DISABLE_VOLUME.  No-op when already in the desired
 * state.
 */
854 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
858 struct mpt_raid_action_result *ar;
859 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
863 vol_pg = mpt_vol->config_page;
864 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
867 * If the setting matches the configuration,
868 * there is nothing to do.
870 if ((enabled && enable)
871 || (!enabled && !enable))
874 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
876 mpt_vol_prt(mpt, mpt_vol,
877 "mpt_enable_vol: Get request failed!\n");
881 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
882 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
883 : MPI_RAID_ACTION_DISABLE_VOLUME,
884 /*data*/0, /*addr*/0, /*len*/0,
885 /*write*/FALSE, /*wait*/TRUE);
886 if (rv == ETIMEDOUT) {
887 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
888 "%s Volume Timed-out\n",
889 enable ? "Enable" : "Disable");
/* Report any IOC- or action-level failure. */
892 ar = REQ_TO_RAID_ACTION_RESULT(req);
894 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
895 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
896 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
897 enable ? "Enable" : "Disable",
898 rv, req->IOCStatus, ar->action_status);
901 mpt_free_request(mpt, req);
/*
 * mpt_verify_mwce(): reconcile a volume's Member Write Cache Enable
 * (MWCE) setting with the driver policy (raid_mwce_setting): toggle it
 * via MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS when they disagree.
 * REBUILD_ONLY enables MWCE only while a resync is in progress, with a
 * one-update debounce (WCE_CHANGED flag) because resync is reported
 * disabled briefly after a WCE change.
 */
906 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
909 struct mpt_raid_action_result *ar;
910 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
916 vol_pg = mpt_vol->config_page;
917 resyncing = vol_pg->VolumeStatus.Flags
918 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
919 mwce = vol_pg->VolumeSettings.Settings
920 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
923 * If the setting matches the configuration,
924 * there is nothing to do.
926 switch (mpt->raid_mwce_setting) {
927 case MPT_RAID_MWCE_REBUILD_ONLY:
928 if ((resyncing && mwce) || (!resyncing && !mwce)) {
931 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
932 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
934 * Wait one more status update to see if
935 * resyncing gets enabled. It gets disabled
936 * temporarilly when WCE is changed.
941 case MPT_RAID_MWCE_ON:
945 case MPT_RAID_MWCE_OFF:
949 case MPT_RAID_MWCE_NC:
953 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
955 mpt_vol_prt(mpt, mpt_vol,
956 "mpt_verify_mwce: Get request failed!\n");
/*
 * Toggle the WCE bit to form the new settings word for the command,
 * then toggle back: the cached page is only committed on success.
 */
960 vol_pg->VolumeSettings.Settings ^=
961 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
962 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
963 vol_pg->VolumeSettings.Settings ^=
964 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
965 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
966 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
967 data, /*addr*/0, /*len*/0,
968 /*write*/FALSE, /*wait*/TRUE);
969 if (rv == ETIMEDOUT) {
970 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
971 "Write Cache Enable Timed-out\n");
974 ar = REQ_TO_RAID_ACTION_RESULT(req);
976 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
977 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
978 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
979 "%d:%x:%x\n", rv, req->IOCStatus,
/* Success path: commit the toggled WCE bit into the cached page. */
982 vol_pg->VolumeSettings.Settings ^=
983 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
985 mpt_free_request(mpt, req);
/*
 * mpt_verify_resync_rate(): bring the volume's resync rate and
 * high-priority-resync setting in line with the driver's configured
 * raid_resync_rate.  RATE_NC means "don't care" — leave firmware
 * defaults alone.  Rates >= 128 imply the priority-resync bit should
 * be clear, < 128 that it should be set.
 */
989 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
992 struct mpt_raid_action_result *ar;
993 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
997 vol_pg = mpt_vol->config_page;
999 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
1003 * If the current RAID resync rate does not
1004 * match our configured rate, update it.
1006 prio = vol_pg->VolumeSettings.Settings
1007 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1008 if (vol_pg->ResyncRate != 0
1009 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1011 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1013 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1014 "Get request failed!\n");
1018 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1019 MPI_RAID_ACTION_SET_RESYNC_RATE,
1020 mpt->raid_resync_rate, /*addr*/0,
1021 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1022 if (rv == ETIMEDOUT) {
1023 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1024 "Resync Rate Setting Timed-out\n");
1028 ar = REQ_TO_RAID_ACTION_RESULT(req);
1030 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1031 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1032 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1033 "%d:%x:%x\n", rv, req->IOCStatus,
/* Success: cache the new rate locally. */
1036 vol_pg->ResyncRate = mpt->raid_resync_rate;
1037 mpt_free_request(mpt, req);
1038 } else if ((prio && mpt->raid_resync_rate < 128)
1039 || (!prio && mpt->raid_resync_rate >= 128)) {
1042 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1044 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1045 "Get request failed!\n");
/*
 * Same toggle-copy-toggle pattern as mpt_verify_mwce(): build the new
 * settings word without committing the cached page until success.
 */
1049 vol_pg->VolumeSettings.Settings ^=
1050 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1051 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1052 vol_pg->VolumeSettings.Settings ^=
1053 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1054 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1055 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1056 data, /*addr*/0, /*len*/0,
1057 /*write*/FALSE, /*wait*/TRUE);
1058 if (rv == ETIMEDOUT) {
1059 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1060 "Resync Rate Setting Timed-out\n");
1063 ar = REQ_TO_RAID_ACTION_RESULT(req);
1065 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1066 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1067 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1068 "%d:%x:%x\n", rv, req->IOCStatus,
1071 vol_pg->VolumeSettings.Settings ^=
1072 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1075 mpt_free_request(mpt, req);
/*
 * mpt_adjust_queue_depth(): clamp the device's tag openings to the
 * driver's RAID queue depth via an XPT_REL_SIMQ/RELSIM_ADJUST_OPENINGS
 * action on the given path.
 */
1080 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1081 struct cam_path *path)
1083 struct ccb_relsim crs;
1085 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1086 crs.ccb_h.func_code = XPT_REL_SIMQ;
1087 crs.ccb_h.flags = CAM_DEV_QFREEZE;
1088 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1089 crs.openings = mpt->raid_queue_depth;
1090 xpt_action((union ccb *)&crs);
1091 if (crs.ccb_h.status != CAM_REQ_CMP)
1092 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1093 "with CAM status %#x\n", crs.ccb_h.status);
/*
 * mpt_announce_vol(): print a full description of a volume to the
 * console — settings bits, hot-spare pool membership, and per-member
 * disk identity/status flags/state.
 */
1097 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1099 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1102 vol_pg = mpt_vol->config_page;
1103 mpt_vol_prt(mpt, mpt_vol, "Settings (");
/* Walk each settings bit and print the ones that are set. */
1104 for (i = 1; i <= 0x8000; i <<= 1) {
1105 switch (vol_pg->VolumeSettings.Settings & i) {
1106 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1107 mpt_prtc(mpt, " Member-WCE");
1109 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1110 mpt_prtc(mpt, " Offline-On-SMART-Err");
1112 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1113 mpt_prtc(mpt, " Hot-Plug-Spares");
1115 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1116 mpt_prtc(mpt, " High-Priority-ReSync");
1122 mpt_prtc(mpt, " )\n");
/* HotSparePool is a bitmask of pool ids 0-7; pluralize if >1 bit set. */
1123 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1124 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1125 powerof2(vol_pg->VolumeSettings.HotSparePool)
1127 for (i = 0; i < 8; i++) {
1131 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1133 mpt_prtc(mpt, " %d", i);
1135 mpt_prtc(mpt, "\n");
1137 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1138 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1139 struct mpt_raid_disk *mpt_disk;
1140 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1141 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1144 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1145 disk_pg = &mpt_disk->config_page;
1147 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1148 pt_bus, disk_pg->PhysDiskID);
/* IM (mirror) members are Primary/Secondary; others have stripe order. */
1149 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1150 mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1151 "Primary" : "Secondary");
1153 mpt_prtc(mpt, "Stripe Position %d",
1154 mpt_disk->member_number);
1156 f = disk_pg->PhysDiskStatus.Flags;
1157 s = disk_pg->PhysDiskStatus.State;
1158 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1159 mpt_prtc(mpt, " Out of Sync");
1161 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1162 mpt_prtc(mpt, " Quiesced");
1164 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1165 mpt_prtc(mpt, " Inactive");
1167 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1168 mpt_prtc(mpt, " Was Optimal");
1170 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1171 mpt_prtc(mpt, " Was Non-Optimal");
1174 case MPI_PHYSDISK0_STATUS_ONLINE:
1175 mpt_prtc(mpt, " Online");
1177 case MPI_PHYSDISK0_STATUS_MISSING:
1178 mpt_prtc(mpt, " Missing");
1180 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1181 mpt_prtc(mpt, " Incompatible");
1183 case MPI_PHYSDISK0_STATUS_FAILED:
1184 mpt_prtc(mpt, " Failed");
1186 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1187 mpt_prtc(mpt, " Initializing");
1189 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1190 mpt_prtc(mpt, " Requested Offline");
1192 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1193 mpt_prtc(mpt, " Requested Failed");
1195 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1197 mpt_prtc(mpt, " Offline Other (%x)", s);
1200 mpt_prtc(mpt, "\n");
/*
 * mpt_announce_disk(): print a physical disk's real and pass-through
 * CAM addresses and (if any) hot-spare pool membership.
 */
1205 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1207 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1208 int rd_bus = cam_sim_bus(mpt->sim);
1209 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1212 disk_pg = &mpt_disk->config_page;
1213 mpt_disk_prt(mpt, mpt_disk,
1214 "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1215 device_get_nameunit(mpt->dev), rd_bus,
1216 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1217 pt_bus, mpt_disk - mpt->raid_disks);
1218 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
/* HotSparePool is a bitmask of pool ids 0-7; pluralize if >1 bit set. */
1220 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1221 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1223 for (i = 0; i < 8; i++) {
1227 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1229 mpt_prtc(mpt, " %d", i);
1231 mpt_prtc(mpt, "\n");
/*
 * mpt_refresh_raid_disk(): re-read a physical disk's RAID config page
 * (header then current page) from the IOC and byte-swap it to host
 * order.  Logs and returns early on read failures.
 */
1235 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1236 IOC_3_PHYS_DISK *ioc_disk)
1240 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1241 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1242 &mpt_disk->config_page.Header,
1243 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1245 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1246 "Failed to read RAID Disk Hdr(%d)\n",
1247 ioc_disk->PhysDiskNum);
1250 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1251 &mpt_disk->config_page.Header,
1252 sizeof(mpt_disk->config_page),
1253 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1255 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1256 "Failed to read RAID Disk Page(%d)\n",
1257 ioc_disk->PhysDiskNum);
/* Convert the freshly-read page from MPI (little-endian) to host order. */
1258 mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
/*
 * mpt_refresh_raid_vol(): re-read a volume's RAID VOL page 0, mark the
 * volume active, rebind its member-disk entries (volume pointer and
 * member number), and — when a resync is in progress — fetch the
 * progress indicator via MPI_RAID_ACTION_INDICATOR_STRUCT.
 */
1262 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1263 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1265 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1266 struct mpt_raid_action_result *ar;
1271 vol_pg = mpt_vol->config_page;
1272 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1274 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1275 ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1277 mpt_vol_prt(mpt, mpt_vol,
1278 "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1279 ioc_vol->VolumePageNumber);
1283 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1284 &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1286 mpt_vol_prt(mpt, mpt_vol,
1287 "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1288 ioc_vol->VolumePageNumber);
/* Convert page to host byte order before using any multi-byte fields. */
1291 mpt2host_config_page_raid_vol_0(vol_pg);
1293 mpt_vol->flags |= MPT_RVF_ACTIVE;
1295 /* Update disk entry array data. */
1296 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1297 struct mpt_raid_disk *mpt_disk;
1298 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1299 mpt_disk->volume = mpt_vol;
1300 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
/* IM volumes report PhysDiskMap 1-based; normalize to 0-based. */
1301 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1302 mpt_disk->member_number--;
/* No resync in progress: nothing further to fetch. */
1306 if ((vol_pg->VolumeStatus.Flags
1307 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1310 req = mpt_get_request(mpt, TRUE);
1312 mpt_vol_prt(mpt, mpt_vol,
1313 "mpt_refresh_raid_vol: Get request failed!\n");
1316 rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1317 MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1318 if (rv == ETIMEDOUT) {
1319 mpt_vol_prt(mpt, mpt_vol,
1320 "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1321 mpt_free_request(mpt, req);
/* On success, cache the resync progress indicator in host order. */
1325 ar = REQ_TO_RAID_ACTION_RESULT(req);
1327 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1328 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1329 memcpy(&mpt_vol->sync_progress,
1330 &ar->action_data.indicator_struct,
1331 sizeof(mpt_vol->sync_progress));
1332 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1334 mpt_vol_prt(mpt, mpt_vol,
1335 "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1337 mpt_free_request(mpt, req);
/*
 * mpt_refresh_raid_data --
 *	Re-read IOC Page 3 (physical disks) and IOC Page 2 (volumes),
 *	refresh stale in-core disk/volume entries, announce new and retire
 *	stale configuration, report status changes, and reschedule the
 *	periodic refresh while a resync is in progress.  Also recomputes
 *	mpt->raid_nonopt_volumes.
 *
 * NOTE(review): many interior lines (braces, returns, continues, breaks
 * and some switch cases) are elided in this excerpt; comments below
 * describe only the visible code.
 */
1341 * Update in-core information about RAID support. We update any entries
1342 * that didn't previously exist or have been marked as needing to
1343 * be updated by our event handler. Interesting changes are displayed
1347 mpt_refresh_raid_data(struct mpt_softc *mpt)
1349 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1350 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1351 IOC_3_PHYS_DISK *ioc_disk;
1352 IOC_3_PHYS_DISK *ioc_last_disk;
1353 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1357 u_int nonopt_volumes;
/* Nothing to do until both IOC pages have been read at attach time. */
1359 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1364 * Mark all items as unreferenced by the configuration.
1365 * This allows us to find, report, and discard stale
1368 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1369 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1371 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1372 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1376 * Get Physical Disk information.
/* PageLength is counted in 32-bit words; convert to bytes. */
1378 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1379 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1380 &mpt->ioc_page3->Header, len,
1381 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1384 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1387 mpt2host_config_page_ioc3(mpt->ioc_page3);
/* Walk the firmware's disk list; refresh any entry that is not both
 * ACTIVE and UP2DATE, and mark everything seen as REFERENCED. */
1389 ioc_disk = mpt->ioc_page3->PhysDisk;
1390 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1391 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1392 struct mpt_raid_disk *mpt_disk;
1394 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1395 mpt_disk->flags |= MPT_RDF_REFERENCED;
1396 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1397 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1399 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1402 mpt_disk->flags |= MPT_RDF_ACTIVE;
1407 * Refresh volume data.
1409 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1410 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1411 &mpt->ioc_page2->Header, len,
1412 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1414 mpt_prt(mpt, "mpt_refresh_raid_data: "
1415 "Failed to read IOC Page 2\n");
1418 mpt2host_config_page_ioc2(mpt->ioc_page2);
/* Refresh volumes that are stale or that are actively resyncing
 * (progress data changes every pass while resync runs). */
1420 ioc_vol = mpt->ioc_page2->RaidVolume;
1421 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1422 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1423 struct mpt_raid_volume *mpt_vol;
1425 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1426 mpt_vol->flags |= MPT_RVF_REFERENCED;
1427 vol_pg = mpt_vol->config_page;
1430 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1431 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1432 || (vol_pg->VolumeStatus.Flags
1433 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1435 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1437 mpt_vol->flags |= MPT_RVF_ACTIVE;
/* Second pass over every volume slot: retire volumes that were
 * announced but are no longer referenced, announce new ones, and
 * report state/status changes. */
1441 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1442 struct mpt_raid_volume *mpt_vol;
1448 mpt_vol = &mpt->raid_volumes[i];
1450 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1454 vol_pg = mpt_vol->config_page;
/* Previously announced but no longer referenced => retired. */
1455 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1456 == MPT_RVF_ANNOUNCED) {
1457 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1462 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1463 mpt_announce_vol(mpt, mpt_vol);
1464 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1467 if (vol_pg->VolumeStatus.State !=
1468 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
/* Already reported in a previous pass; skip the chatter. */
1471 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1474 mpt_vol->flags |= MPT_RVF_UP2DATE;
1475 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1476 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1477 mpt_verify_mwce(mpt, mpt_vol);
1479 if (vol_pg->VolumeStatus.Flags == 0) {
/* Decode and print each set status flag bit in turn. */
1483 mpt_vol_prt(mpt, mpt_vol, "Status (");
1484 for (m = 1; m <= 0x80; m <<= 1) {
1485 switch (vol_pg->VolumeStatus.Flags & m) {
1486 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1487 mpt_prtc(mpt, " Enabled");
1489 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1490 mpt_prtc(mpt, " Quiesced");
1492 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1493 mpt_prtc(mpt, " Re-Syncing");
1495 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1496 mpt_prtc(mpt, " Inactive");
1502 mpt_prtc(mpt, " )\n");
1504 if ((vol_pg->VolumeStatus.Flags
1505 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
/* Resync in progress: report rate/priority and remaining blocks. */
1508 mpt_verify_resync_rate(mpt, mpt_vol);
1510 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1511 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1512 if (vol_pg->ResyncRate != 0) {
/* Scale the 0-255 rate to a percentage with 3 decimal digits. */
1514 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1515 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1516 prio / 1000, prio % 1000);
1518 prio = vol_pg->VolumeSettings.Settings
1519 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1520 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1521 prio ? "High" : "Low");
1523 #if __FreeBSD_version >= 500000
1524 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1525 "blocks remaining\n", (uintmax_t)left,
1528 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1529 "blocks remaining\n", (uint64_t)left,
1533 /* Periodically report on sync progress. */
1534 mpt_schedule_raid_refresh(mpt);
/* Same announce/report/retire pass for the physical disks. */
1537 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1538 struct mpt_raid_disk *mpt_disk;
1539 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1542 mpt_disk = &mpt->raid_disks[i];
1543 disk_pg = &mpt_disk->config_page;
1545 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1548 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1549 == MPT_RDF_ANNOUNCED) {
1550 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1551 mpt_disk->flags = 0;
1556 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1558 mpt_announce_disk(mpt, mpt_disk);
/* NOTE(review): MPT_RVF_ANNOUNCED (a volume flag) is set on a
 * disk here while the test above checks MPT_RDF_ANNOUNCED.
 * This looks like a typo for MPT_RDF_ANNOUNCED unless the two
 * flags share a bit value -- verify against the flag defines. */
1559 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1562 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1565 mpt_disk->flags |= MPT_RDF_UP2DATE;
1566 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1567 if (disk_pg->PhysDiskStatus.Flags == 0)
/* Decode and print each set physical-disk status flag bit. */
1570 mpt_disk_prt(mpt, mpt_disk, "Status (");
1571 for (m = 1; m <= 0x80; m <<= 1) {
1572 switch (disk_pg->PhysDiskStatus.Flags & m) {
1573 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1574 mpt_prtc(mpt, " Out-Of-Sync");
1576 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1577 mpt_prtc(mpt, " Quiesced");
1583 mpt_prtc(mpt, " )\n");
1586 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * mpt_raid_timer --
 *	Callout handler for the periodic RAID refresh: asserts the MPT
 *	lock is held and wakes the RAID support code.  The bodies of the
 *	pre-5.x #if blocks (presumably legacy locking) are elided in this
 *	excerpt.
 */
1591 mpt_raid_timer(void *arg)
1593 struct mpt_softc *mpt;
/* The callout was armed with the softc as its argument. */
1595 mpt = (struct mpt_softc *)arg;
1596 #if __FreeBSD_version < 500000
1599 MPT_LOCK_ASSERT(mpt);
1600 mpt_raid_wakeup(mpt);
1601 #if __FreeBSD_version < 500000
/*
 * mpt_schedule_raid_refresh --
 *	(Re)arm the RAID refresh callout so mpt_raid_timer() fires after
 *	MPT_RAID_SYNC_REPORT_INTERVAL ticks.
 */
1607 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1610 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1611 mpt_raid_timer, mpt);
/*
 * mpt_raid_free_mem --
 *	Release all RAID-related allocations hanging off the softc: each
 *	volume's cached config page, the volume and disk arrays, and IOC
 *	pages 2 and 3; then clear the associated limits.  Pointers are
 *	NULLed as they are freed, so a repeat call is harmless.
 */
1615 mpt_raid_free_mem(struct mpt_softc *mpt)
1618 if (mpt->raid_volumes) {
1619 struct mpt_raid_volume *mpt_raid;
/* Free each volume's cached RAID Volume Page 0 first. */
1621 for (i = 0; i < mpt->raid_max_volumes; i++) {
1622 mpt_raid = &mpt->raid_volumes[i];
1623 if (mpt_raid->config_page) {
1624 free(mpt_raid->config_page, M_DEVBUF);
1625 mpt_raid->config_page = NULL;
1628 free(mpt->raid_volumes, M_DEVBUF);
1629 mpt->raid_volumes = NULL;
1631 if (mpt->raid_disks) {
1632 free(mpt->raid_disks, M_DEVBUF);
1633 mpt->raid_disks = NULL;
1635 if (mpt->ioc_page2) {
1636 free(mpt->ioc_page2, M_DEVBUF);
1637 mpt->ioc_page2 = NULL;
1639 if (mpt->ioc_page3) {
1640 free(mpt->ioc_page3, M_DEVBUF);
1641 mpt->ioc_page3 = NULL;
1643 mpt->raid_max_volumes = 0;
1644 mpt->raid_max_disks = 0;
1647 #if __FreeBSD_version >= 500000
/*
 * mpt_raid_set_vol_resync_rate --
 *	Validate and record a new global resync rate, then push it to
 *	every active volume via mpt_verify_resync_rate().  Accepted
 *	values are MPT_RAID_RESYNC_RATE_MIN..MAX or the special
 *	MPT_RAID_RESYNC_RATE_NC ("no change") value.
 */
1649 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1651 struct mpt_raid_volume *mpt_vol;
/* Reject out-of-range rates unless it is the NC sentinel. */
1653 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1654 || rate < MPT_RAID_RESYNC_RATE_MIN)
1655 && rate != MPT_RAID_RESYNC_RATE_NC)
1659 mpt->raid_resync_rate = rate;
1660 RAID_VOL_FOREACH(mpt, mpt_vol) {
/* Skip slots with no configured volume. */
1661 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1664 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * mpt_raid_set_vol_queue_depth --
 *	Validate and record a new default volume queue depth (1-255),
 *	then apply it to every active volume by building a CAM path to
 *	the volume's target ID and calling mpt_adjust_queue_depth().
 */
1671 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1673 struct mpt_raid_volume *mpt_vol;
1675 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1679 mpt->raid_queue_depth = vol_queue_depth;
1680 RAID_VOL_FOREACH(mpt, mpt_vol) {
1681 struct cam_path *path;
/* Skip slots with no configured volume. */
1684 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
/* NOTE(review): the pending-rescan flag is cleared for each active
 * volume processed here -- confirm this is intentional. */
1687 mpt->raid_rescan = 0;
/* Drop the MPT lock around CAM calls; it is reacquired on every
 * visible exit path below. */
1689 MPTLOCK_2_CAMLOCK(mpt);
1690 error = xpt_create_path(&path, xpt_periph,
1691 cam_sim_path(mpt->sim),
1692 mpt_vol->config_page->VolumeID,
1694 if (error != CAM_REQ_CMP) {
1695 CAMLOCK_2_MPTLOCK(mpt);
1696 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1699 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1700 xpt_free_path(path);
1701 CAMLOCK_2_MPTLOCK(mpt);
/*
 * mpt_raid_set_vol_mwce --
 *	Record a new member-disk write-cache-enable (MWCE) policy and
 *	apply it to every active volume via mpt_verify_mwce().  On the
 *	very first setting, warn about a likely unsafe shutdown if MWCE
 *	is enabled while a resync is in progress.
 */
1708 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1710 struct mpt_raid_volume *mpt_vol;
1711 int force_full_resync;
/* No-op if the policy is unchanged. */
1714 if (mwce == mpt->raid_mwce_setting) {
1720 * Catch MWCE being left on due to a failed shutdown. Since
1721 * sysctls cannot be set by the loader, we treat the first
1722 * setting of this variable specially and force a full volume
1723 * resync if MWCE is enabled and a resync is in progress.
1725 force_full_resync = 0;
1726 if (mpt->raid_mwce_set == 0
1727 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1728 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1729 force_full_resync = 1;
1731 mpt->raid_mwce_setting = mwce;
1732 RAID_VOL_FOREACH(mpt, mpt_vol) {
1733 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1737 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1740 vol_pg = mpt_vol->config_page;
1741 resyncing = vol_pg->VolumeStatus.Flags
1742 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
/* NOTE(review): the mwce parameter is reused here as a scratch
 * flag for this volume's current WCE setting. */
1743 mwce = vol_pg->VolumeSettings.Settings
1744 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1745 if (force_full_resync && resyncing && mwce) {
1748 * XXX disable/enable volume should force a resync,
1749 * but we'll need to quiesce, drain, and restart
1752 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1753 "detected. Suggest full resync.\n");
1755 mpt_verify_mwce(mpt, mpt_vol);
/* Remember that the sysctl has been set at least once. */
1757 mpt->raid_mwce_set = 1;
/*
 * Sysctl string names for the MWCE policy, indexed by mpt_raid_mwce_t.
 * Most entries are elided in this excerpt; keep the order in sync with
 * the enum.
 */
1762 static const char *mpt_vol_mwce_strs[] =
1766 "On-During-Rebuild",
/*
 * mpt_raid_sysctl_vol_member_wce --
 *	Sysctl handler for the "vol_member_wce" string knob.  Reports the
 *	current setting; on write, matches the input against
 *	mpt_vol_mwce_strs and applies it via mpt_raid_set_vol_mwce().
 */
1771 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1774 struct mpt_softc *mpt;
1782 mpt = (struct mpt_softc *)arg1;
1783 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
/* Emit the current value, including its NUL terminator. */
1784 error = SYSCTL_OUT(req, str, strlen(str) + 1);
/* Done unless this is a write request. */
1785 if (error || !req->newptr) {
1789 size = req->newlen - req->newidx;
/* Reject input that cannot fit in inbuf with a terminator. */
1790 if (size >= sizeof(inbuf)) {
1794 error = SYSCTL_IN(req, inbuf, size);
/* Translate the string back into its mpt_raid_mwce_t index. */
1799 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1800 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1801 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * mpt_raid_sysctl_vol_resync_rate --
 *	Sysctl handler for "vol_resync_rate".  Reads back the current
 *	rate; on write, validates and applies the new value via
 *	mpt_raid_set_vol_resync_rate().
 */
1808 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1810 struct mpt_softc *mpt;
1811 u_int raid_resync_rate;
1816 mpt = (struct mpt_softc *)arg1;
/* Work on a local copy; only commit through the setter below. */
1817 raid_resync_rate = mpt->raid_resync_rate;
1819 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
/* Read-only access (or error): nothing further to apply. */
1820 if (error || !req->newptr) {
1824 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * mpt_raid_sysctl_vol_queue_depth --
 *	Sysctl handler for "vol_queue_depth".  Reads back the current
 *	depth; on write, validates and applies the new value via
 *	mpt_raid_set_vol_queue_depth().
 */
1828 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1830 struct mpt_softc *mpt;
1831 u_int raid_queue_depth;
1836 mpt = (struct mpt_softc *)arg1;
/* Work on a local copy; only commit through the setter below. */
1837 raid_queue_depth = mpt->raid_queue_depth;
1839 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
/* Read-only access (or error): nothing further to apply. */
1840 if (error || !req->newptr) {
1844 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1848 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1850 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1851 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1853 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1854 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1855 mpt_raid_sysctl_vol_member_wce, "A",
1856 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1858 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1859 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1860 mpt_raid_sysctl_vol_queue_depth, "I",
1861 "default volume queue depth");
1863 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1864 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1865 mpt_raid_sysctl_vol_resync_rate, "I",
1866 "volume resync priority (0 == NC, 1 - 255)");
1867 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1868 "nonoptimal_volumes", CTLFLAG_RD,
1869 &mpt->raid_nonopt_volumes, 0,
1870 "number of nonoptimal volumes");