1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  * 
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a long way toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_periph.h>
56 #include <cam/cam_xpt_sim.h>
57
58 #if __FreeBSD_version < 500000
59 #include <sys/devicestat.h>
60 #define GIANT_REQUIRED
61 #endif
62 #include <cam/cam_periph.h>
63
64 #include <sys/callout.h>
65 #include <sys/kthread.h>
66 #include <sys/sysctl.h>
67
68 #include <machine/stdarg.h>
69
70 struct mpt_raid_action_result
71 {
72         union {
73                 MPI_RAID_VOL_INDICATOR  indicator_struct;
74                 uint32_t                new_settings;
75                 uint8_t                 phys_disk_num;
76         } action_data;
77         uint16_t                        action_status;
78 };
79
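/*
 * The reply's ActionData and ActionStatus are copied into a
 * struct mpt_raid_action_result stored directly after the
 * MSG_RAID_ACTION_REQUEST in the request buffer.  REQ_TO_RAID_ACTION_RESULT
 * locates that result; REQ_IOCSTATUS masks the IOCStatus saved in the
 * request down to its base status code.
 */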
80 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
81         (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
82
83 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
84
85
86 static mpt_probe_handler_t      mpt_raid_probe;
87 static mpt_attach_handler_t     mpt_raid_attach;
88 static mpt_enable_handler_t     mpt_raid_enable;
89 static mpt_event_handler_t      mpt_raid_event;
90 static mpt_shutdown_handler_t   mpt_raid_shutdown;
91 static mpt_reset_handler_t      mpt_raid_ioc_reset;
92 static mpt_detach_handler_t     mpt_raid_detach;
93
94 static struct mpt_personality mpt_raid_personality =
95 {
96         .name           = "mpt_raid",
97         .probe          = mpt_raid_probe,
98         .attach         = mpt_raid_attach,
99         .enable         = mpt_raid_enable,
100         .event          = mpt_raid_event,
101         .reset          = mpt_raid_ioc_reset,
102         .shutdown       = mpt_raid_shutdown,
103         .detach         = mpt_raid_detach,
104 };
105
106 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
107 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
108
109 static mpt_reply_handler_t mpt_raid_reply_handler;
110 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
111                                         MSG_DEFAULT_REPLY *reply_frame);
112 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
113 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
114 static void mpt_raid_thread(void *arg);
115 static timeout_t mpt_raid_timer;
116 #if 0
117 static void mpt_enable_vol(struct mpt_softc *mpt,
118                            struct mpt_raid_volume *mpt_vol, int enable);
119 #endif
120 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
121 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
122     struct cam_path *);
123 #if __FreeBSD_version < 500000
124 #define mpt_raid_sysctl_attach(x)       do { } while (0)
125 #else
126 static void mpt_raid_sysctl_attach(struct mpt_softc *);
127 #endif
128
129 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
130
131 const char *
132 mpt_vol_type(struct mpt_raid_volume *vol)
133 {
134         switch (vol->config_page->VolumeType) {
135         case MPI_RAID_VOL_TYPE_IS:
136                 return ("RAID-0");
137         case MPI_RAID_VOL_TYPE_IME:
138                 return ("RAID-1E");
139         case MPI_RAID_VOL_TYPE_IM:
140                 return ("RAID-1");
141         default:
142                 return ("Unknown");
143         }
144 }
145
146 const char *
147 mpt_vol_state(struct mpt_raid_volume *vol)
148 {
149         switch (vol->config_page->VolumeStatus.State) {
150         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
151                 return ("Optimal");
152         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
153                 return ("Degraded");
154         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
155                 return ("Failed");
156         default:
157                 return ("Unknown");
158         }
159 }
160
161 const char *
162 mpt_disk_state(struct mpt_raid_disk *disk)
163 {
164         switch (disk->config_page.PhysDiskStatus.State) {
165         case MPI_PHYSDISK0_STATUS_ONLINE:
166                 return ("Online");
167         case MPI_PHYSDISK0_STATUS_MISSING:
168                 return ("Missing");
169         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
170                 return ("Incompatible");
171         case MPI_PHYSDISK0_STATUS_FAILED:
172                 return ("Failed");
173         case MPI_PHYSDISK0_STATUS_INITIALIZING:
174                 return ("Initializing");
175         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
176                 return ("Offline Requested");
177         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
178                 return ("Failed per Host Request");
179         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
180                 return ("Offline");
181         default:
182                 return ("Unknown");
183         }
184 }
185
186 void
187 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
188             const char *fmt, ...)
189 {
190         va_list ap;
191
192         printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
193                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
194                vol->config_page->VolumeBus, vol->config_page->VolumeID);
195         va_start(ap, fmt);
196         vprintf(fmt, ap);
197         va_end(ap);
198 }
199
200 void
201 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
202              const char *fmt, ...)
203 {
204         va_list ap;
205
206         if (disk->volume != NULL) {
207                 printf("(%s:vol%d:%d): ",
208                        device_get_nameunit(mpt->dev),
209                        disk->volume->config_page->VolumeID,
210                        disk->member_number);
211         } else {
212                 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
213                        disk->config_page.PhysDiskBus,
214                        disk->config_page.PhysDiskID);
215         }
216         va_start(ap, fmt);
217         vprintf(fmt, ap);
218         va_end(ap);
219 }
220
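/*
 * CAM async event callback.  When a new device is found, check whether its
 * target ID matches an active RAID volume and, if so, apply the configured
 * queue depth to that device's path.
 */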
221 static void
222 mpt_raid_async(void *callback_arg, u_int32_t code,
223                struct cam_path *path, void *arg)
224 {
225         struct mpt_softc *mpt;
226
227         mpt = (struct mpt_softc*)callback_arg;
228         switch (code) {
229         case AC_FOUND_DEVICE:
230         {
231                 struct ccb_getdev *cgd;
232                 struct mpt_raid_volume *mpt_vol;
233
234                 cgd = (struct ccb_getdev *)arg;
235                 if (cgd == NULL) {
236                         break;
237                 }
238
239                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
240                          cgd->ccb_h.target_id);
241                 
242                 RAID_VOL_FOREACH(mpt, mpt_vol) {
243                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
244                                 continue;
245
246                         if (mpt_vol->config_page->VolumeID 
247                          == cgd->ccb_h.target_id) {
248                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
249                                 break;
250                         }
251                 }
252         }
253         default:
254                 break;
255         }
256 }
257
258 int
259 mpt_raid_probe(struct mpt_softc *mpt)
260 {
261         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
262                 return (ENODEV);
263         }
264         return (0);
265 }
266
267 int
268 mpt_raid_attach(struct mpt_softc *mpt)
269 {
270         struct ccb_setasync csa;
271         mpt_handler_t    handler;
272         int              error;
273
274         mpt_callout_init(&mpt->raid_timer);
275
276         error = mpt_spawn_raid_thread(mpt);
277         if (error != 0) {
278                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
279                 goto cleanup;
280         }
281  
282         MPT_LOCK(mpt);
283         handler.reply_handler = mpt_raid_reply_handler;
284         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
285                                      &raid_handler_id);
286         if (error != 0) {
287                 mpt_prt(mpt, "Unable to register RAID handler!\n");
288                 goto cleanup;
289         }
290
291         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
292         csa.ccb_h.func_code = XPT_SASYNC_CB;
293         csa.event_enable = AC_FOUND_DEVICE;
294         csa.callback = mpt_raid_async;
295         csa.callback_arg = mpt;
296         xpt_action((union ccb *)&csa);
297         if (csa.ccb_h.status != CAM_REQ_CMP) {
298                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
299                         "CAM async handler.\n");
300         }
301         MPT_UNLOCK(mpt);
302
303         mpt_raid_sysctl_attach(mpt);
304         return (0);
305 cleanup:
306         MPT_UNLOCK(mpt);
307         mpt_raid_detach(mpt);
308         return (error);
309 }
310
311 int
312 mpt_raid_enable(struct mpt_softc *mpt)
313 {
314         return (0);
315 }
316
317 void
318 mpt_raid_detach(struct mpt_softc *mpt)
319 {
320         struct ccb_setasync csa;
321         mpt_handler_t handler;
322
323         callout_stop(&mpt->raid_timer);
324         MPT_LOCK(mpt);
325         mpt_terminate_raid_thread(mpt); 
326
327         handler.reply_handler = mpt_raid_reply_handler;
328         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
329                                raid_handler_id);
330         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
331         csa.ccb_h.func_code = XPT_SASYNC_CB;
332         csa.event_enable = 0;
333         csa.callback = mpt_raid_async;
334         csa.callback_arg = mpt;
335         xpt_action((union ccb *)&csa);
336         MPT_UNLOCK(mpt);
337 }
338
339 static void
340 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
341 {
342         /* Nothing to do yet. */
343 }
344
345 static const char *raid_event_txt[] =
346 {
347         "Volume Created",
348         "Volume Deleted",
349         "Volume Settings Changed",
350         "Volume Status Changed",
351         "Volume Physical Disk Membership Changed",
352         "Physical Disk Created",
353         "Physical Disk Deleted",
354         "Physical Disk Settings Changed",
355         "Physical Disk Status Changed",
356         "Domain Validation Required",
357         "SMART Data Received",
358         "Replace Action Started",
359 };
360
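/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications from the IOC.  Locate the
 * volume and/or physical disk the event refers to, mark their cached
 * configuration data stale and request a bus rescan as appropriate, log
 * the event, and wake the RAID monitoring thread to refresh our state.
 */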
361 static int
362 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
363                MSG_EVENT_NOTIFY_REPLY *msg)
364 {
365         EVENT_DATA_RAID *raid_event;
366         struct mpt_raid_volume *mpt_vol;
367         struct mpt_raid_disk *mpt_disk;
368         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
369         int i;
370         int print_event;
371
372         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
373                 return (0);
374         }
375
376         raid_event = (EVENT_DATA_RAID *)&msg->Data;
377
378         mpt_vol = NULL;
379         vol_pg = NULL;
380         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
381                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
382                         mpt_vol = &mpt->raid_volumes[i];
383                         vol_pg = mpt_vol->config_page;
384
385                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
386                                 continue;
387
388                         if (vol_pg->VolumeID == raid_event->VolumeID
389                          && vol_pg->VolumeBus == raid_event->VolumeBus)
390                                 break;
391                 }
392                 if (i >= mpt->ioc_page2->MaxVolumes) {
393                         mpt_vol = NULL;
394                         vol_pg = NULL;
395                 }
396         }
397
398         mpt_disk = NULL;
399         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
400                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
401                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
402                         mpt_disk = NULL;
403                 }
404         }
405
406         print_event = 1;
407         switch(raid_event->ReasonCode) {
408         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
409         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
410                 break;
411         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
412                 if (mpt_vol != NULL) {
413                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
414                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
415                         } else {
416                                 /*
417                                  * Coalesce status messages into one
418                                  * per background run of our RAID thread.
419                                  * This removes "spurious" status messages
420                                  * from our output.
421                                  */
422                                 print_event = 0;
423                         }
424                 }
425                 break;
426         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
427         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
428                 mpt->raid_rescan++;
429                 if (mpt_vol != NULL) {
430                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
431                 }
432                 break;
433         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
434         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
435                 mpt->raid_rescan++;
436                 break;
437         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
438         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
439                 mpt->raid_rescan++;
440                 if (mpt_disk != NULL) {
441                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
442                 }
443                 break;
444         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
445                 mpt->raid_rescan++;
446                 break;
447         case MPI_EVENT_RAID_RC_SMART_DATA:
448         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
449                 break;
450         }
451
452         if (print_event) {
453                 if (mpt_disk != NULL) {
454                         mpt_disk_prt(mpt, mpt_disk, "");
455                 } else if (mpt_vol != NULL) {
456                         mpt_vol_prt(mpt, mpt_vol, "");
457                 } else {
458                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
459                                 raid_event->VolumeID);
460
461                         if (raid_event->PhysDiskNum != 0xFF)
462                                 mpt_prtc(mpt, ":%d): ",
463                                          raid_event->PhysDiskNum);
464                         else
465                                 mpt_prtc(mpt, "): ");
466                 }
467
468                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
469                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
470                                  raid_event->ReasonCode);
471                 else
472                         mpt_prtc(mpt, "%s\n",
473                                  raid_event_txt[raid_event->ReasonCode]);
474         }
475
476         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
477                 /* XXX Use CAM's print sense for this... */
478                 if (mpt_disk != NULL)
479                         mpt_disk_prt(mpt, mpt_disk, "");
480                 else
481                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
482                             raid_event->VolumeBus, raid_event->VolumeID,
483                             raid_event->PhysDiskNum);
484                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
485                          raid_event->ASC, raid_event->ASCQ);
486         }
487
488         mpt_raid_wakeup(mpt);
489         return (1);
490 }
491
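/*
 * At shutdown, if member write caching was enabled only for rebuilds,
 * force the policy to "off" and run mpt_verify_mwce() on each volume so
 * that no member disk is left with its write cache enabled.
 */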
492 static void
493 mpt_raid_shutdown(struct mpt_softc *mpt)
494 {
495         struct mpt_raid_volume *mpt_vol;
496
497         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
498                 return;
499         }
500
501         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
502         RAID_VOL_FOREACH(mpt, mpt_vol) {
503                 mpt_verify_mwce(mpt, mpt_vol);
504         }
505 }
506
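/*
 * Completion handler for RAID action requests.  Parse any reply frame,
 * mark the request done and remove it from the pending list, then either
 * wake a thread sleeping on the request or free it.
 */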
507 static int
508 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
509     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
510 {
511         int free_req;
512
513         if (req == NULL)
514                 return (TRUE);
515
516         free_req = TRUE;
517         if (reply_frame != NULL)
518                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
519 #ifdef NOTYET
520         else if (req->ccb != NULL) {
521                 /* Complete Quiesce CCB with error... */
522         }
523 #endif
524
525         req->state &= ~REQ_STATE_QUEUED;
526         req->state |= REQ_STATE_DONE;
527         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
528
529         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
530                 wakeup(req);
531         } else if (free_req) {
532                 mpt_free_request(mpt, req);
533         }
534
535         return (TRUE);
536 }
537
538 /*
539  * Parse additional completion information in the reply
540  * frame for RAID I/O requests.
541  */
542 static int
543 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
544     MSG_DEFAULT_REPLY *reply_frame)
545 {
546         MSG_RAID_ACTION_REPLY *reply;
547         struct mpt_raid_action_result *action_result;
548         MSG_RAID_ACTION_REQUEST *rap;
549
550         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
551         req->IOCStatus = le16toh(reply->IOCStatus);
552         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
553         
554         switch (rap->Action) {
555         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
556                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
557                 break;
558         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
559                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
560                 break;
561         default:
562                 break;
563         }
564         action_result = REQ_TO_RAID_ACTION_RESULT(req);
565         memcpy(&action_result->action_data, &reply->ActionData,
566             sizeof(action_result->action_data));
567         action_result->action_status = le16toh(reply->ActionStatus);
568         return (TRUE);
569 }
570
571 /*
572  * Utility routine to perform a RAID action command.
573  */
574 int
575 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
576                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
577                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
578                    int write, int wait)
579 {
580         MSG_RAID_ACTION_REQUEST *rap;
581         SGE_SIMPLE32 *se;
582
583         rap = req->req_vbuf;
584         memset(rap, 0, sizeof *rap);
585         rap->Action = Action;
586         rap->ActionDataWord = htole32(ActionDataWord);
587         rap->Function = MPI_FUNCTION_RAID_ACTION;
588         rap->VolumeID = vol->config_page->VolumeID;
589         rap->VolumeBus = vol->config_page->VolumeBus;
590         if (disk != NULL)
591                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
592         else
593                 rap->PhysDiskNum = 0xFF;
594         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
595         se->Address = htole32(addr);
596         MPI_pSGE_SET_LENGTH(se, len);
597         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
598             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
599             MPI_SGE_FLAGS_END_OF_LIST |
600             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
601         se->FlagsLength = htole32(se->FlagsLength);
602         rap->MsgContext = htole32(req->index | raid_handler_id);
603
604         mpt_check_doorbell(mpt);
605         mpt_send_cmd(mpt, req);
606
607         if (wait) {
608                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
609                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
610         } else {
611                 return (0);
612         }
613 }
614
615 /*************************** RAID Status Monitoring ***************************/
616 static int
617 mpt_spawn_raid_thread(struct mpt_softc *mpt)
618 {
619         int error;
620
621         /*
622          * Freeze out any CAM transactions until our thread
623          * is able to run at least once.  We need to update
624          * our RAID pages before accepting I/O or we may
625          * reject I/O to an ID we later determine is for a
626          * hidden physdisk.
627          */
628         MPT_LOCK(mpt);
629         xpt_freeze_simq(mpt->phydisk_sim, 1);
630         MPT_UNLOCK(mpt);
631         error = mpt_kthread_create(mpt_raid_thread, mpt,
632             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
633             "mpt_raid%d", mpt->unit);
634         if (error != 0) {
635                 MPT_LOCK(mpt);
636                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
637                 MPT_UNLOCK(mpt);
638         }
639         return (error);
640 }
641
642 static void
643 mpt_terminate_raid_thread(struct mpt_softc *mpt)
644 {
645
646         if (mpt->raid_thread == NULL) {
647                 return;
648         }
649         mpt->shutdwn_raid = 1;
650         wakeup(mpt->raid_volumes);
651         /*
652          * Sleep on a slightly different location
653          * for this interlock just for added safety.
654          */
655         mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
656 }
657
658 static void
659 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
660 {
661         xpt_free_path(ccb->ccb_h.path);
662 }
663
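/*
 * RAID monitoring thread.  Sleeps until woken with new work
 * (mpt->raid_wakeup), refreshes the cached RAID configuration, releases
 * the physical disk SIM queue (frozen at spawn time) after the first
 * successful refresh, and rescans the pass-through bus whenever
 * raid_rescan is set.
 */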
664 static void
665 mpt_raid_thread(void *arg)
666 {
667         struct mpt_softc *mpt;
668         union ccb *ccb;
669         int firstrun;
670
671         mpt = (struct mpt_softc *)arg;
672         firstrun = 1;
673         ccb = xpt_alloc_ccb();
674         MPT_LOCK(mpt);
675         while (mpt->shutdwn_raid == 0) {
676
677                 if (mpt->raid_wakeup == 0) {
678                         mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
679                         continue;
680                 }
681
682                 mpt->raid_wakeup = 0;
683
684                 if (mpt_refresh_raid_data(mpt)) {
685                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
686                         continue;
687                 }
688
689                 /*
690                  * Now that we have our first snapshot of RAID data,
691                  * allow CAM to access our physical disk bus.
692                  */
693                 if (firstrun) {
694                         firstrun = 0;
695                         MPTLOCK_2_CAMLOCK(mpt);
696                         xpt_release_simq(mpt->phydisk_sim, TRUE);
697                         CAMLOCK_2_MPTLOCK(mpt);
698                 }
699
700                 if (mpt->raid_rescan != 0) {
701                         struct cam_path *path;
702                         int error;
703
704                         mpt->raid_rescan = 0;
705
706                         error = xpt_create_path(&path, xpt_periph,
707                             cam_sim_path(mpt->phydisk_sim),
708                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
709                         if (error != CAM_REQ_CMP) {
710                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
711                         } else {
712                                 xpt_setup_ccb(&ccb->ccb_h, path, 5);
713                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
714                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
715                                 ccb->crcn.flags = CAM_FLAG_NONE;
716                                 MPTLOCK_2_CAMLOCK(mpt);
717                                 xpt_action(ccb);
718                                 CAMLOCK_2_MPTLOCK(mpt);
719                         }
720                 }
721         }
722         xpt_free_ccb(ccb);
723         mpt->raid_thread = NULL;
724         wakeup(&mpt->raid_thread);
725         MPT_UNLOCK(mpt);
726         mpt_kthread_exit(0);
727 }
728
729 #if 0
730 static void
731 mpt_raid_quiesce_timeout(void *arg)
732 {
733         /* Complete the CCB with error */
734         /* COWWWW */
735 }
736
737 static timeout_t mpt_raid_quiesce_timeout;
738 cam_status
739 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
740                       request_t *req)
741 {
742         union ccb *ccb;
743
744         ccb = req->ccb;
745         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
746                 return (CAM_REQ_CMP);
747
748         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
749                 int rv;
750
751                 mpt_disk->flags |= MPT_RDF_QUIESCING;
752                 xpt_freeze_devq(ccb->ccb_h.path, 1);
753                 
754                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
755                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
756                                         /*ActionData*/0, /*addr*/0,
757                                         /*len*/0, /*write*/FALSE,
758                                         /*wait*/FALSE);
759                 if (rv != 0)
760                         return (CAM_REQ_CMP_ERR);
761
762                 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
763 #if 0
764                 if (rv == ETIMEDOUT) {
765                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
766                                      "Quiesce Timed-out\n");
767                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
768                         return (CAM_REQ_CMP_ERR);
769                 }
770
771                 ar = REQ_TO_RAID_ACTION_RESULT(req);
772                 if (rv != 0
773                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
774                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
775                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
776                                     "%d:%x:%x\n", rv, req->IOCStatus,
777                                     ar->action_status);
778                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
779                         return (CAM_REQ_CMP_ERR);
780                 }
781 #endif
782                 return (CAM_REQ_INPROG);
783         }
784         return (CAM_REQUEUE_REQ);
785 }
786 #endif
787
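/*
 * Map a CCB addressed to the physical disk pass-through bus onto the
 * member disk's actual SCSI ID.  Returns 0 and fills in *tgt when the
 * target ID corresponds to an active member disk, -1 otherwise.
 */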
788 /* XXX Ignores that there may be multiple busses/IOCs involved. */
789 cam_status
790 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
791 {
792         struct mpt_raid_disk *mpt_disk;
793
794         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
795         if (ccb->ccb_h.target_id < mpt->raid_max_disks
796          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
797                 *tgt = mpt_disk->config_page.PhysDiskID;
798                 return (0);
799         }
800         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
801                  ccb->ccb_h.target_id);
802         return (-1);
803 }
804
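/*
 * Return non-zero if the given target ID matches the VolumeID of any
 * active RAID volume listed in IOC Page 2.
 */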
805 /* XXX Ignores that there may be multiple busses/IOCs involved. */
806 int
807 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
808 {
809         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
810         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
811
812         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
813                 return (0);
814         }
815         ioc_vol = mpt->ioc_page2->RaidVolume;
816         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
817         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
818                 if (ioc_vol->VolumeID == tgt) {
819                         return (1);
820                 }
821         }
822         return (0);
823 }
824
825 #if 0
826 static void
827 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
828                int enable)
829 {
830         request_t *req;
831         struct mpt_raid_action_result *ar;
832         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
833         int enabled;
834         int rv;
835
836         vol_pg = mpt_vol->config_page;
837         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
838
839         /*
840          * If the setting matches the configuration,
841          * there is nothing to do.
842          */
843         if ((enabled && enable)
844          || (!enabled && !enable))
845                 return;
846
847         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
848         if (req == NULL) {
849                 mpt_vol_prt(mpt, mpt_vol,
850                             "mpt_enable_vol: Get request failed!\n");
851                 return;
852         }
853
854         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
855                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
856                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
857                                 /*data*/0, /*addr*/0, /*len*/0,
858                                 /*write*/FALSE, /*wait*/TRUE);
859         if (rv == ETIMEDOUT) {
860                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
861                             "%s Volume Timed-out\n",
862                             enable ? "Enable" : "Disable");
863                 return;
864         }
865         ar = REQ_TO_RAID_ACTION_RESULT(req);
866         if (rv != 0
867          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
868          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
869                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
870                             enable ? "Enable" : "Disable",
871                             rv, req->IOCStatus, ar->action_status);
872         }
873
874         mpt_free_request(mpt, req);
875 }
876 #endif
877
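/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with the
 * driver's policy.  For the rebuild-only policy, caching should be on only
 * while a resync is in progress; otherwise the setting simply tracks the
 * on/off policy.  When they disagree, issue a CHANGE_VOLUME_SETTINGS RAID
 * action to toggle the WRITE_CACHING_ENABLE bit.
 */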
878 static void
879 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
880 {
881         request_t *req;
882         struct mpt_raid_action_result *ar;
883         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
884         uint32_t data;
885         int rv;
886         int resyncing;
887         int mwce;
888
889         vol_pg = mpt_vol->config_page;
890         resyncing = vol_pg->VolumeStatus.Flags
891                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
892         mwce = vol_pg->VolumeSettings.Settings
893              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
894
895         /*
896          * If the setting matches the configuration,
897          * there is nothing to do.
898          */
899         switch (mpt->raid_mwce_setting) {
900         case MPT_RAID_MWCE_REBUILD_ONLY:
901                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
902                         return;
903                 }
904                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
905                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
906                         /*
907                          * Wait one more status update to see if
908                          * resyncing gets enabled.  It gets disabled
909                          * temporarily when WCE is changed.
910                          */
911                         return;
912                 }
913                 break;
914         case MPT_RAID_MWCE_ON:
915                 if (mwce)
916                         return;
917                 break;
918         case MPT_RAID_MWCE_OFF:
919                 if (!mwce)
920                         return;
921                 break;
922         case MPT_RAID_MWCE_NC:
923                 return;
924         }
925
926         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
927         if (req == NULL) {
928                 mpt_vol_prt(mpt, mpt_vol,
929                             "mpt_verify_mwce: Get request failed!\n");
930                 return;
931         }
932
933         vol_pg->VolumeSettings.Settings ^=
934             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
935         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
936         vol_pg->VolumeSettings.Settings ^=
937             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
938         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
939                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
940                                 data, /*addr*/0, /*len*/0,
941                                 /*write*/FALSE, /*wait*/TRUE);
942         if (rv == ETIMEDOUT) {
943                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
944                             "Write Cache Enable Timed-out\n");
945                 return;
946         }
947         ar = REQ_TO_RAID_ACTION_RESULT(req);
948         if (rv != 0
949          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
950          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
951                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
952                             "%d:%x:%x\n", rv, req->IOCStatus,
953                             ar->action_status);
954         } else {
955                 vol_pg->VolumeSettings.Settings ^=
956                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
957         }
958         mpt_free_request(mpt, req);
959 }
960
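/*
 * Bring the volume's resync rate into line with the configured value.  If
 * the controller reports an explicit ResyncRate that differs, program the
 * new rate with a SET_RESYNC_RATE action; otherwise adjust only the
 * high-priority-resync settings bit via CHANGE_VOLUME_SETTINGS.
 */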
961 static void
962 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
963 {
964         request_t *req;
965         struct mpt_raid_action_result *ar;
966         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
967         u_int prio;
968         int rv;
969
970         vol_pg = mpt_vol->config_page;
971
972         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
973                 return;
974
975         /*
976          * If the current RAID resync rate does not
977          * match our configured rate, update it.
978          */
979         prio = vol_pg->VolumeSettings.Settings
980              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
981         if (vol_pg->ResyncRate != 0
982          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
983
984                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
985                 if (req == NULL) {
986                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
987                                     "Get request failed!\n");
988                         return;
989                 }
990
991                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
992                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
993                                         mpt->raid_resync_rate, /*addr*/0,
994                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
995                 if (rv == ETIMEDOUT) {
996                         mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
997                                     "Resync Rate Setting Timed-out\n");
998                         return;
999                 }
1000
1001                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1002                 if (rv != 0
1003                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1004                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1005                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1006                                     "%d:%x:%x\n", rv, req->IOCStatus,
1007                                     ar->action_status);
1008                 } else 
1009                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1010                 mpt_free_request(mpt, req);
1011         } else if ((prio && mpt->raid_resync_rate < 128)
1012                 || (!prio && mpt->raid_resync_rate >= 128)) {
1013                 uint32_t data;
1014
1015                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1016                 if (req == NULL) {
1017                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1018                                     "Get request failed!\n");
1019                         return;
1020                 }
1021
1022                 vol_pg->VolumeSettings.Settings ^=
1023                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1024                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1025                 vol_pg->VolumeSettings.Settings ^=
1026                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1027                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1028                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1029                                         data, /*addr*/0, /*len*/0,
1030                                         /*write*/FALSE, /*wait*/TRUE);
1031                 if (rv == ETIMEDOUT) {
1032                         mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1033                                     "Resync Rate Setting Timed-out\n");
1034                         return;
1035                 }
1036                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1037                 if (rv != 0
1038                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1039                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1040                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1041                                     "%d:%x:%x\n", rv, req->IOCStatus,
1042                                     ar->action_status);
1043                 } else {
1044                         vol_pg->VolumeSettings.Settings ^=
1045                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1046                 }
1047
1048                 mpt_free_request(mpt, req);
1049         }
1050 }
1051
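/*
 * Ask CAM to adjust the number of openings (outstanding commands) for a
 * RAID volume's device to the driver's raid_queue_depth via an
 * XPT_REL_SIMQ request with RELSIM_ADJUST_OPENINGS.
 */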
1052 static void
1053 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1054                        struct cam_path *path)
1055 {
1056         struct ccb_relsim crs;
1057
1058         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1059         crs.ccb_h.func_code = XPT_REL_SIMQ;
1060         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1061         crs.openings = mpt->raid_queue_depth;
1062         xpt_action((union ccb *)&crs);
1063         if (crs.ccb_h.status != CAM_REQ_CMP)
1064                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1065                             "with CAM status %#x\n", crs.ccb_h.status);
1066 }
1067
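/*
 * Print a one-time description of a RAID volume: its settings flags, hot
 * spare pool membership, and the position, status flags, and state of
 * each member disk.
 */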
1068 static void
1069 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1070 {
1071         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1072         u_int i;
1073
1074         vol_pg = mpt_vol->config_page;
1075         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1076         for (i = 1; i <= 0x8000; i <<= 1) {
1077                 switch (vol_pg->VolumeSettings.Settings & i) {
1078                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1079                         mpt_prtc(mpt, " Member-WCE");
1080                         break;
1081                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1082                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1083                         break;
1084                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1085                         mpt_prtc(mpt, " Hot-Plug-Spares");
1086                         break;
1087                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1088                         mpt_prtc(mpt, " High-Priority-ReSync");
1089                         break;
1090                 default:
1091                         break;
1092                 }
1093         }
1094         mpt_prtc(mpt, " )\n");
1095         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1096                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1097                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1098                           ? ":" : "s:");
1099                 for (i = 0; i < 8; i++) {
1100                         u_int mask;
1101
1102                         mask = 0x1 << i;
1103                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1104                                 continue;
1105                         mpt_prtc(mpt, " %d", i);
1106                 }
1107                 mpt_prtc(mpt, "\n");
1108         }
1109         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1110         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1111                 struct mpt_raid_disk *mpt_disk;
1112                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1113                 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1114                 U8 f, s;
1115
1116                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1117                 disk_pg = &mpt_disk->config_page;
1118                 mpt_prtc(mpt, "      ");
1119                 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1120                          pt_bus, disk_pg->PhysDiskID);
1121                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1122                         mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1123                             "Primary" : "Secondary");
1124                 } else {
1125                         mpt_prtc(mpt, "Stripe Position %d",
1126                                  mpt_disk->member_number);
1127                 }
1128                 f = disk_pg->PhysDiskStatus.Flags;
1129                 s = disk_pg->PhysDiskStatus.State;
1130                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1131                         mpt_prtc(mpt, " Out of Sync");
1132                 }
1133                 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1134                         mpt_prtc(mpt, " Quiesced");
1135                 }
1136                 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1137                         mpt_prtc(mpt, " Inactive");
1138                 }
1139                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1140                         mpt_prtc(mpt, " Was Optimal");
1141                 }
1142                 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1143                         mpt_prtc(mpt, " Was Non-Optimal");
1144                 }
1145                 switch (s) {
1146                 case MPI_PHYSDISK0_STATUS_ONLINE:
1147                         mpt_prtc(mpt, " Online");
1148                         break;
1149                 case MPI_PHYSDISK0_STATUS_MISSING:
1150                         mpt_prtc(mpt, " Missing");
1151                         break;
1152                 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1153                         mpt_prtc(mpt, " Incompatible");
1154                         break;
1155                 case MPI_PHYSDISK0_STATUS_FAILED:
1156                         mpt_prtc(mpt, " Failed");
1157                         break;
1158                 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1159                         mpt_prtc(mpt, " Initializing");
1160                         break;
1161                 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1162                         mpt_prtc(mpt, " Requested Offline");
1163                         break;
1164                 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1165                         mpt_prtc(mpt, " Requested Failed");
1166                         break;
1167                 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1168                 default:
1169                         mpt_prtc(mpt, " Offline Other (%x)", s);
1170                         break;
1171                 }
1172                 mpt_prtc(mpt, "\n");
1173         }
1174 }
1175
1176 static void
1177 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1178 {
1179         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1180         int rd_bus = cam_sim_bus(mpt->sim);
1181         int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1182         u_int i;
1183
1184         disk_pg = &mpt_disk->config_page;
1185         mpt_disk_prt(mpt, mpt_disk,
1186                      "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1187                      device_get_nameunit(mpt->dev), rd_bus,
1188                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1189                      pt_bus, mpt_disk - mpt->raid_disks);
1190         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1191                 return;
1192         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1193                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1194                    ? ":" : "s:");
1195         for (i = 0; i < 8; i++) {
1196                 u_int mask;
1197
1198                 mask = 0x1 << i;
1199                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1200                         continue;
1201                 mpt_prtc(mpt, " %d", i);
1202         }
1203         mpt_prtc(mpt, "\n");
1204 }
1205
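/*
 * Re-read RAID Physical Disk Page 0 for the given member disk and convert
 * it to host byte order.
 */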
1206 static void
1207 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1208                       IOC_3_PHYS_DISK *ioc_disk)
1209 {
1210         int rv;
1211
1212         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1213                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1214                                  &mpt_disk->config_page.Header,
1215                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1216         if (rv != 0) {
1217                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1218                         "Failed to read RAID Disk Hdr(%d)\n",
1219                         ioc_disk->PhysDiskNum);
1220                 return;
1221         }
1222         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1223                                    &mpt_disk->config_page.Header,
1224                                    sizeof(mpt_disk->config_page),
1225                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1226         if (rv != 0)
1227                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1228                         "Failed to read RAID Disk Page(%d)\n",
1229                         ioc_disk->PhysDiskNum);
1230         mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1231 }
1232
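/*
 * Re-read RAID Volume Page 0 for a volume, rebuild the member disk back
 * pointers and member numbers, and, if a resync is in progress, issue an
 * INDICATOR_STRUCT RAID action to capture the current sync progress.
 */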
1233 static void
1234 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1235     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1236 {
1237         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1238         struct mpt_raid_action_result *ar;
1239         request_t *req;
1240         int rv;
1241         int i;
1242
1243         vol_pg = mpt_vol->config_page;
1244         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1245
1246         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1247             ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1248         if (rv != 0) {
1249                 mpt_vol_prt(mpt, mpt_vol,
1250                     "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1251                     ioc_vol->VolumePageNumber);
1252                 return;
1253         }
1254
1255         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1256             &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1257         if (rv != 0) {
1258                 mpt_vol_prt(mpt, mpt_vol,
1259                     "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1260                     ioc_vol->VolumePageNumber);
1261                 return;
1262         }
1263         mpt2host_config_page_raid_vol_0(vol_pg);
1264
1265         mpt_vol->flags |= MPT_RVF_ACTIVE;
1266
1267         /* Update disk entry array data. */
1268         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1269                 struct mpt_raid_disk *mpt_disk;
1270                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1271                 mpt_disk->volume = mpt_vol;
1272                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1273                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1274                         mpt_disk->member_number--;
1275                 }
1276         }
1277
1278         if ((vol_pg->VolumeStatus.Flags
1279            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1280                 return;
1281
1282         req = mpt_get_request(mpt, TRUE);
1283         if (req == NULL) {
1284                 mpt_vol_prt(mpt, mpt_vol,
1285                     "mpt_refresh_raid_vol: Get request failed!\n");
1286                 return;
1287         }
1288         rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1289             MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1290         if (rv == ETIMEDOUT) {
1291                 mpt_vol_prt(mpt, mpt_vol,
1292                     "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1293                 mpt_free_request(mpt, req);
1294                 return;
1295         }
1296
1297         ar = REQ_TO_RAID_ACTION_RESULT(req);
1298         if (rv == 0
1299          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1300          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1301                 memcpy(&mpt_vol->sync_progress,
1302                        &ar->action_data.indicator_struct,
1303                        sizeof(mpt_vol->sync_progress));
1304                 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1305         } else {
1306                 mpt_vol_prt(mpt, mpt_vol,
1307                     "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1308         }
1309         mpt_free_request(mpt, req);
1310 }
1311
1312 /*
1313  * Update in-core information about RAID support.  We update any entries
1314  * that didn't previously exist or have been marked as needing to
1315  * be updated by our event handler.  Interesting changes are displayed
1316  * to the console.
1317  */
1318 int
1319 mpt_refresh_raid_data(struct mpt_softc *mpt)
1320 {
1321         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1322         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1323         IOC_3_PHYS_DISK *ioc_disk;
1324         IOC_3_PHYS_DISK *ioc_last_disk;
1325         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1326         size_t len;
1327         int rv;
1328         int i;
1329         u_int nonopt_volumes;
1330
1331         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1332                 return (0);
1333         }
1334
1335         /*
1336          * Mark all items as unreferenced by the configuration.
1337          * This allows us to find, report, and discard stale
1338          * entries.
1339          */
1340         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1341                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1342         }
1343         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1344                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1345         }
1346
1347         /*
1348          * Get Physical Disk information.
1349          */
1350         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1351         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1352                                    &mpt->ioc_page3->Header, len,
1353                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1354         if (rv) {
1355                 mpt_prt(mpt,
1356                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1357                 return (-1);
1358         }
1359         mpt2host_config_page_ioc3(mpt->ioc_page3);
1360
1361         ioc_disk = mpt->ioc_page3->PhysDisk;
1362         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1363         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1364                 struct mpt_raid_disk *mpt_disk;
1365
1366                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1367                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1368                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1369                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1370
1371                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1372
1373                 }
1374                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1375                 mpt->raid_rescan++;
1376         }
1377
1378         /*
1379          * Refresh volume data.
1380          */
1381         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1382         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1383                                    &mpt->ioc_page2->Header, len,
1384                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1385         if (rv) {
1386                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1387                         "Failed to read IOC Page 2\n");
1388                 return (-1);
1389         }
1390         mpt2host_config_page_ioc2(mpt->ioc_page2);
1391
1392         ioc_vol = mpt->ioc_page2->RaidVolume;
1393         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1394         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1395                 struct mpt_raid_volume *mpt_vol;
1396
1397                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1398                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1399                 vol_pg = mpt_vol->config_page;
1400                 if (vol_pg == NULL)
1401                         continue;
1402                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1403                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1404                  || (vol_pg->VolumeStatus.Flags
1405                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1406
1407                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1408                 }
1409                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1410         }
1411
1412         nonopt_volumes = 0;
1413         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1414                 struct mpt_raid_volume *mpt_vol;
1415                 uint64_t total;
1416                 uint64_t left;
1417                 int m;
1418                 u_int prio;
1419
1420                 mpt_vol = &mpt->raid_volumes[i];
1421
1422                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1423                         continue;
1424                 }
1425
1426                 vol_pg = mpt_vol->config_page;
1427                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1428                  == MPT_RVF_ANNOUNCED) {
1429                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1430                         mpt_vol->flags = 0;
1431                         continue;
1432                 }
1433
1434                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1435                         mpt_announce_vol(mpt, mpt_vol);
1436                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1437                 }
1438
1439                 if (vol_pg->VolumeStatus.State !=
1440                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1441                         nonopt_volumes++;
1442
1443                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1444                         continue;
1445
1446                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1447                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1448                     mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1449                 mpt_verify_mwce(mpt, mpt_vol);
1450
1451                 if (vol_pg->VolumeStatus.Flags == 0) {
1452                         continue;
1453                 }
1454
1455                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1456                 for (m = 1; m <= 0x80; m <<= 1) {
1457                         switch (vol_pg->VolumeStatus.Flags & m) {
1458                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1459                                 mpt_prtc(mpt, " Enabled");
1460                                 break;
1461                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1462                                 mpt_prtc(mpt, " Quiesced");
1463                                 break;
1464                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1465                                 mpt_prtc(mpt, " Re-Syncing");
1466                                 break;
1467                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1468                                 mpt_prtc(mpt, " Inactive");
1469                                 break;
1470                         default:
1471                                 break;
1472                         }
1473                 }
1474                 mpt_prtc(mpt, " )\n");
1475
1476                 if ((vol_pg->VolumeStatus.Flags
1477                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1478                         continue;
1479
1480                 mpt_verify_resync_rate(mpt, mpt_vol);
1481
1482                 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1483                 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1484                 if (vol_pg->ResyncRate != 0) {
1485
1486                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1487                         mpt_vol_prt(mpt, mpt_vol, "Rate %d.%03d%%\n",
1488                             prio / 1000, prio % 1000);
1489                 } else {
1490                         prio = vol_pg->VolumeSettings.Settings
1491                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1492                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1493                             prio ? "High" : "Low");
1494                 }
1495 #if __FreeBSD_version >= 500000
1496                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1497                             "blocks remaining\n", (uintmax_t)left,
1498                             (uintmax_t)total);
1499 #else
1500                 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1501                             "blocks remaining\n", (uint64_t)left,
1502                             (uint64_t)total);
1503 #endif
1504
1505                 /* Periodically report on sync progress. */
1506                 mpt_schedule_raid_refresh(mpt);
1507         }
1508
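        /*
         * Second pass over the physical disks: announce newly
         * configured disks, report status changes, and drop entries
         * that are no longer referenced by the configuration.
         */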
1509         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1510                 struct mpt_raid_disk *mpt_disk;
1511                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1512                 int m;
1513
1514                 mpt_disk = &mpt->raid_disks[i];
1515                 disk_pg = &mpt_disk->config_page;
1516
1517                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1518                         continue;
1519
1520                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1521                  == MPT_RDF_ANNOUNCED) {
1522                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1523                         mpt_disk->flags = 0;
1524                         mpt->raid_rescan++;
1525                         continue;
1526                 }
1527
1528                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1529
1530                         mpt_announce_disk(mpt, mpt_disk);
1531                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1532                 }
1533
1534                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1535                         continue;
1536
1537                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1538                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1539                 if (disk_pg->PhysDiskStatus.Flags == 0)
1540                         continue;
1541
1542                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1543                 for (m = 1; m <= 0x80; m <<= 1) {
1544                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1545                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1546                                 mpt_prtc(mpt, " Out-Of-Sync");
1547                                 break;
1548                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1549                                 mpt_prtc(mpt, " Quiesced");
1550                                 break;
1551                         default:
1552                                 break;
1553                         }
1554                 }
1555                 mpt_prtc(mpt, " )\n");
1556         }
1557
1558         mpt->raid_nonopt_volumes = nonopt_volumes;
1559         return (0);
1560 }
1561
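/*
 * Callout handler: wake the RAID monitoring thread so that it refreshes
 * the RAID state and reports on resync progress.
 */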
1562 static void
1563 mpt_raid_timer(void *arg)
1564 {
1565         struct mpt_softc *mpt;
1566
1567         mpt = (struct mpt_softc *)arg;
1568         MPT_LOCK(mpt);
1569         mpt_raid_wakeup(mpt);
1570         MPT_UNLOCK(mpt);
1571 }
1572
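/*
 * Arm (or re-arm) the RAID timer to fire mpt_raid_timer() after
 * MPT_RAID_SYNC_REPORT_INTERVAL ticks.
 */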
1573 void
1574 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1575 {
1576         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1577                       mpt_raid_timer, mpt);
1578 }
1579
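/*
 * Release all memory allocated for RAID support: the per-volume config
 * pages, the volume and disk arrays, and the cached IOC pages 2 and 3.
 */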
1580 void
1581 mpt_raid_free_mem(struct mpt_softc *mpt)
1582 {
1583
1584         if (mpt->raid_volumes) {
1585                 struct mpt_raid_volume *mpt_raid;
1586                 int i;
1587                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1588                         mpt_raid = &mpt->raid_volumes[i];
1589                         if (mpt_raid->config_page) {
1590                                 free(mpt_raid->config_page, M_DEVBUF);
1591                                 mpt_raid->config_page = NULL;
1592                         }
1593                 }
1594                 free(mpt->raid_volumes, M_DEVBUF);
1595                 mpt->raid_volumes = NULL;
1596         }
1597         if (mpt->raid_disks) {
1598                 free(mpt->raid_disks, M_DEVBUF);
1599                 mpt->raid_disks = NULL;
1600         }
1601         if (mpt->ioc_page2) {
1602                 free(mpt->ioc_page2, M_DEVBUF);
1603                 mpt->ioc_page2 = NULL;
1604         }
1605         if (mpt->ioc_page3) {
1606                 free(mpt->ioc_page3, M_DEVBUF);
1607                 mpt->ioc_page3 = NULL;
1608         }
1609         mpt->raid_max_volumes = 0;
1610         mpt->raid_max_disks = 0;
1611 }
1612
1613 #if __FreeBSD_version >= 500000
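/*
 * Set the adapter-wide resync rate and push it out to every active
 * volume.  Rates outside the MPT_RAID_RESYNC_RATE_MIN..MAX range are
 * rejected unless the "no change" value is given.
 */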
1614 static int
1615 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1616 {
1617         struct mpt_raid_volume *mpt_vol;
1618
1619         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1620           || rate < MPT_RAID_RESYNC_RATE_MIN)
1621          && rate != MPT_RAID_RESYNC_RATE_NC)
1622                 return (EINVAL);
1623
1624         MPT_LOCK(mpt);
1625         mpt->raid_resync_rate = rate;
1626         RAID_VOL_FOREACH(mpt, mpt_vol) {
1627                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1628                         continue;
1629                 }
1630                 mpt_verify_resync_rate(mpt, mpt_vol);
1631         }
1632         MPT_UNLOCK(mpt);
1633         return (0);
1634 }
1635
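/*
 * Set the default volume queue depth (1-255) and apply it to every
 * active volume via its CAM path.
 */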
1636 static int
1637 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1638 {
1639         struct mpt_raid_volume *mpt_vol;
1640
1641         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1642                 return (EINVAL);
1643
1644         MPT_LOCK(mpt);
1645         mpt->raid_queue_depth = vol_queue_depth;
1646         RAID_VOL_FOREACH(mpt, mpt_vol) {
1647                 struct cam_path *path;
1648                 int error;
1649
1650                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1651                         continue;
1652
1653                 mpt->raid_rescan = 0;
1654
1655                 MPTLOCK_2_CAMLOCK(mpt);
1656                 error = xpt_create_path(&path, xpt_periph,
1657                                         cam_sim_path(mpt->sim),
1658                                         mpt_vol->config_page->VolumeID,
1659                                         /*lun*/0);
1660                 if (error != CAM_REQ_CMP) {
1661                         CAMLOCK_2_MPTLOCK(mpt);
1662                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1663                         continue;
1664                 }
1665                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1666                 xpt_free_path(path);
1667                 CAMLOCK_2_MPTLOCK(mpt);
1668         }
1669         MPT_UNLOCK(mpt);
1670         return (0);
1671 }
1672
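/*
 * Change the member write-cache-enable (MWCE) policy and re-verify the
 * setting on every active volume.  The first change after boot is
 * treated specially in order to warn about write caching left enabled
 * across an unsafe shutdown.
 */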
1673 static int
1674 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1675 {
1676         struct mpt_raid_volume *mpt_vol;
1677         int force_full_resync;
1678
1679         MPT_LOCK(mpt);
1680         if (mwce == mpt->raid_mwce_setting) {
1681                 MPT_UNLOCK(mpt);
1682                 return (0);
1683         }
1684
1685         /*
1686          * Catch MWCE being left on due to a failed shutdown.  Since
1687          * sysctls cannot be set by the loader, we treat the first
1688          * setting of this variable specially and force a full volume
1689          * resync if MWCE is enabled and a resync is in progress.
1690          */
1691         force_full_resync = 0;
1692         if (mpt->raid_mwce_set == 0
1693          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1694          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1695                 force_full_resync = 1;
1696
1697         mpt->raid_mwce_setting = mwce;
1698         RAID_VOL_FOREACH(mpt, mpt_vol) {
1699                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1700                 int resyncing;
1701                 int mwce;
1702
1703                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1704                         continue;
1705
1706                 vol_pg = mpt_vol->config_page;
1707                 resyncing = vol_pg->VolumeStatus.Flags
1708                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1709                 mwce = vol_pg->VolumeSettings.Settings
1710                      & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1711                 if (force_full_resync && resyncing && mwce) {
1712
1713                         /*
1714                          * XXX disable/enable volume should force a resync,
1715                          *     but we'll need to quiesce, drain, and restart
1716                          *     I/O to do that.
1717                          */
1718                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1719                                     "detected.  Suggest full resync.\n");
1720                 }
1721                 mpt_verify_mwce(mpt, mpt_vol);
1722         }
1723         mpt->raid_mwce_set = 1;
1724         MPT_UNLOCK(mpt);
1725         return (0);
1726 }
1727 const char *mpt_vol_mwce_strs[] =
1728 {
1729         "On",
1730         "Off",
1731         "On-During-Rebuild",
1732         "NC"
1733 };
1734
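/*
 * Sysctl handler for "vol_member_wce": report the current member WCE
 * policy as a string and accept one of mpt_vol_mwce_strs[] as the new
 * setting.
 */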
1735 static int
1736 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1737 {
1738         char inbuf[20];
1739         struct mpt_softc *mpt;
1740         const char *str;
1741         int error;
1742         u_int size;
1743         u_int i;
1744
1745         GIANT_REQUIRED;
1746
1747         mpt = (struct mpt_softc *)arg1;
1748         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1749         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1750         if (error || !req->newptr) {
1751                 return (error);
1752         }
1753
1754         size = req->newlen - req->newidx;
1755         if (size >= sizeof(inbuf)) {
1756                 return (EINVAL);
1757         }
1758
1759         error = SYSCTL_IN(req, inbuf, size);
1760         if (error) {
1761                 return (error);
1762         }
1763         inbuf[size] = '\0';
1764         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1765                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1766                         return (mpt_raid_set_vol_mwce(mpt, i));
1767                 }
1768         }
1769         return (EINVAL);
1770 }
1771
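/*
 * Sysctl handler for "vol_resync_rate": read or update the volume
 * resync priority.
 */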
1772 static int
1773 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1774 {
1775         struct mpt_softc *mpt;
1776         u_int raid_resync_rate;
1777         int error;
1778
1779         GIANT_REQUIRED;
1780
1781         mpt = (struct mpt_softc *)arg1;
1782         raid_resync_rate = mpt->raid_resync_rate;
1783
1784         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1785         if (error || !req->newptr) {
1786                 return (error);
1787         }
1788
1789         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1790 }
1791
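/*
 * Sysctl handler for "vol_queue_depth": read or update the default
 * volume queue depth.
 */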
1792 static int
1793 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1794 {
1795         struct mpt_softc *mpt;
1796         u_int raid_queue_depth;
1797         int error;
1798
1799         GIANT_REQUIRED;
1800
1801         mpt = (struct mpt_softc *)arg1;
1802         raid_queue_depth = mpt->raid_queue_depth;
1803
1804         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1805         if (error || !req->newptr) {
1806                 return (error);
1807         }
1808
1809         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1810 }
1811
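/*
 * Attach the RAID-related sysctl nodes under this device's sysctl tree.
 */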
1812 static void
1813 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1814 {
1815         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1816         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1817
1818         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1819                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1820                         mpt_raid_sysctl_vol_member_wce, "A",
1821                         "volume member WCE (On,Off,On-During-Rebuild,NC)");
1822
1823         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1824                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1825                         mpt_raid_sysctl_vol_queue_depth, "I",
1826                         "default volume queue depth");
1827
1828         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1829                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1830                         mpt_raid_sysctl_vol_resync_rate, "I",
1831                         "volume resync priority (0 == NC, 1 - 255)");
1832         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1833                         "nonoptimal_volumes", CTLFLAG_RD,
1834                         &mpt->raid_nonopt_volumes, 0,
1835                         "number of nonoptimal volumes");
1836 }
1837 #endif