1 /*-
2  * Routines for handling the integrated RAID features of LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  * 
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a long way toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  */
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45
46 #include <dev/mpt/mpt.h>
47 #include <dev/mpt/mpt_raid.h>
48
49 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
50 #include "dev/mpt/mpilib/mpi_raid.h"
51
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_sim.h>
55 #include <cam/cam_xpt_sim.h>
56
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define GIANT_REQUIRED
60 #endif
61 #include <cam/cam_periph.h>
62
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66
67 #include <machine/stdarg.h>
68
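/*
 * Completion data for a RAID action request.  mpt_raid_reply_frame_handler()
 * copies the ActionStatus and ActionData out of the reply frame into this
 * structure, which is stashed in the request buffer just past the
 * MSG_RAID_ACTION_REQUEST (see REQ_TO_RAID_ACTION_RESULT below), so that
 * callers of mpt_issue_raid_req() can inspect the result after completion.
 */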
69 struct mpt_raid_action_result
70 {
71         union {
72                 MPI_RAID_VOL_INDICATOR  indicator_struct;
73                 uint32_t                new_settings;
74                 uint8_t                 phys_disk_num;
75         } action_data;
76         uint16_t                        action_status;
77 };
78
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80         (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
81
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83
84
85 static mpt_probe_handler_t      mpt_raid_probe;
86 static mpt_attach_handler_t     mpt_raid_attach;
87 static mpt_enable_handler_t     mpt_raid_enable;
88 static mpt_event_handler_t      mpt_raid_event;
89 static mpt_shutdown_handler_t   mpt_raid_shutdown;
90 static mpt_reset_handler_t      mpt_raid_ioc_reset;
91 static mpt_detach_handler_t     mpt_raid_detach;
92
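/*
 * Personality glue: these entry points are registered with the core mpt(4)
 * driver.  The RAID personality layers on top of the CAM/SCSI personality
 * (see MPT_PERSONALITY_DEPEND below), since it relies on the physical disk
 * SIM that mpt_cam provides.
 */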
93 static struct mpt_personality mpt_raid_personality =
94 {
95         .name           = "mpt_raid",
96         .probe          = mpt_raid_probe,
97         .attach         = mpt_raid_attach,
98         .enable         = mpt_raid_enable,
99         .event          = mpt_raid_event,
100         .reset          = mpt_raid_ioc_reset,
101         .shutdown       = mpt_raid_shutdown,
102         .detach         = mpt_raid_detach,
103 };
104
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110                                         MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117                            struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *mpt,
120                             struct mpt_raid_volume *mpt_vol);
121 static void mpt_adjust_queue_depth(struct mpt_softc *mpt,
122                                    struct mpt_raid_volume *mpt_vol,
123                                    struct cam_path *path);
124 static void mpt_raid_sysctl_attach(struct mpt_softc *mpt);
125
126 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
127
128 const char *
129 mpt_vol_type(struct mpt_raid_volume *vol)
130 {
131         switch (vol->config_page->VolumeType) {
132         case MPI_RAID_VOL_TYPE_IS:
133                 return ("RAID-0");
134         case MPI_RAID_VOL_TYPE_IME:
135                 return ("RAID-1E");
136         case MPI_RAID_VOL_TYPE_IM:
137                 return ("RAID-1");
138         default:
139                 return ("Unknown");
140         }
141 }
142
143 const char *
144 mpt_vol_state(struct mpt_raid_volume *vol)
145 {
146         switch (vol->config_page->VolumeStatus.State) {
147         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
148                 return ("Optimal");
149         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
150                 return ("Degraded");
151         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
152                 return ("Failed");
153         default:
154                 return ("Unknown");
155         }
156 }
157
158 const char *
159 mpt_disk_state(struct mpt_raid_disk *disk)
160 {
161         switch (disk->config_page.PhysDiskStatus.State) {
162         case MPI_PHYSDISK0_STATUS_ONLINE:
163                 return ("Online");
164         case MPI_PHYSDISK0_STATUS_MISSING:
165                 return ("Missing");
166         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
167                 return ("Incompatible");
168         case MPI_PHYSDISK0_STATUS_FAILED:
169                 return ("Failed");
170         case MPI_PHYSDISK0_STATUS_INITIALIZING:
171                 return ("Initializing");
172         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
173                 return ("Offline Requested");
174         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
175                 return ("Failed per Host Request");
176         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
177                 return ("Offline");
178         default:
179                 return ("Unknown");
180         }
181 }
182
183 void
184 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
185             const char *fmt, ...)
186 {
187         va_list ap;
188
189         printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
190                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
191                vol->config_page->VolumeBus, vol->config_page->VolumeID);
192         va_start(ap, fmt);
193         vprintf(fmt, ap);
194         va_end(ap);
195 }
196
197 void
198 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
199              const char *fmt, ...)
200 {
201         va_list ap;
202
203         if (disk->volume != NULL) {
204                 printf("(%s:vol%d:%d): ",
205                        device_get_nameunit(mpt->dev),
206                        disk->volume->config_page->VolumeID,
207                        disk->member_number);
208         } else {
209                 printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
210                        disk->config_page.PhysDiskBus,
211                        disk->config_page.PhysDiskID);
212         }
213         va_start(ap, fmt);
214         vprintf(fmt, ap);
215         va_end(ap);
216 }
217
218 static void
219 mpt_raid_async(void *callback_arg, u_int32_t code,
220                struct cam_path *path, void *arg)
221 {
222         struct mpt_softc *mpt;
223
224         mpt = (struct mpt_softc*)callback_arg;
225         switch (code) {
226         case AC_FOUND_DEVICE:
227         {
228                 struct ccb_getdev *cgd;
229                 struct mpt_raid_volume *mpt_vol;
230
231                 cgd = (struct ccb_getdev *)arg;
232                 if (cgd == NULL) {
233                         break;
234                 }
235
236                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
237                          cgd->ccb_h.target_id);
238                 
239                 RAID_VOL_FOREACH(mpt, mpt_vol) {
240                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
241                                 continue;
242
243                         if (mpt_vol->config_page->VolumeID 
244                          == cgd->ccb_h.target_id) {
245                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
246                                 break;
247                         }
248                 }
249         }
250         default:
251                 break;
252         }
253 }
254
255 int
256 mpt_raid_probe(struct mpt_softc *mpt)
257 {
258         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
259                 return (ENODEV);
260         }
261         return (0);
262 }
263
264 int
265 mpt_raid_attach(struct mpt_softc *mpt)
266 {
267         struct ccb_setasync csa;
268         mpt_handler_t    handler;
269         int              error;
270
271         mpt_callout_init(&mpt->raid_timer);
272
273         handler.reply_handler = mpt_raid_reply_handler;
274         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
275                                      &raid_handler_id);
276         if (error != 0) {
277                 mpt_prt(mpt, "Unable to register RAID handler!\n");
278                 goto cleanup;
279         }
280
281         error = mpt_spawn_raid_thread(mpt);
282         if (error != 0) {
283                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
284                 goto cleanup;
285         }
286  
287         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
288         csa.ccb_h.func_code = XPT_SASYNC_CB;
289         csa.event_enable = AC_FOUND_DEVICE;
290         csa.callback = mpt_raid_async;
291         csa.callback_arg = mpt;
292         MPTLOCK_2_CAMLOCK(mpt);
293         xpt_action((union ccb *)&csa);
294         CAMLOCK_2_MPTLOCK(mpt);
295         if (csa.ccb_h.status != CAM_REQ_CMP) {
296                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
297                         "CAM async handler.\n");
298         }
299
300         mpt_raid_sysctl_attach(mpt);
301         return (0);
302 cleanup:
303         mpt_raid_detach(mpt);
304         return (error);
305 }
306
307 int
308 mpt_raid_enable(struct mpt_softc *mpt)
309 {
310         return (0);
311 }
312
313 void
314 mpt_raid_detach(struct mpt_softc *mpt)
315 {
316         struct ccb_setasync csa;
317         mpt_handler_t handler;
318
319         callout_stop(&mpt->raid_timer);
320         mpt_terminate_raid_thread(mpt); 
321
322         handler.reply_handler = mpt_raid_reply_handler;
323         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
324                                raid_handler_id);
325         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
326         csa.ccb_h.func_code = XPT_SASYNC_CB;
327         csa.event_enable = 0;
328         csa.callback = mpt_raid_async;
329         csa.callback_arg = mpt;
330         MPTLOCK_2_CAMLOCK(mpt);
331         xpt_action((union ccb *)&csa);
332         CAMLOCK_2_MPTLOCK(mpt);
333 }
334
335 static void
336 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
337 {
338         /* Nothing to do yet. */
339 }
340
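/*
 * Event descriptions, indexed by the MPI_EVENT_RAID_RC_* reason code carried
 * in an integrated RAID event notification.
 */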
341 static const char *raid_event_txt[] =
342 {
343         "Volume Created",
344         "Volume Deleted",
345         "Volume Settings Changed",
346         "Volume Status Changed",
347         "Volume Physical Disk Membership Changed",
348         "Physical Disk Created",
349         "Physical Disk Deleted",
350         "Physical Disk Settings Changed",
351         "Physical Disk Status Changed",
352         "Domain Validation Required",
353         "SMART Data Received",
354         "Replace Action Started",
355 };
356
357 static int
358 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
359                MSG_EVENT_NOTIFY_REPLY *msg)
360 {
361         EVENT_DATA_RAID *raid_event;
362         struct mpt_raid_volume *mpt_vol;
363         struct mpt_raid_disk *mpt_disk;
364         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
365         int i;
366         int print_event;
367
368         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
369                 return (0);
370         }
371
372         raid_event = (EVENT_DATA_RAID *)&msg->Data;
373
374         mpt_vol = NULL;
375         vol_pg = NULL;
376         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
377                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
378                         mpt_vol = &mpt->raid_volumes[i];
379                         vol_pg = mpt_vol->config_page;
380
381                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
382                                 continue;
383
384                         if (vol_pg->VolumeID == raid_event->VolumeID
385                          && vol_pg->VolumeBus == raid_event->VolumeBus)
386                                 break;
387                 }
388                 if (i >= mpt->ioc_page2->MaxVolumes) {
389                         mpt_vol = NULL;
390                         vol_pg = NULL;
391                 }
392         }
393
394         mpt_disk = NULL;
395         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
396                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
397                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
398                         mpt_disk = NULL;
399                 }
400         }
401
402         print_event = 1;
403         switch(raid_event->ReasonCode) {
404         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
405         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
406                 break;
407         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
408                 if (mpt_vol != NULL) {
409                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
410                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
411                         } else {
412                                 /*
413                                  * Coalesce status messages into one
414                                  * per background run of our RAID thread.
415                                  * This removes "spurious" status messages
416                                  * from our output.
417                                  */
418                                 print_event = 0;
419                         }
420                 }
421                 break;
422         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
423         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
424                 mpt->raid_rescan++;
425                 if (mpt_vol != NULL) {
426                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
427                 }
428                 break;
429         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
430         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
431                 mpt->raid_rescan++;
432                 break;
433         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
434         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
435                 mpt->raid_rescan++;
436                 if (mpt_disk != NULL) {
437                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
438                 }
439                 break;
440         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
441                 mpt->raid_rescan++;
442                 break;
443         case MPI_EVENT_RAID_RC_SMART_DATA:
444         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
445                 break;
446         }
447
448         if (print_event) {
449                 if (mpt_disk != NULL) {
450                         mpt_disk_prt(mpt, mpt_disk, "");
451                 } else if (mpt_vol != NULL) {
452                         mpt_vol_prt(mpt, mpt_vol, "");
453                 } else {
454                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
455                                 raid_event->VolumeID);
456
457                         if (raid_event->PhysDiskNum != 0xFF)
458                                 mpt_prtc(mpt, ":%d): ",
459                                          raid_event->PhysDiskNum);
460                         else
461                                 mpt_prtc(mpt, "): ");
462                 }
463
464                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
465                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
466                                  raid_event->ReasonCode);
467                 else
468                         mpt_prtc(mpt, "%s\n",
469                                  raid_event_txt[raid_event->ReasonCode]);
470         }
471
472         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
473                 /* XXX Use CAM's print sense for this... */
474                 if (mpt_disk != NULL)
475                         mpt_disk_prt(mpt, mpt_disk, "");
476                 else
477                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
478                             raid_event->VolumeBus, raid_event->VolumeID,
479                             raid_event->PhysDiskNum);
480                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
481                          raid_event->ASC, raid_event->ASCQ);
482         }
483
484         mpt_raid_wakeup(mpt);
485         return (1);
486 }
487
488 static void
489 mpt_raid_shutdown(struct mpt_softc *mpt)
490 {
491         struct mpt_raid_volume *mpt_vol;
492
493         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
494                 return;
495         }
496
497         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
498         RAID_VOL_FOREACH(mpt, mpt_vol) {
499                 mpt_verify_mwce(mpt, mpt_vol);
500         }
501 }
502
503 static int
504 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
505     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
506 {
507         int free_req;
508
509         if (req == NULL)
510                 return (TRUE);
511
512         free_req = TRUE;
513         if (reply_frame != NULL)
514                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
515 #ifdef NOTYET
516         else if (req->ccb != NULL) {
517                 /* Complete Quiesce CCB with error... */
518         }
519 #endif
520
521         req->state &= ~REQ_STATE_QUEUED;
522         req->state |= REQ_STATE_DONE;
523         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
524
525         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
526                 wakeup(req);
527         } else if (free_req) {
528                 mpt_free_request(mpt, req);
529         }
530
531         return (TRUE);
532 }
533
534 /*
535  * Parse additional completion information in the reply
536  * frame for RAID I/O requests.
537  */
538 static int
539 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
540     MSG_DEFAULT_REPLY *reply_frame)
541 {
542         MSG_RAID_ACTION_REPLY *reply;
543         struct mpt_raid_action_result *action_result;
544         MSG_RAID_ACTION_REQUEST *rap;
545
546         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
547         req->IOCStatus = le16toh(reply->IOCStatus);
548         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
549         
550         switch (rap->Action) {
551         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
552                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
553                 break;
554         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
555                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
556                 break;
557         default:
558                 break;
559         }
560         action_result = REQ_TO_RAID_ACTION_RESULT(req);
561         memcpy(&action_result->action_data, &reply->ActionData,
562             sizeof(action_result->action_data));
563         action_result->action_status = reply->ActionStatus;
564         return (TRUE);
565 }
566
567 /*
568  * Utility routine to perform a RAID action command.
569  */
570 int
571 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
572                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
573                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
574                    int write, int wait)
575 {
576         MSG_RAID_ACTION_REQUEST *rap;
577         SGE_SIMPLE32 *se;
578
579         rap = req->req_vbuf;
580         memset(rap, 0, sizeof *rap);
581         rap->Action = Action;
582         rap->ActionDataWord = ActionDataWord;
583         rap->Function = MPI_FUNCTION_RAID_ACTION;
584         rap->VolumeID = vol->config_page->VolumeID;
585         rap->VolumeBus = vol->config_page->VolumeBus;
586         if (disk != NULL)
587                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
588         else
589                 rap->PhysDiskNum = 0xFF;
590         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
591         se->Address = addr;
592         MPI_pSGE_SET_LENGTH(se, len);
593         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
594             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
595             MPI_SGE_FLAGS_END_OF_LIST |
596             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
597         rap->MsgContext = htole32(req->index | raid_handler_id);
598
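        /*
         * Ring the doorbell and send the request; the reply frame is routed
         * back to mpt_raid_reply_handler() via the raid_handler_id encoded
         * in MsgContext above.
         */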
599         mpt_check_doorbell(mpt);
600         mpt_send_cmd(mpt, req);
601
602         if (wait) {
603                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
604                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
605         } else {
606                 return (0);
607         }
608 }
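
/*
 * Illustrative only (a sketch, not compiled): the typical calling pattern
 * for mpt_issue_raid_req() -- allocate a request, issue the action with
 * wait=TRUE, then check both the IOC status and the action result stashed
 * in the request buffer.  This mirrors mpt_enable_vol() and
 * mpt_verify_mwce() below; "mpt_vol" is assumed to be an active volume.
 */
#if 0
static void
mpt_raid_action_example(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
{
        request_t *req;
        struct mpt_raid_action_result *ar;
        int rv;

        req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
        if (req == NULL)
                return;

        rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
                                MPI_RAID_ACTION_INDICATOR_STRUCT,
                                /*ActionData*/0, /*addr*/0, /*len*/0,
                                /*write*/FALSE, /*wait*/TRUE);
        if (rv == ETIMEDOUT) {
                mpt_vol_prt(mpt, mpt_vol, "example RAID action timed-out\n");
                return;
        }

        ar = REQ_TO_RAID_ACTION_RESULT(req);
        if (rv != 0
         || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
         || ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS) {
                mpt_vol_prt(mpt, mpt_vol, "example RAID action failed: "
                            "%d:%x:%x\n", rv, req->IOCStatus,
                            ar->action_status);
        }
        mpt_free_request(mpt, req);
}
#endif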
609
610 /*************************** RAID Status Monitoring ***************************/
611 static int
612 mpt_spawn_raid_thread(struct mpt_softc *mpt)
613 {
614         int error;
615
616         /*
617          * Freeze out any CAM transactions until our thread
618          * is able to run at least once.  We need to update
619          * our RAID pages before accepting I/O or we may
620          * reject I/O to an ID we later determine is for a
621          * hidden physdisk.
622          */
623         xpt_freeze_simq(mpt->phydisk_sim, 1);
624         error = mpt_kthread_create(mpt_raid_thread, mpt,
625             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
626             "mpt_raid%d", mpt->unit);
627         if (error != 0)
628                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
629         return (error);
630 }
631
632 static void
633 mpt_terminate_raid_thread(struct mpt_softc *mpt)
634 {
635
636         if (mpt->raid_thread == NULL) {
637                 return;
638         }
639         mpt->shutdwn_raid = 1;
640         wakeup(mpt->raid_volumes);
641         /*
642          * Sleep on a slightly different location
643          * for this interlock just for added safety.
644          */
645         mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
646 }
647
648 static void
649 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
650 {
651         xpt_free_path(ccb->ccb_h.path);
652         free(ccb, M_DEVBUF);
653 }
654
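/*
 * RAID monitoring thread: sleep until mpt_raid_wakeup() posts work, refresh
 * the RAID configuration pages, release the physical disk SIM queue after
 * the first successful refresh, and rescan the bus whenever raid_rescan has
 * been bumped by the event handler.
 */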
655 static void
656 mpt_raid_thread(void *arg)
657 {
658         struct mpt_softc *mpt;
659         int firstrun;
660
661 #if __FreeBSD_version >= 500000
662         mtx_lock(&Giant);
663 #endif
664         mpt = (struct mpt_softc *)arg;
665         firstrun = 1;
666         MPT_LOCK(mpt);
667         while (mpt->shutdwn_raid == 0) {
668
669                 if (mpt->raid_wakeup == 0) {
670                         mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
671                         continue;
672                 }
673
674                 mpt->raid_wakeup = 0;
675
676                 if (mpt_refresh_raid_data(mpt)) {
677                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
678                         continue;
679                 }
680
681                 /*
682                  * Now that we have our first snapshot of RAID data,
683                  * allow CAM to access our physical disk bus.
684                  */
685                 if (firstrun) {
686                         firstrun = 0;
687                         MPTLOCK_2_CAMLOCK(mpt);
688                         xpt_release_simq(mpt->phydisk_sim, TRUE);
689                         CAMLOCK_2_MPTLOCK(mpt);
690                 }
691
692                 if (mpt->raid_rescan != 0) {
693                         union ccb *ccb;
694                         struct cam_path *path;
695                         int error;
696
697                         mpt->raid_rescan = 0;
698
699                         ccb = malloc(sizeof(*ccb), M_DEVBUF, M_WAITOK);
700                         error = xpt_create_path(&path, xpt_periph,
701                             cam_sim_path(mpt->phydisk_sim),
702                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
703                         if (error != CAM_REQ_CMP) {
704                                 free(ccb, M_DEVBUF);
705                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
706                         } else {
707                                 xpt_setup_ccb(&ccb->ccb_h, path, 5);
708                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
709                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
710                                 ccb->crcn.flags = CAM_FLAG_NONE;
711                                 MPTLOCK_2_CAMLOCK(mpt);
712                                 xpt_action(ccb);
713                                 CAMLOCK_2_MPTLOCK(mpt);
714                         }
715                 }
716         }
717         mpt->raid_thread = NULL;
718         wakeup(&mpt->raid_thread);
719         MPT_UNLOCK(mpt);
720 #if __FreeBSD_version >= 500000
721         mtx_unlock(&Giant);
722 #endif
723         kthread_exit(0);
724 }
725
726 #if 0
727 static void
728 mpt_raid_quiesce_timeout(void *arg)
729 {
730         /* Complete the CCB with error */
731         /* COWWWW */
732 }
733
734 static timeout_t mpt_raid_quiesce_timeout;
735 cam_status
736 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
737                       request_t *req)
738 {
739         union ccb *ccb;
740
741         ccb = req->ccb;
742         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
743                 return (CAM_REQ_CMP);
744
745         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
746                 int rv;
747
748                 mpt_disk->flags |= MPT_RDF_QUIESCING;
749                 xpt_freeze_devq(ccb->ccb_h.path, 1);
750                 
751                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
752                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
753                                         /*ActionData*/0, /*addr*/0,
754                                         /*len*/0, /*write*/FALSE,
755                                         /*wait*/FALSE);
756                 if (rv != 0)
757                         return (CAM_REQ_CMP_ERR);
758
759                 ccb->ccb_h.timeout_ch =
760                         timeout(mpt_raid_quiesce_timeout, (caddr_t)ccb, 5 * hz);
761 #if 0
762                 if (rv == ETIMEDOUT) {
763                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
764                                      "Quiesce Timed-out\n");
765                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
766                         return (CAM_REQ_CMP_ERR);
767                 }
768
769                 ar = REQ_TO_RAID_ACTION_RESULT(req);
770                 if (rv != 0
771                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
772                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
773                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
774                                     "%d:%x:%x\n", rv, req->IOCStatus,
775                                     ar->action_status);
776                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
777                         return (CAM_REQ_CMP_ERR);
778                 }
779 #endif
780                 return (CAM_REQ_INPROG);
781         }
782         return (CAM_REQUEUE_REQ);
783 }
784 #endif
785
786 /* XXX Ignores that there may be multiple busses/IOCs involved. */
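/*
 * Translate a target id seen on the RAID volume bus into the pass-through
 * PhysDiskID used on the physical disk SIM, failing if the target is not
 * an active member disk.
 */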
787 cam_status
788 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
789 {
790         struct mpt_raid_disk *mpt_disk;
791
792         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
793         if (ccb->ccb_h.target_id < mpt->raid_max_disks
794          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
795
796                 *tgt = mpt_disk->config_page.PhysDiskID;
797                 return (0);
798         }
799         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
800                  ccb->ccb_h.target_id);
801         return (-1);
802 }
803
804 /* XXX Ignores that there may be multiple busses/IOCs involved. */
805 int
806 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
807 {
808         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
809         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
810
811         ioc_vol = mpt->ioc_page2->RaidVolume;
812         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
813         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
814                 if (ioc_vol->VolumeID == tgt) {
815                         return (1);
816                 }
817         }
818         return (0);
819 }
820
821 #if 0
822 static void
823 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
824                int enable)
825 {
826         request_t *req;
827         struct mpt_raid_action_result *ar;
828         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
829         int enabled;
830         int rv;
831
832         vol_pg = mpt_vol->config_page;
833         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
834
835         /*
836          * If the setting matches the configuration,
837          * there is nothing to do.
838          */
839         if ((enabled && enable)
840          || (!enabled && !enable))
841                 return;
842
843         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
844         if (req == NULL) {
845                 mpt_vol_prt(mpt, mpt_vol,
846                             "mpt_enable_vol: Get request failed!\n");
847                 return;
848         }
849
850         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
851                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
852                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
853                                 /*data*/0, /*addr*/0, /*len*/0,
854                                 /*write*/FALSE, /*wait*/TRUE);
855         if (rv == ETIMEDOUT) {
856                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
857                             "%s Volume Timed-out\n",
858                             enable ? "Enable" : "Disable");
859                 return;
860         }
861         ar = REQ_TO_RAID_ACTION_RESULT(req);
862         if (rv != 0
863          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
864          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
865                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
866                             enable ? "Enable" : "Disable",
867                             rv, req->IOCStatus, ar->action_status);
868         }
869
870         mpt_free_request(mpt, req);
871 }
872 #endif
873
874 static void
875 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
876 {
877         request_t *req;
878         struct mpt_raid_action_result *ar;
879         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
880         uint32_t data;
881         int rv;
882         int resyncing;
883         int mwce;
884
885         vol_pg = mpt_vol->config_page;
886         resyncing = vol_pg->VolumeStatus.Flags
887                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
888         mwce = vol_pg->VolumeSettings.Settings
889              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
890
891         /*
892          * If the setting matches the configuration,
893          * there is nothing to do.
894          */
895         switch (mpt->raid_mwce_setting) {
896         case MPT_RAID_MWCE_REBUILD_ONLY:
897                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
898                         return;
899                 }
900                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
901                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
902                         /*
903                          * Wait one more status update to see if
904                          * resyncing gets enabled.  It gets disabled
905                          * temporarilly when WCE is changed.
906                          * temporarily when WCE is changed.
907                         return;
908                 }
909                 break;
910         case MPT_RAID_MWCE_ON:
911                 if (mwce)
912                         return;
913                 break;
914         case MPT_RAID_MWCE_OFF:
915                 if (!mwce)
916                         return;
917                 break;
918         case MPT_RAID_MWCE_NC:
919                 return;
920         }
921
922         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
923         if (req == NULL) {
924                 mpt_vol_prt(mpt, mpt_vol,
925                             "mpt_verify_mwce: Get request failed!\n");
926                 return;
927         }
928
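        /*
         * Build the settings word we want the IOC to apply by toggling the
         * WCE bit, then restore our in-core copy; it is only updated below
         * once the IOC accepts the change.
         */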
929         vol_pg->VolumeSettings.Settings ^=
930             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
931         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
932         vol_pg->VolumeSettings.Settings ^=
933             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
934         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
935                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
936                                 data, /*addr*/0, /*len*/0,
937                                 /*write*/FALSE, /*wait*/TRUE);
938         if (rv == ETIMEDOUT) {
939                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
940                             "Write Cache Enable Timed-out\n");
941                 return;
942         }
943         ar = REQ_TO_RAID_ACTION_RESULT(req);
944         if (rv != 0
945          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
946          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
947                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
948                             "%d:%x:%x\n", rv, req->IOCStatus,
949                             ar->action_status);
950         } else {
951                 vol_pg->VolumeSettings.Settings ^=
952                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
953         }
954         mpt_free_request(mpt, req);
955 }
956
957 static void
958 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
959 {
960         request_t *req;
961         struct mpt_raid_action_result *ar;
962         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
963         u_int prio;
964         int rv;
965
966         vol_pg = mpt_vol->config_page;
967
968         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
969                 return;
970
971         /*
972          * If the current RAID resync rate does not
973          * match our configured rate, update it.
974          */
975         prio = vol_pg->VolumeSettings.Settings
976              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
977         if (vol_pg->ResyncRate != 0
978          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
979
980                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
981                 if (req == NULL) {
982                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
983                                     "Get request failed!\n");
984                         return;
985                 }
986
987                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
988                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
989                                         mpt->raid_resync_rate, /*addr*/0,
990                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
991                 if (rv == ETIMEDOUT) {
992                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
993                                     "Resync Rate Setting Timed-out\n");
994                         return;
995                 }
996
997                 ar = REQ_TO_RAID_ACTION_RESULT(req);
998                 if (rv != 0
999                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1000                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1001                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1002                                     "%d:%x:%x\n", rv, req->IOCStatus,
1003                                     ar->action_status);
1004                 } else 
1005                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1006                 mpt_free_request(mpt, req);
1007         } else if ((prio && mpt->raid_resync_rate < 128)
1008                 || (!prio && mpt->raid_resync_rate >= 128)) {
1009                 uint32_t data;
1010
1011                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1012                 if (req == NULL) {
1013                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1014                                     "Get request failed!\n");
1015                         return;
1016                 }
1017
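                /*
                 * Same toggle trick as in mpt_verify_mwce(): build the new
                 * settings word without committing our copy until the IOC
                 * reports success.
                 */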
1018                 vol_pg->VolumeSettings.Settings ^=
1019                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1020                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1021                 vol_pg->VolumeSettings.Settings ^=
1022                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1023                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1024                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1025                                         data, /*addr*/0, /*len*/0,
1026                                         /*write*/FALSE, /*wait*/TRUE);
1027                 if (rv == ETIMEDOUT) {
1028                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1029                                     "Resync Rate Setting Timed-out\n");
1030                         return;
1031                 }
1032                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1033                 if (rv != 0
1034                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1035                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1036                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1037                                     "%d:%x:%x\n", rv, req->IOCStatus,
1038                                     ar->action_status);
1039                 } else {
1040                         vol_pg->VolumeSettings.Settings ^=
1041                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1042                 }
1043
1044                 mpt_free_request(mpt, req);
1045         }
1046 }
1047
1048 static void
1049 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1050                        struct cam_path *path)
1051 {
1052         struct ccb_relsim crs;
1053
1054         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1055         crs.ccb_h.func_code = XPT_REL_SIMQ;
1056         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1057         crs.openings = mpt->raid_queue_depth;
1058         xpt_action((union ccb *)&crs);
1059         if (crs.ccb_h.status != CAM_REQ_CMP)
1060                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1061                             "with CAM status %#x\n", crs.ccb_h.status);
1062 }
1063
1064 static void
1065 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1066 {
1067         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1068         u_int i;
1069
1070         vol_pg = mpt_vol->config_page;
1071         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1072         for (i = 1; i <= 0x8000; i <<= 1) {
1073                 switch (vol_pg->VolumeSettings.Settings & i) {
1074                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1075                         mpt_prtc(mpt, " Member-WCE");
1076                         break;
1077                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1078                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1079                         break;
1080                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1081                         mpt_prtc(mpt, " Hot-Plug-Spares");
1082                         break;
1083                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1084                         mpt_prtc(mpt, " High-Priority-ReSync");
1085                         break;
1086                 default:
1087                         break;
1088                 }
1089         }
1090         mpt_prtc(mpt, " )\n");
1091         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1092                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1093                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1094                           ? ":" : "s:");
1095                 for (i = 0; i < 8; i++) {
1096                         u_int mask;
1097
1098                         mask = 0x1 << i;
1099                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1100                                 continue;
1101                         mpt_prtc(mpt, " %d", i);
1102                 }
1103                 mpt_prtc(mpt, "\n");
1104         }
1105         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1106         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1107                 struct mpt_raid_disk *mpt_disk;
1108                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1109
1110                 mpt_disk = mpt->raid_disks 
1111                          + vol_pg->PhysDisk[i].PhysDiskNum;
1112                 disk_pg = &mpt_disk->config_page;
1113                 mpt_prtc(mpt, "      ");
1114                 mpt_prtc(mpt, "(%s:%d:%d): ", device_get_nameunit(mpt->dev),
1115                          disk_pg->PhysDiskBus, disk_pg->PhysDiskID);
1116                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1117                         mpt_prtc(mpt, "%s\n",
1118                                  mpt_disk->member_number == 0
1119                                ? "Primary" : "Secondary");
1120                 else
1121                         mpt_prtc(mpt, "Stripe Position %d\n",
1122                                  mpt_disk->member_number);
1123         }
1124 }
1125
1126 static void
1127 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1128 {
1129         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1130         u_int i;
1131
1132         disk_pg = &mpt_disk->config_page;
1133         mpt_disk_prt(mpt, mpt_disk,
1134                      "Physical (%s:%d:%d), Pass-thru (%s:%d:%d)\n",
1135                      device_get_nameunit(mpt->dev), disk_pg->PhysDiskBus, 
1136                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1137                      /*bus*/1, mpt_disk - mpt->raid_disks);
1138
1139         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1140                 return;
1141         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1142                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1143                    ? ":" : "s:");
1144         for (i = 0; i < 8; i++) {
1145                 u_int mask;
1146
1147                 mask = 0x1 << i;
1148                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1149                         continue;
1150                 mpt_prtc(mpt, " %d", i);
1151         }
1152         mpt_prtc(mpt, "\n");
1153 }
1154
1155 static void
1156 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1157                       IOC_3_PHYS_DISK *ioc_disk)
1158 {
1159         int rv;
1160
1161         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1162                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1163                                  &mpt_disk->config_page.Header,
1164                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1165         if (rv != 0) {
1166                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1167                         "Failed to read RAID Disk Hdr(%d)\n",
1168                         ioc_disk->PhysDiskNum);
1169                 return;
1170         }
1171         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1172                                    &mpt_disk->config_page.Header,
1173                                    sizeof(mpt_disk->config_page),
1174                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1175         if (rv != 0)
1176                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1177                         "Failed to read RAID Disk Page(%d)\n",
1178                         ioc_disk->PhysDiskNum);
1179 }
1180
1181 static void
1182 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1183                      CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1184 {
1185         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1186         struct mpt_raid_action_result *ar;
1187         request_t *req;
1188         int rv;
1189         int i;
1190
1191         vol_pg = mpt_vol->config_page;
1192         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1193         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME,
1194                                  /*PageNumber*/0, ioc_vol->VolumePageNumber,
1195                                  &vol_pg->Header, /*sleep_ok*/TRUE,
1196                                  /*timeout_ms*/5000);
1197         if (rv != 0) {
1198                 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1199                             "Failed to read RAID Vol Hdr(%d)\n",
1200                             ioc_vol->VolumePageNumber);
1201                 return;
1202         }
1203         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1204                                    &vol_pg->Header, mpt->raid_page0_len,
1205                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1206         if (rv != 0) {
1207                 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1208                             "Failed to read RAID Vol Page(%d)\n",
1209                             ioc_vol->VolumePageNumber);
1210                 return;
1211         }
1212         mpt_vol->flags |= MPT_RVF_ACTIVE;
1213
1214         /* Update disk entry array data. */
1215         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1216                 struct mpt_raid_disk *mpt_disk;
1217
1218                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1219                 mpt_disk->volume = mpt_vol;
1220                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1221                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM)
1222                         mpt_disk->member_number--;
1223         }
1224
1225         if ((vol_pg->VolumeStatus.Flags
1226            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1227                 return;
1228
1229         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1230         if (req == NULL) {
1231                 mpt_vol_prt(mpt, mpt_vol,
1232                             "mpt_refresh_raid_vol: Get request failed!\n");
1233                 return;
1234         }
1235         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1236                                 MPI_RAID_ACTION_INDICATOR_STRUCT,
1237                                 /*ActionWord*/0, /*addr*/0, /*len*/0,
1238                                 /*write*/FALSE, /*wait*/TRUE);
1239         if (rv == ETIMEDOUT) {
1240                 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1241                             "Progress indicator fetch timed out!\n");
1242                 return;
1243         }
1244
1245         ar = REQ_TO_RAID_ACTION_RESULT(req);
1246         if (rv == 0
1247          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1248          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1249                 memcpy(&mpt_vol->sync_progress,
1250                        &ar->action_data.indicator_struct,
1251                        sizeof(mpt_vol->sync_progress));
1252         } else {
1253                 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: "
1254                             "Progress indicator fetch failed!\n");
1255         }
1256         mpt_free_request(mpt, req);
1257 }
1258
1259 /*
1260  * Update in-core information about RAID support.  We update any entries
1261  * that didn't previously exist or have been marked as needing to
1262  * be updated by our event handler.  Interesting changes are displayed
1263  * to the console.
1264  */
1265 int
1266 mpt_refresh_raid_data(struct mpt_softc *mpt)
1267 {
1268         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1269         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1270         IOC_3_PHYS_DISK *ioc_disk;
1271         IOC_3_PHYS_DISK *ioc_last_disk;
1272         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1273         size_t len;
1274         int rv;
1275         int i;
1276         u_int nonopt_volumes;
1277
1278         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1279                 return (0);
1280         }
1281
1282         /*
1283          * Mark all items as unreferenced by the configuration.
1284          * This allows us to find, report, and discard stale
1285          * entries.
1286          */
1287         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1288                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1289         }
1290         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1291                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1292         }
1293
1294         /*
1295          * Get Physical Disk information.
1296          */
1297         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1298         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1299                                    &mpt->ioc_page3->Header, len,
1300                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1301         if (rv) {
1302                 mpt_prt(mpt,
1303                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1304                 return (-1);
1305         }
1306
1307         ioc_disk = mpt->ioc_page3->PhysDisk;
1308         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1309         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1310                 struct mpt_raid_disk *mpt_disk;
1311
1312                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1313                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1314                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1315                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1316
1317                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1318
1319                 }
1320                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1321                 mpt->raid_rescan++;
1322         }
1323
1324         /*
1325          * Refresh volume data.
1326          */
1327         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1328         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1329                                    &mpt->ioc_page2->Header, len,
1330                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1331         if (rv) {
1332                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1333                         "Failed to read IOC Page 2\n");
1334                 return (-1);
1335         }
1336
1337         ioc_vol = mpt->ioc_page2->RaidVolume;
1338         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1339         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1340                 struct mpt_raid_volume *mpt_vol;
1341
1342                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1343                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1344                 vol_pg = mpt_vol->config_page;
1345                 if (vol_pg == NULL)
1346                         continue;
1347                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1348                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1349                  || (vol_pg->VolumeStatus.Flags
1350                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1351
1352                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1353                 }
1354                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1355         }
1356
1357         nonopt_volumes = 0;
1358         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1359                 struct mpt_raid_volume *mpt_vol;
1360                 uint64_t total;
1361                 uint64_t left;
1362                 int m;
1363                 u_int prio;
1364
1365                 mpt_vol = &mpt->raid_volumes[i];
1366
1367                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1368                         continue;
1369
1370                 vol_pg = mpt_vol->config_page;
1371                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1372                  == MPT_RVF_ANNOUNCED) {
1373                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1374                         mpt_vol->flags = 0;
1375                         continue;
1376                 }
1377
1378                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1379
1380                         mpt_announce_vol(mpt, mpt_vol);
1381                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1382                 }
1383
1384                 if (vol_pg->VolumeStatus.State !=
1385                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1386                         nonopt_volumes++;
1387
1388                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1389                         continue;
1390
1391                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1392                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1393                             mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1394                 mpt_verify_mwce(mpt, mpt_vol);
1395
1396                 if (vol_pg->VolumeStatus.Flags == 0)
1397                         continue;
1398
1399                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1400                 for (m = 1; m <= 0x80; m <<= 1) {
1401                         switch (vol_pg->VolumeStatus.Flags & m) {
1402                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1403                                 mpt_prtc(mpt, " Enabled");
1404                                 break;
1405                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1406                                 mpt_prtc(mpt, " Quiesced");
1407                                 break;
1408                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1409                                 mpt_prtc(mpt, " Re-Syncing");
1410                                 break;
1411                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1412                                 mpt_prtc(mpt, " Inactive");
1413                                 break;
1414                         default:
1415                                 break;
1416                         }
1417                 }
1418                 mpt_prtc(mpt, " )\n");
1419
1420                 if ((vol_pg->VolumeStatus.Flags
1421                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1422                         continue;
1423
1424                 mpt_verify_resync_rate(mpt, mpt_vol);
1425
1426                 left = u64toh(mpt_vol->sync_progress.BlocksRemaining);
1427                 total = u64toh(mpt_vol->sync_progress.TotalBlocks);
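                /*
                 * A non-zero ResyncRate overrides the volume's high/low
                 * resync priority setting; scale the 8-bit rate to
                 * thousandths of a percent for display (0xFF == 100.000%).
                 */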
1428                 if (vol_pg->ResyncRate != 0) {
1429
1430                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1431                         mpt_vol_prt(mpt, mpt_vol, "Rate %d.%03d%%\n",
1432                             prio / 1000, prio % 1000);
1433                 } else {
1434                         prio = vol_pg->VolumeSettings.Settings
1435                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1436                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1437                             prio ? "High" : "Low");
1438                 }
1439 #if __FreeBSD_version >= 500000
1440                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1441                             "blocks remaining\n", (uintmax_t)left,
1442                             (uintmax_t)total);
1443 #else
1444                 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1445                             "blocks remaining\n", (uint64_t)left,
1446                             (uint64_t)total);
1447 #endif
1448
1449                 /* Periodically report on sync progress. */
1450                 mpt_schedule_raid_refresh(mpt);
1451         }
1452
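        /*
         * Do the same for physical disks: retire disks that are no longer
         * configured (bumping raid_rescan), announce new ones, and decode
         * the status flags of any disk whose state just changed.
         */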
1453         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1454                 struct mpt_raid_disk *mpt_disk;
1455                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1456                 int m;
1457
1458                 mpt_disk = &mpt->raid_disks[i];
1459                 disk_pg = &mpt_disk->config_page;
1460
1461                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1462                         continue;
1463
1464                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1465                  == MPT_RDF_ANNOUNCED) {
1466                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1467                         mpt_disk->flags = 0;
1468                         mpt->raid_rescan++;
1469                         continue;
1470                 }
1471
1472                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1473
1474                         mpt_announce_disk(mpt, mpt_disk);
1475                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1476                 }
1477
1478                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1479                         continue;
1480
1481                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1482                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1483                 if (disk_pg->PhysDiskStatus.Flags == 0)
1484                         continue;
1485
1486                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1487                 for (m = 1; m <= 0x80; m <<= 1) {
1488                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1489                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1490                                 mpt_prtc(mpt, " Out-Of-Sync");
1491                                 break;
1492                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1493                                 mpt_prtc(mpt, " Quiesced");
1494                                 break;
1495                         default:
1496                                 break;
1497                         }
1498                 }
1499                 mpt_prtc(mpt, " )\n");
1500         }
1501
1502         mpt->raid_nonopt_volumes = nonopt_volumes;
1503         return (0);
1504 }
1505
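/*
 * Callout handler for the RAID status timer: take the adapter lock and
 * signal the RAID thread via mpt_raid_wakeup().
 */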
1506 static void
1507 mpt_raid_timer(void *arg)
1508 {
1509         struct mpt_softc *mpt;
1510
1511         mpt = (struct mpt_softc *)arg;
1512         MPT_LOCK(mpt);
1513         mpt_raid_wakeup(mpt);
1514         MPT_UNLOCK(mpt);
1515 }
1516
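/*
 * Rearm the RAID status timer so mpt_raid_timer() fires again after
 * MPT_RAID_SYNC_REPORT_INTERVAL, giving periodic resync progress reports.
 */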
1517 void
1518 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1519 {
1520         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1521                       mpt_raid_timer, mpt);
1522 }
1523
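/*
 * Release all RAID bookkeeping: per-volume config pages, the volume and
 * disk arrays, and the cached copies of IOC Pages 2 and 3.
 */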
1524 void
1525 mpt_raid_free_mem(struct mpt_softc *mpt)
1526 {
1527
1528         if (mpt->raid_volumes) {
1529                 struct mpt_raid_volume *mpt_raid;
1530                 int i;
1531                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1532                         mpt_raid = &mpt->raid_volumes[i];
1533                         if (mpt_raid->config_page) {
1534                                 free(mpt_raid->config_page, M_DEVBUF);
1535                                 mpt_raid->config_page = NULL;
1536                         }
1537                 }
1538                 free(mpt->raid_volumes, M_DEVBUF);
1539                 mpt->raid_volumes = NULL;
1540         }
1541         if (mpt->raid_disks) {
1542                 free(mpt->raid_disks, M_DEVBUF);
1543                 mpt->raid_disks = NULL;
1544         }
1545         if (mpt->ioc_page2) {
1546                 free(mpt->ioc_page2, M_DEVBUF);
1547                 mpt->ioc_page2 = NULL;
1548         }
1549         if (mpt->ioc_page3) {
1550                 free(mpt->ioc_page3, M_DEVBUF);
1551                 mpt->ioc_page3 = NULL;
1552         }
1553         mpt->raid_max_volumes = 0;
1554         mpt->raid_max_disks = 0;
1555 }
1556
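/*
 * Validate the requested resync rate (MPT_RAID_RESYNC_RATE_MIN..MAX or
 * MPT_RAID_RESYNC_RATE_NC) and apply it to every active volume.
 */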
1557 static int
1558 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1559 {
1560         struct mpt_raid_volume *mpt_vol;
1561
1562         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1563           || rate < MPT_RAID_RESYNC_RATE_MIN)
1564          && rate != MPT_RAID_RESYNC_RATE_NC)
1565                 return (EINVAL);
1566
1567         MPT_LOCK(mpt);
1568         mpt->raid_resync_rate = rate;
1569         RAID_VOL_FOREACH(mpt, mpt_vol) {
1570                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1571                         continue;
1572                 }
1573                 mpt_verify_resync_rate(mpt, mpt_vol);
1574         }
1575         MPT_UNLOCK(mpt);
1576         return (0);
1577 }
1578
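/*
 * Record the new default volume queue depth and apply it to each active
 * volume through a temporary CAM path to that volume.
 */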
1579 static int
1580 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1581 {
1582         struct mpt_raid_volume *mpt_vol;
1583
1584         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1585                 return (EINVAL);
1586
1587         MPT_LOCK(mpt);
1588         mpt->raid_queue_depth = vol_queue_depth;
1589         RAID_VOL_FOREACH(mpt, mpt_vol) {
1590                 struct cam_path *path;
1591                 int error;
1592
1593                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1594                         continue;
1595
1596                 mpt->raid_rescan = 0;
1597
1598                 error = xpt_create_path(&path, xpt_periph,
1599                                         cam_sim_path(mpt->sim),
1600                                         mpt_vol->config_page->VolumeID,
1601                                         /*lun*/0);
1602                 if (error != CAM_REQ_CMP) {
1603                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1604                         continue;
1605                 }
1606                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1607                 xpt_free_path(path);
1608         }
1609         MPT_UNLOCK(mpt);
1610         return (0);
1611 }
1612
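/*
 * Change the member write-cache-enable policy for all volumes.  If the
 * policy is first being restricted to rebuilds after attach while a
 * resync is already under way with caching enabled, warn that an unsafe
 * shutdown may have occurred.
 */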
1613 static int
1614 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1615 {
1616         struct mpt_raid_volume *mpt_vol;
1617         int force_full_resync;
1618
1619         MPT_LOCK(mpt);
1620         if (mwce == mpt->raid_mwce_setting) {
1621                 MPT_UNLOCK(mpt);
1622                 return (0);
1623         }
1624
1625         /*
1626          * Catch MWCE being left on due to a failed shutdown.  Since
1627          * sysctls cannot be set by the loader, we treat the first
1628          * setting of this variable specially and force a full volume
1629          * resync if MWCE is enabled and a resync is in progress.
1630          */
1631         force_full_resync = 0;
1632         if (mpt->raid_mwce_set == 0
1633          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1634          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1635                 force_full_resync = 1;
1636
1637         mpt->raid_mwce_setting = mwce;
1638         RAID_VOL_FOREACH(mpt, mpt_vol) {
1639                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1640                 int resyncing;
1641                 int vol_mwce;
1642
1643                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1644                         continue;
1645
1646                 vol_pg = mpt_vol->config_page;
1647                 resyncing = vol_pg->VolumeStatus.Flags
1648                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1649                 vol_mwce = vol_pg->VolumeSettings.Settings
1650                      & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1651                 if (force_full_resync && resyncing && vol_mwce) {
1652
1653                         /*
1654                          * XXX disable/enable volume should force a resync,
1655                          *     but we'll need to quiesce, drain, and restart
1656                          *     I/O to do that.
1657                          */
1658                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1659                                     "detected.  Suggest full resync.\n");
1660                 }
1661                 mpt_verify_mwce(mpt, mpt_vol);
1662         }
1663         mpt->raid_mwce_set = 1;
1664         MPT_UNLOCK(mpt);
1665         return (0);
1666 }
1667
1668 const char *mpt_vol_mwce_strs[] =
1669 {
1670         "On",
1671         "Off",
1672         "On-During-Rebuild",
1673         "NC"
1674 };
1675
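/*
 * Sysctl handler for vol_member_wce: report the current policy as a
 * string and translate a written string back into an mpt_raid_mwce_t
 * value via mpt_vol_mwce_strs[].
 */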
1676 static int
1677 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1678 {
1679         char inbuf[20];
1680         struct mpt_softc *mpt;
1681         const char *str;
1682         int error;
1683         u_int size;
1684         u_int i;
1685
1686         GIANT_REQUIRED;
1687
1688         mpt = (struct mpt_softc *)arg1;
1689         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1690         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1691         if (error || !req->newptr) {
1692                 return (error);
1693         }
1694
1695         size = req->newlen - req->newidx;
1696         if (size >= sizeof(inbuf)) {
1697                 return (EINVAL);
1698         }
1699
1700         error = SYSCTL_IN(req, inbuf, size);
1701         if (error) {
1702                 return (error);
1703         }
1704         inbuf[size] = '\0';
1705         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1706                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1707                         return (mpt_raid_set_vol_mwce(mpt, i));
1708                 }
1709         }
1710         return (EINVAL);
1711 }
1712
1713 static int
1714 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1715 {
1716         struct mpt_softc *mpt;
1717         u_int raid_resync_rate;
1718         int error;
1719
1720         GIANT_REQUIRED;
1721
1722         mpt = (struct mpt_softc *)arg1;
1723         raid_resync_rate = mpt->raid_resync_rate;
1724
1725         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1726         if (error || !req->newptr) {
1727                 return (error);
1728         }
1729
1730         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1731 }
1732
1733 static int
1734 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1735 {
1736         struct mpt_softc *mpt;
1737         u_int raid_queue_depth;
1738         int error;
1739
1740         GIANT_REQUIRED;
1741
1742         mpt = (struct mpt_softc *)arg1;
1743         raid_queue_depth = mpt->raid_queue_depth;
1744
1745         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1746         if (error || !req->newptr) {
1747                 return (error);
1748         }
1749
1750         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1751 }
1752
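/*
 * Register the RAID tuning knobs under the adapter's sysctl tree.  A
 * hypothetical usage example, assuming the controller attached as mpt0
 * (so the tree is dev.mpt.0):
 *
 *   sysctl dev.mpt.0.vol_member_wce=On-During-Rebuild
 *   sysctl dev.mpt.0.vol_resync_rate=128
 *   sysctl dev.mpt.0.vol_queue_depth=64
 *   sysctl dev.mpt.0.nonoptimal_volumes
 *
 * The node names come from the SYSCTL_ADD_* calls below; the unit number
 * is only an assumption for illustration.
 */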
1753 static void
1754 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1755 {
1756 #if __FreeBSD_version >= 500000
1757         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1758         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1759
1760         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1761                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1762                         mpt_raid_sysctl_vol_member_wce, "A",
1763                         "volume member WCE (On, Off, On-During-Rebuild, NC)");
1764
1765         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1766                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1767                         mpt_raid_sysctl_vol_queue_depth, "I",
1768                         "default volume queue depth");
1769
1770         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1771                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1772                         mpt_raid_sysctl_vol_resync_rate, "I",
1773                         "volume resync priority (0 == NC, 1 - 255)");
1774         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1775                         "nonoptimal_volumes", CTLFLAG_RD,
1776                         &mpt->raid_nonopt_volumes, 0,
1777                         "number of nonoptimal volumes");
1778 #endif
1779 }