/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifdef __amd64__
#include <sys/abi_compat.h>
#endif
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/mpt_ioctl.h>

#include <dev/mpt/mpt.h>

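/*
 * Per-request results of a user RAID action, stored in the request
 * buffer immediately after the request frame (see mpt_user_raid_action()
 * and mpt_user_reply_handler()).
 */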
struct mpt_user_raid_action_result {
        uint32_t        volume_status;
        uint32_t        action_data[4];
        uint16_t        action_status;
};

struct mpt_page_memory {
        bus_dma_tag_t   tag;
        bus_dmamap_t    map;
        bus_addr_t      paddr;
        void            *vaddr;
};

static mpt_probe_handler_t      mpt_user_probe;
static mpt_attach_handler_t     mpt_user_attach;
static mpt_enable_handler_t     mpt_user_enable;
static mpt_ready_handler_t      mpt_user_ready;
static mpt_event_handler_t      mpt_user_event;
static mpt_reset_handler_t      mpt_user_reset;
static mpt_detach_handler_t     mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
        .name           = "mpt_user",
        .probe          = mpt_user_probe,
        .attach         = mpt_user_attach,
        .enable         = mpt_user_enable,
        .ready          = mpt_user_ready,
        .event          = mpt_user_event,
        .reset          = mpt_user_reset,
        .detach         = mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t      mpt_user_reply_handler;

static d_open_t         mpt_open;
static d_close_t        mpt_close;
static d_ioctl_t        mpt_ioctl;

static struct cdevsw mpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       mpt_open,
        .d_close =      mpt_close,
        .d_ioctl =      mpt_ioctl,
        .d_name =       "mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

static int
mpt_user_probe(struct mpt_softc *mpt)
{

        /* Attach to every controller. */
        return (0);
}

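/*
 * Register a reply handler for user-initiated commands and create the
 * /dev/mpt<unit> character device through which the ioctls are issued.
 */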
static int
mpt_user_attach(struct mpt_softc *mpt)
{
        mpt_handler_t handler;
        int error, unit;

        MPT_LOCK(mpt);
        handler.reply_handler = mpt_user_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
                                     &user_handler_id);
        MPT_UNLOCK(mpt);
        if (error != 0) {
                mpt_prt(mpt, "Unable to register user handler!\n");
                return (error);
        }
        unit = device_get_unit(mpt->dev);
        mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
            "mpt%d", unit);
        if (mpt->cdev == NULL) {
                MPT_LOCK(mpt);
                mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
                    user_handler_id);
                MPT_UNLOCK(mpt);
                return (ENOMEM);
        }
        mpt->cdev->si_drv1 = mpt;
        return (0);
}

static int
mpt_user_enable(struct mpt_softc *mpt)
{

        return (0);
}

static void
mpt_user_ready(struct mpt_softc *mpt)
{

}

static int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

        /* Someday we may want to let a user daemon listen for events? */
        return (0);
}

static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{

}

static void
mpt_user_detach(struct mpt_softc *mpt)
{
        mpt_handler_t handler;

        /* XXX: do a purge of pending requests? */
        destroy_dev(mpt->cdev);

        MPT_LOCK(mpt);
        handler.reply_handler = mpt_user_reply_handler;
        mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
            user_handler_id);
        MPT_UNLOCK(mpt);
}

static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        return (0);
}

static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        return (0);
}

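/*
 * Allocate a DMA-able staging buffer (capped at 16MB) that the IOC can
 * read from or write to on behalf of a user request; user data is copied
 * in and out of this buffer around the command.
 */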
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
        struct mpt_map_info mi;
        int error;

        page_mem->vaddr = NULL;

        /* Limit requests to 16M. */
        if (len > 16 * 1024 * 1024)
                return (ENOSPC);
        error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            len, 1, len, 0, &page_mem->tag);
        if (error)
                return (error);
        error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
        if (error) {
                bus_dma_tag_destroy(page_mem->tag);
                return (error);
        }
        mi.mpt = mpt;
        error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
            len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
        if (error == 0)
                error = mi.error;
        if (error) {
                bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
                bus_dma_tag_destroy(page_mem->tag);
                page_mem->vaddr = NULL;
                return (error);
        }
        page_mem->paddr = mi.phys;
        return (0);
}

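/*
 * Release a staging buffer set up by mpt_alloc_buffer(), if one was
 * allocated.
 */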
static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

        if (page_mem->vaddr == NULL)
                return;
        bus_dmamap_unload(page_mem->tag, page_mem->map);
        bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
        bus_dma_tag_destroy(page_mem->tag);
        page_mem->vaddr = NULL;
}

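/*
 * Issue a CONFIG PAGE_HEADER request and, on success, copy the returned
 * page header back into the caller's request structure.
 */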
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG *cfgp;
        int         error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = 0;
        params.PageLength = 0;
        params.PageNumber = page_req->header.PageNumber;
        params.PageType = page_req->header.PageType;
        params.PageAddress = le32toh(page_req->page_address);
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  TRUE, 5000);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_cfg_header timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
                cfgp = req->req_vbuf;
                bcopy(&cfgp->Header, &page_req->header,
                    sizeof(page_req->header));
        }
        mpt_free_request(mpt, req);
        return (0);
}

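/*
 * Read the current values of a config page into the staging buffer.  The
 * caller has already copied the page header describing the page into the
 * start of that buffer.
 */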
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
        CONFIG_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
                return (ENOMEM);
        }

        hdr = mpt_page->vaddr;
        params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
        params.PageAddress = le32toh(page_req->page_address);
        bus_dmamap_sync(mpt_page->tag, mpt_page->map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

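/*
 * Fetch the extended page header for an extended config page and copy it
 * back into the caller's request structure.
 */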
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG_REPLY *cfgp;
        int         error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = ext_page_req->header.PageVersion;
        params.PageLength = 0;
        params.PageNumber = ext_page_req->header.PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = le32toh(ext_page_req->page_address);
        params.ExtPageType = ext_page_req->header.ExtPageType;
        params.ExtPageLength = 0;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  TRUE, 5000);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
                return (ETIMEDOUT);
        }

        ext_page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
                cfgp = req->req_vbuf;
                ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
                ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
                ext_page_req->header.PageType = cfgp->Header.PageType;
                ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
                ext_page_req->header.ExtPageType = cfgp->ExtPageType;
        }
        mpt_free_request(mpt, req);
        return (0);
}

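/*
 * Read the current values of an extended config page into the staging
 * buffer using the extended page header the caller copied in.
 */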
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
        CONFIG_EXTENDED_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
                return (ENOMEM);
        }

        hdr = mpt_page->vaddr;
        params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = 0;
        params.PageNumber = hdr->PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = le32toh(ext_page_req->page_address);
        params.ExtPageType = hdr->ExtPageType;
        params.ExtPageLength = hdr->ExtPageLength;
        bus_dmamap_sync(mpt_page->tag, mpt_page->map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(ext_page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
                return (ETIMEDOUT);
        }

        ext_page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

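/*
 * Write a user-supplied config page back to the IOC.  Only pages whose
 * header marks them as changeable or persistent are accepted.
 */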
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
        CONFIG_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        u_int         hdr_attr;
        int           error;

        hdr = mpt_page->vaddr;
        hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
        if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
            hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
                mpt_prt(mpt, "page type 0x%x not changeable\n",
                        hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
                return (EINVAL);
        }

#if     0
        /*
         * We shouldn't mask off other bits here.
         */
        hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL)
                return (ENOMEM);

        bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);

        /*
         * There isn't any point in restoring stripped out attributes
         * if you then mask them going down to issue the request.
         */

        params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageAddress = le32toh(page_req->page_address);
#if     0
        /* Restore stripped out attributes */
        hdr->PageType |= hdr_attr;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
        params.PageType = hdr->PageType;
#endif
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
            BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

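/*
 * Reply handler for user RAID actions: record the IOC status, stash the
 * action/volume status and action data after the request frame, mark the
 * request done, and wake any waiter (or free a request that completed
 * after it had already timed out).
 */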
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        MSG_RAID_ACTION_REPLY *reply;
        struct mpt_user_raid_action_result *res;

        if (req == NULL)
                return (TRUE);

        if (reply_frame != NULL) {
                reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
                req->IOCStatus = le16toh(reply->IOCStatus);
                res = (struct mpt_user_raid_action_result *)
                    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
                res->action_status = reply->ActionStatus;
                res->volume_status = reply->VolumeStatus;
                bcopy(&reply->ActionData, res->action_data,
                    sizeof(res->action_data));
        }

        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);

        if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
                wakeup(req);
        } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
                /*
                 * Whew- we can free this request (late completion)
                 */
                mpt_free_request(mpt, req);
        }

        return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
        struct mpt_page_memory *mpt_page)
{
        request_t *req;
        struct mpt_user_raid_action_result *res;
        MSG_RAID_ACTION_REQUEST *rap;
        SGE_SIMPLE32 *se;
        int error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL)
                return (ENOMEM);
        rap = req->req_vbuf;
        memset(rap, 0, sizeof *rap);
        rap->Action = raid_act->action;
        rap->ActionDataWord = raid_act->action_data_word;
        rap->Function = MPI_FUNCTION_RAID_ACTION;
        rap->VolumeID = raid_act->volume_id;
        rap->VolumeBus = raid_act->volume_bus;
        rap->PhysDiskNum = raid_act->phys_disk_num;
        se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
        if (mpt_page->vaddr != NULL && raid_act->len != 0) {
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                se->Address = htole32(mpt_page->paddr);
                MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
                MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
                    MPI_SGE_FLAGS_END_OF_LIST |
                    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
                    MPI_SGE_FLAGS_IOC_TO_HOST)));
        }
        se->FlagsLength = htole32(se->FlagsLength);
        rap->MsgContext = htole32(req->index | user_handler_id);

        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, req);

        error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
            2000);
        if (error != 0) {
                /*
                 * Leave request so it can be cleaned up later.
                 */
                mpt_prt(mpt, "mpt_user_raid_action timed out\n");
                return (error);
        }

        raid_act->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_free_request(mpt, req);
                return (0);
        }

        res = (struct mpt_user_raid_action_result *)
            (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
        raid_act->volume_status = res->volume_status;
        raid_act->action_status = res->action_status;
        bcopy(res->action_data, raid_act->action_data,
            sizeof(res->action_data));
        if (mpt_page->vaddr != NULL)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

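/*
 * ioctl entry point.  The sketch below only illustrates how a userland
 * consumer might drive this interface; it is not part of the driver, and
 * the page type/number chosen are arbitrary examples.  A config page is
 * read in two steps: fetch its header with MPTIO_READ_CFG_HEADER, size a
 * buffer from the returned PageLength (counted in 32-bit words), seed the
 * buffer with that header, and then fetch the contents with
 * MPTIO_READ_CFG_PAGE:
 *
 *      struct mpt_cfg_page_req req;
 *      int fd = open("/dev/mpt0", O_RDWR);
 *
 *      memset(&req, 0, sizeof(req));
 *      req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *      req.header.PageNumber = 2;
 *      if (ioctl(fd, MPTIO_READ_CFG_HEADER, &req) < 0)
 *              err(1, "MPTIO_READ_CFG_HEADER");
 *      req.len = req.header.PageLength * 4;
 *      req.buf = calloc(1, req.len);
 *      memcpy(req.buf, &req.header, sizeof(req.header));
 *      if (ioctl(fd, MPTIO_READ_CFG_PAGE, &req) < 0)
 *              err(1, "MPTIO_READ_CFG_PAGE");
 */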
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
        struct mpt_softc *mpt;
        struct mpt_cfg_page_req *page_req;
        struct mpt_ext_cfg_page_req *ext_page_req;
        struct mpt_raid_action *raid_act;
        struct mpt_page_memory mpt_page;
#ifdef __amd64__
        struct mpt_cfg_page_req32 *page_req32;
        struct mpt_cfg_page_req page_req_swab;
        struct mpt_ext_cfg_page_req32 *ext_page_req32;
        struct mpt_ext_cfg_page_req ext_page_req_swab;
        struct mpt_raid_action32 *raid_act32;
        struct mpt_raid_action raid_act_swab;
#endif
        int error;

        mpt = dev->si_drv1;
        page_req = (void *)arg;
        ext_page_req = (void *)arg;
        raid_act = (void *)arg;
        mpt_page.vaddr = NULL;

#ifdef __amd64__
        /* Convert 32-bit structs to native ones. */
        page_req32 = (void *)arg;
        ext_page_req32 = (void *)arg;
        raid_act32 = (void *)arg;
        switch (cmd) {
        case MPTIO_READ_CFG_HEADER32:
        case MPTIO_READ_CFG_PAGE32:
        case MPTIO_WRITE_CFG_PAGE32:
                page_req = &page_req_swab;
                page_req->header = page_req32->header;
                page_req->page_address = page_req32->page_address;
                page_req->buf = PTRIN(page_req32->buf);
                page_req->len = page_req32->len;
                page_req->ioc_status = page_req32->ioc_status;
                break;
        case MPTIO_READ_EXT_CFG_HEADER32:
        case MPTIO_READ_EXT_CFG_PAGE32:
                ext_page_req = &ext_page_req_swab;
                ext_page_req->header = ext_page_req32->header;
                ext_page_req->page_address = ext_page_req32->page_address;
                ext_page_req->buf = PTRIN(ext_page_req32->buf);
                ext_page_req->len = ext_page_req32->len;
                ext_page_req->ioc_status = ext_page_req32->ioc_status;
                break;
        case MPTIO_RAID_ACTION32:
                raid_act = &raid_act_swab;
                raid_act->action = raid_act32->action;
                raid_act->volume_bus = raid_act32->volume_bus;
                raid_act->volume_id = raid_act32->volume_id;
                raid_act->phys_disk_num = raid_act32->phys_disk_num;
                raid_act->action_data_word = raid_act32->action_data_word;
                raid_act->buf = PTRIN(raid_act32->buf);
                raid_act->len = raid_act32->len;
                raid_act->volume_status = raid_act32->volume_status;
                bcopy(raid_act32->action_data, raid_act->action_data,
                    sizeof(raid_act->action_data));
                raid_act->action_status = raid_act32->action_status;
                raid_act->ioc_status = raid_act32->ioc_status;
                raid_act->write = raid_act32->write;
                break;
        }
#endif

        switch (cmd) {
#ifdef __amd64__
        case MPTIO_READ_CFG_HEADER32:
#endif
        case MPTIO_READ_CFG_HEADER:
                MPT_LOCK(mpt);
                error = mpt_user_read_cfg_header(mpt, page_req);
                MPT_UNLOCK(mpt);
                break;
#ifdef __amd64__
        case MPTIO_READ_CFG_PAGE32:
#endif
        case MPTIO_READ_CFG_PAGE:
                if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
                        error = EINVAL;
                        break;
                }
                error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
                if (error)
                        break;
                error = copyin(page_req->buf, mpt_page.vaddr,
                    sizeof(CONFIG_PAGE_HEADER));
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
                break;
#ifdef __amd64__
        case MPTIO_READ_EXT_CFG_HEADER32:
#endif
        case MPTIO_READ_EXT_CFG_HEADER:
                MPT_LOCK(mpt);
                error = mpt_user_read_extcfg_header(mpt, ext_page_req);
                MPT_UNLOCK(mpt);
                break;
#ifdef __amd64__
        case MPTIO_READ_EXT_CFG_PAGE32:
#endif
        case MPTIO_READ_EXT_CFG_PAGE:
                if (ext_page_req->len <
                    (int)sizeof(CONFIG_EXTENDED_PAGE_HEADER)) {
                        error = EINVAL;
                        break;
                }
                error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
                if (error)
                        break;
                error = copyin(ext_page_req->buf, mpt_page.vaddr,
                    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                error = copyout(mpt_page.vaddr, ext_page_req->buf,
                    ext_page_req->len);
                break;
#ifdef __amd64__
        case MPTIO_WRITE_CFG_PAGE32:
#endif
        case MPTIO_WRITE_CFG_PAGE:
                if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
                        error = EINVAL;
                        break;
                }
                error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
                if (error)
                        break;
                error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                break;
#ifdef __amd64__
        case MPTIO_RAID_ACTION32:
#endif
        case MPTIO_RAID_ACTION:
                if (raid_act->buf != NULL) {
                        error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
                        if (error)
                                break;
                        error = copyin(raid_act->buf, mpt_page.vaddr,
                            raid_act->len);
                        if (error)
                                break;
                }
                MPT_LOCK(mpt);
                error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                if (raid_act->buf != NULL)
                        error = copyout(mpt_page.vaddr, raid_act->buf,
                            raid_act->len);
                break;
        default:
                error = ENOIOCTL;
                break;
        }

        mpt_free_buffer(&mpt_page);

        if (error)
                return (error);

#ifdef __amd64__
        /* Convert native structs to 32-bit ones. */
        switch (cmd) {
        case MPTIO_READ_CFG_HEADER32:
        case MPTIO_READ_CFG_PAGE32:
        case MPTIO_WRITE_CFG_PAGE32:
                page_req32->header = page_req->header;
                page_req32->page_address = page_req->page_address;
                page_req32->buf = PTROUT(page_req->buf);
                page_req32->len = page_req->len;
                page_req32->ioc_status = page_req->ioc_status;
                break;
        case MPTIO_READ_EXT_CFG_HEADER32:
        case MPTIO_READ_EXT_CFG_PAGE32:
                ext_page_req32->header = ext_page_req->header;
                ext_page_req32->page_address = ext_page_req->page_address;
                ext_page_req32->buf = PTROUT(ext_page_req->buf);
                ext_page_req32->len = ext_page_req->len;
                ext_page_req32->ioc_status = ext_page_req->ioc_status;
                break;
        case MPTIO_RAID_ACTION32:
                raid_act32->action = raid_act->action;
                raid_act32->volume_bus = raid_act->volume_bus;
                raid_act32->volume_id = raid_act->volume_id;
                raid_act32->phys_disk_num = raid_act->phys_disk_num;
                raid_act32->action_data_word = raid_act->action_data_word;
                raid_act32->buf = PTROUT(raid_act->buf);
                raid_act32->len = raid_act->len;
                raid_act32->volume_status = raid_act->volume_status;
                bcopy(raid_act->action_data, raid_act32->action_data,
                    sizeof(raid_act->action_data));
                raid_act32->action_status = raid_act->action_status;
                raid_act32->ioc_status = raid_act->ioc_status;
                raid_act32->write = raid_act->write;
                break;
        }
#endif

        return (0);
}