/* FreeBSD source file: sys/dev/smartpqi/smartpqi_ioctl.c */
1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /* $FreeBSD$ */
28
29 /*
30  * Management interface for smartpqi driver
31  */
32
33 #include "smartpqi_includes.h"
34
/*
 * Copy "size" bytes from the kernel buffer src_buf out to the
 * user-space buffer dest_buf via copyout(9).  The softs and mode
 * parameters are unused here; they exist so the OS-abstraction
 * layer has a uniform signature across platforms.
 */
int
os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
	void *src_buf, int size, int mode)
{
	return copyout(src_buf, dest_buf, size);
}
43
/*
 * Copy "size" bytes in from the user-space buffer src_buf to the
 * kernel buffer dest_buf via copyin(9).  The softs and mode
 * parameters are unused here; they exist so the OS-abstraction
 * layer has a uniform signature across platforms.
 */
int
os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
	void *src_buf, int size, int mode)
{
	return copyin(src_buf, dest_buf, size);
}
52
53 /*
54  * Device open function for ioctl entry 
55  */
56 static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
57                 struct thread *td)
58 {
59         int error = PQI_STATUS_SUCCESS;
60
61         return error;
62 }
63
64 /*
65  * Device close function for ioctl entry 
66  */
67 static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
68                 struct thread *td)
69 {
70         int error = PQI_STATUS_SUCCESS;
71
72         return error;
73 }
74
75 /*
76  * ioctl for getting driver info
77  */
78 static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
79 {
80         struct pqisrc_softstate *softs = cdev->si_drv1;
81         pdriver_info driver_info = (pdriver_info)udata;
82
83         DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
84
85         driver_info->major_version = PQISRC_DRIVER_MAJOR;
86         driver_info->minor_version = PQISRC_DRIVER_MINOR;
87         driver_info->release_version = PQISRC_DRIVER_RELEASE;
88         driver_info->build_revision = PQISRC_DRIVER_REVISION;
89         driver_info->max_targets = PQI_MAX_DEVICES - 1;
90         driver_info->max_io = softs->max_io_for_scsi_ml;
91         driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
92
93         DBG_FUNC("OUT\n");
94 }
95
96 /*
97  * ioctl for getting controller info
98  */
99 static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
100 {
101         struct pqisrc_softstate *softs = cdev->si_drv1;
102         device_t dev = softs->os_specific.pqi_dev;
103         pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
104         uint32_t sub_vendor = 0;
105         uint32_t sub_device = 0;
106         uint32_t vendor = 0;
107         uint32_t device = 0;
108
109         DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
110
111         pci_info->bus = pci_get_bus(dev);
112         pci_info->dev_fn = pci_get_function(dev);
113         pci_info->domain = pci_get_domain(dev);
114         sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
115         sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
116         pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
117         vendor = pci_get_vendor(dev);
118         device =  pci_get_device(dev);
119         pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
120         DBG_FUNC("OUT\n");
121 }
122
123 /*
124  * ioctl entry point for user
125  */
126 static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
127                 int flags, struct thread *td)
128 {
129         int error = PQI_STATUS_SUCCESS;
130         struct pqisrc_softstate *softs = cdev->si_drv1;
131
132         DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
133
134         if (!udata) {
135                 DBG_ERR("udata is null !!\n");
136         }
137
138         if (pqisrc_ctrl_offline(softs)){
139                 DBG_ERR("Controller s offline !!\n");
140                 return ENOTTY;
141         }
142
143         switch (cmd) {
144                 case CCISS_GETDRIVVER:
145                         smartpqi_get_driver_info_ioctl(udata, cdev);
146                         break;
147                 case CCISS_GETPCIINFO:
148                         smartpqi_get_pci_info_ioctl(udata, cdev);
149                         break;
150                 case SMARTPQI_PASS_THRU:
151                 case CCISS_PASSTHRU:
152                         error = pqisrc_passthru_ioctl(softs, udata, 0);
153                         error = PQI_STATUS_SUCCESS;
154                         break;
155                 case CCISS_REGNEWD:
156                         error = pqisrc_scan_devices(softs);
157                         break;
158                 default:
159                         DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
160                         error = ENOTTY;
161                         break;
162         }
163
164         DBG_FUNC("OUT error = %d\n", error);
165         return error;
166 }
167
/*
 * Character-device switch table: wires the smartpqi management node
 * to its open/close/ioctl handlers.
 */
static struct cdevsw smartpqi_cdevsw =
{
	.d_version = D_VERSION,
	.d_open    = smartpqi_open,
	.d_close   = smartpqi_close,
	.d_ioctl   = smartpqi_ioctl,
	.d_name    = "smartpqi",
};
176
177 /*
178  * Function to create device node for ioctl
179  */
180 int create_char_dev(struct pqisrc_softstate *softs, int card_index)
181 {
182         int error = PQI_STATUS_SUCCESS;
183
184         DBG_FUNC("IN idx = %d\n", card_index);
185
186         softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
187                                 UID_ROOT, GID_OPERATOR, 0640,
188                                 "smartpqi%u", card_index);
189         if(softs->os_specific.cdev) {
190                 softs->os_specific.cdev->si_drv1 = softs;
191         } else {
192                 error = PQI_STATUS_FAILURE;
193         }
194
195         DBG_FUNC("OUT error = %d\n", error);
196         return error;
197 }
198
199 /*
200  * Function to destroy device node for ioctl
201  */
202 void destroy_char_dev(struct pqisrc_softstate *softs)
203 {
204         DBG_FUNC("IN\n");
205         if (softs->os_specific.cdev) {
206                 destroy_dev(softs->os_specific.cdev);
207                 softs->os_specific.cdev = NULL;
208         }
209         DBG_FUNC("OUT\n");
210 }
211
/*
 * Function used to send passthru commands to adapter
 * to support management tools. For eg. ssacli, sscon.
 *
 * arg points to an IOCTL_Command_struct describing the request: the
 * CDB, the LUN address, the transfer direction and the user buffer.
 * mode is passed through to the user-copy wrappers.  Returns
 * PQI_STATUS_SUCCESS or PQI_STATUS_FAILURE.
 *
 * Flow: validate the request, optionally allocate a DMA bounce
 * buffer (copying user data in for writes), build a RAID-path IO
 * request, submit it on the default IB queue, wait for completion,
 * translate any error info back to the caller, and (for reads) copy
 * the bounce buffer back out.  Cleanup is goto-chained: err_out
 * releases the tag/rcb, free_mem releases the DMA buffer, out just
 * logs.
 */
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret = PQI_STATUS_SUCCESS;
	char *drv_buf = NULL;
	uint32_t tag = 0;
	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	/*
	 * NOTE(review): op_ob_q is indexed with PQI_DEFAULT_IB_QUEUE
	 * (an inbound-queue constant) — confirm the default IB and OB
	 * queue indices are intentionally the same value.
	 */
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	if (!arg)
		return (PQI_STATUS_FAILURE);

	/* A data transfer in either direction requires a buffer. */
	if (iocommand->buf_size < 1 &&
		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	/* The CDB must fit in the request's fixed cdb field. */
	if (iocommand->Request.CDBLen > sizeof(request.cdb))
		return PQI_STATUS_FAILURE;

	/* Reject any direction value other than the four known ones. */
	switch (iocommand->Request.Type.Direction) {
		case PQIIOCTL_NONE:
		case PQIIOCTL_WRITE:
		case PQIIOCTL_READ:
		case PQIIOCTL_BIDIRECTIONAL:
			break;
		default:
			return PQI_STATUS_FAILURE;
	}

	/*
	 * Allocate a kernel DMA bounce buffer for the transfer; the
	 * user buffer cannot be handed to the controller directly.
	 */
	if (iocommand->buf_size > 0) {
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* allocate memory */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
			ret = PQI_STATUS_FAILURE;
			goto out;
		}

		DBG_INFO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
		DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		/* For writes, stage the user's data into the bounce buffer. */
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
			if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
						iocommand->buf_size, mode)) != 0) {
				ret = PQI_STATUS_FAILURE;
				goto free_mem;
			}
		}
	}

	/* Build the RAID-path IO request around the caller's CDB/LUN. */
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	/* IU length covers one SG descriptor, minus the common header. */
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
									PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	/*
	 * Map the ioctl direction to a SOP data direction.
	 * NOTE(review): WRITE maps to FROM_DEVICE and READ to
	 * TO_DEVICE; the SOP names appear to be from the device's
	 * perspective — confirm against the PQI/SOP specification.
	 */
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	/* Describe the bounce buffer in a single SG element. */
	if (iocommand->buf_size > 0) {
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
	}
	/* Reserve a tag; it doubles as the rcb and error-info index. */
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	rcb = &softs->rcb[tag];

	/* Completion callbacks wake the pqisrc_wait_on_condition below. */
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	/* Block until the firmware completes (or times out) the request. */
	ret = pqisrc_wait_on_condition(softs, rcb);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
		goto err_out;
	}

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));

	/*
	 * On error, translate the firmware's error-info element into
	 * the CCISS-style error_info the management tool expects.
	 */
	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		/* Fall back to response data when no sense data came back. */
		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		/* Clamp to what the firmware's data area actually holds. */
		if (sense_data_length &&
			(sense_data_length > sizeof(error_info.data)))
				sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			/* Clamp again to the caller's SenseInfo field. */
			if (sense_data_length >
				sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand->error_info.SenseInfo);
			memcpy (iocommand->error_info.SenseInfo,
					error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		/* A data underflow is treated as a successful transfer. */
		if (error_info.data_out_result ==
				PQI_RAID_DATA_IN_OUT_UNDERFLOW){
			rcb->status = REQUEST_SUCCESS;
		}
	}

	/* For reads, copy the bounce buffer back to the user buffer. */
	if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
		if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
			(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
				DBG_ERR("Failed to copy the response\n");
				goto err_out;
		}
	}

	/* Success path: release the rcb, tag and DMA buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
			os_dma_mem_free(softs,&ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return ret;
err_out:
	/* Error after the tag was taken: release rcb/tag, then memory. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	/* Error after the DMA buffer was allocated: release it. */
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}