]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/smartpqi/smartpqi_ioctl.c
MFV r361938:
[FreeBSD/FreeBSD.git] / sys / dev / smartpqi / smartpqi_ioctl.c
1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /* $FreeBSD$ */
28
29 /*
30  * Management interface for smartpqi driver
31  */
32
33 #include "smartpqi_includes.h"
34
/*
 * Copy a kernel buffer out to user space.  Thin wrapper around
 * copyout(9); the softstate and mode parameters exist only to keep the
 * OS-abstraction interface uniform and are unused on FreeBSD.
 */
int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	int rval;

	rval = copyout(src_buf, dest_buf, size);
	return rval;
}
43
/*
 * Copy a user-space buffer into the kernel.  Thin wrapper around
 * copyin(9); softstate and mode are unused on FreeBSD and kept for
 * OS-abstraction interface symmetry.
 */
int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	int rval;

	rval = copyin(src_buf, dest_buf, size);
	return rval;
}
52
53 /*
54  * Device open function for ioctl entry 
55  */
56 static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
57                 struct thread *td)
58 {
59         int error = PQI_STATUS_SUCCESS;
60         
61         return error;
62 }
63
64 /*
65  * Device close function for ioctl entry 
66  */
67 static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
68                 struct thread *td)
69 {
70         int error = PQI_STATUS_SUCCESS;
71
72         return error;
73 }
74
75 /*
76  * ioctl for getting driver info
77  */
78 static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
79 {
80         struct pqisrc_softstate *softs = cdev->si_drv1;
81         pdriver_info driver_info = (pdriver_info)udata;
82
83         DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
84
85         driver_info->major_version = PQISRC_DRIVER_MAJOR;
86         driver_info->minor_version = PQISRC_DRIVER_MINOR;
87         driver_info->release_version = PQISRC_DRIVER_RELEASE;
88         driver_info->build_revision = PQISRC_DRIVER_REVISION;
89         driver_info->max_targets = PQI_MAX_DEVICES - 1;
90         driver_info->max_io = softs->max_io_for_scsi_ml;
91         driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
92
93         DBG_FUNC("OUT\n");
94 }
95
96 /*
97  * ioctl for getting controller info
98  */
99 static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
100 {
101         struct pqisrc_softstate *softs = cdev->si_drv1;
102         device_t dev = softs->os_specific.pqi_dev;
103         pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
104         uint32_t sub_vendor = 0;
105         uint32_t sub_device = 0;
106         uint32_t vendor = 0;
107         uint32_t device = 0;
108
109         DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
110
111         pci_info->bus = pci_get_bus(dev);
112         pci_info->dev_fn = pci_get_function(dev);
113         pci_info->domain = pci_get_domain(dev);
114         sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
115         sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
116         pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
117         vendor = pci_get_vendor(dev);
118         device =  pci_get_device(dev);
119         pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
120         DBG_FUNC("OUT\n");
121 }
122
123
124 /*
125  * ioctl entry point for user
126  */
127 static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
128                 int flags, struct thread *td)
129 {
130         int error = PQI_STATUS_SUCCESS;
131         struct pqisrc_softstate *softs = cdev->si_drv1;
132
133         DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
134
135         if (!udata) {
136                 DBG_ERR("udata is null !!\n");
137         }
138
139         if (pqisrc_ctrl_offline(softs)){
140                 DBG_ERR("Controller s offline !!\n");
141                 return ENOTTY;
142         }
143
144         switch (cmd) {
145                 case CCISS_GETDRIVVER:
146                         smartpqi_get_driver_info_ioctl(udata, cdev);
147                         break;
148                 case CCISS_GETPCIINFO:
149                         smartpqi_get_pci_info_ioctl(udata, cdev);
150                         break;
151                 case SMARTPQI_PASS_THRU:
152                 case CCISS_PASSTHRU:
153                         error = pqisrc_passthru_ioctl(softs, udata, 0);
154                         error = PQI_STATUS_SUCCESS;
155                         break;
156                 case CCISS_REGNEWD:
157                         error = pqisrc_scan_devices(softs);
158                         break;
159                 default:
160                         DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
161                         error = ENOTTY;
162                         break;
163         }
164
165         DBG_FUNC("OUT error = %d\n", error);
166         return error;
167 }
168
169 static struct cdevsw smartpqi_cdevsw =
170 {
171         .d_version = D_VERSION,
172         .d_open    = smartpqi_open,
173         .d_close   = smartpqi_close,
174         .d_ioctl   = smartpqi_ioctl,
175         .d_name    = "smartpqi",
176 };
177
178 /*
179  * Function to create device node for ioctl
180  */
181 int create_char_dev(struct pqisrc_softstate *softs, int card_index)
182 {
183         int error = PQI_STATUS_SUCCESS;
184
185         DBG_FUNC("IN idx = %d\n", card_index);
186
187         softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
188                                 UID_ROOT, GID_OPERATOR, 0640,
189                                 "smartpqi%u", card_index);
190         if(softs->os_specific.cdev) {
191                 softs->os_specific.cdev->si_drv1 = softs;
192         } else {
193                 error = PQI_STATUS_FAILURE;
194         }
195
196         DBG_FUNC("OUT error = %d\n", error);
197         return error;
198 }
199
200 /*
201  * Function to destroy device node for ioctl
202  */
203 void destroy_char_dev(struct pqisrc_softstate *softs)
204 {
205         DBG_FUNC("IN\n");
206         if (softs->os_specific.cdev) {
207                 destroy_dev(softs->os_specific.cdev);
208                 softs->os_specific.cdev = NULL;
209         }
210         DBG_FUNC("OUT\n");
211 }
212
/*
 * Function used to send passthru commands to adapter
 * to support management tools. For eg. ssacli, sscon.
 *
 * Flow: validate the user request, allocate and (for writes) fill a
 * DMA bounce buffer, build a RAID-path IU, submit it on the default
 * inbound queue, wait for completion, unpack any error/sense info,
 * copy read data back to user space, then release the tag and DMA
 * buffer.  Returns PQI_STATUS_SUCCESS or PQI_STATUS_FAILURE; on
 * failure the goto labels unwind exactly the resources acquired so
 * far (err_out: tag + buffer, free_mem: buffer only, out: nothing).
 */
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
{
	int ret = PQI_STATUS_SUCCESS;
	char *drv_buf = NULL;
	uint32_t tag = 0;
	IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
	dma_mem_t ioctl_dma_buf;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;
	/* NOTE(review): ob_q is indexed with PQI_DEFAULT_IB_QUEUE — looks
	 * like the IB index is reused for the OB array; confirm the two
	 * arrays are parallel at that index. */
	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;

	memset(&request, 0, sizeof(request));
	memset(&error_info, 0, sizeof(error_info));

	DBG_FUNC("IN");

	if (pqisrc_ctrl_offline(softs))
		return PQI_STATUS_FAILURE;

	if (!arg)
		return (PQI_STATUS_FAILURE);

	/* A data-bearing request needs a non-empty buffer, and the CDB
	 * must fit the IU's fixed-size cdb field (bounds the memcpy
	 * below). */
	if (iocommand->buf_size < 1 &&
		iocommand->Request.Type.Direction != PQIIOCTL_NONE)
		return PQI_STATUS_FAILURE;
	if (iocommand->Request.CDBLen > sizeof(request.cdb))
		return PQI_STATUS_FAILURE;

	/* Only the four defined transfer directions are accepted. */
	switch (iocommand->Request.Type.Direction) {
		case PQIIOCTL_NONE:
		case PQIIOCTL_WRITE:
		case PQIIOCTL_READ:
		case PQIIOCTL_BIDIRECTIONAL:
			break;
		default:
			return PQI_STATUS_FAILURE;
	}

	/* Stage a DMA-able bounce buffer for any data phase; user pages
	 * are never mapped for DMA directly. */
	if (iocommand->buf_size > 0) {
		memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
		ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
		ioctl_dma_buf.size = iocommand->buf_size;
		ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
		/* allocate memory */
		ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
		if (ret) {
			DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
			ret = PQI_STATUS_FAILURE;
			goto out;
		}

		DBG_INFO("ioctl_dma_buf.dma_addr  = %p\n",(void*)ioctl_dma_buf.dma_addr);
		DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);

		drv_buf = (char *)ioctl_dma_buf.virt_addr;
		/* For host->device transfers, pull the payload from user
		 * space into the bounce buffer before submission. */
		if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
			if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
						iocommand->buf_size, mode)) != 0) {
				ret = PQI_STATUS_FAILURE;
				goto free_mem;
			}
		}
	}

	/* Build the RAID-path IU.  iu_length counts everything after the
	 * header, including exactly one SG descriptor. */
	request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
									PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
	request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	/* Map the CCISS direction onto SOP data-direction codes.  The
	 * SOP codes are named from the device's perspective, so host
	 * WRITE becomes FROM_DEVICE-relative naming here. */
	switch (iocommand->Request.Type.Direction) {
	case PQIIOCTL_NONE:
		request.data_direction = SOP_DATA_DIR_NONE;
		break;
	case PQIIOCTL_WRITE:
		request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case PQIIOCTL_READ:
		request.data_direction = SOP_DATA_DIR_TO_DEVICE;
		break;
	case PQIIOCTL_BIDIRECTIONAL:
		request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	/* Single flat SG entry covering the whole bounce buffer. */
	if (iocommand->buf_size > 0) {
		request.buffer_length = iocommand->buf_size;
		request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
		request.sg_descriptors[0].len = iocommand->buf_size;
		request.sg_descriptors[0].flags =  SG_FLAG_LAST;
	}
	/* Reserve a command tag; it doubles as the rcb/error index. */
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto free_mem;
	}
	request.request_id = tag;
	request.response_queue_id = ob_q->q_id;
	request.error_index = request.request_id;
	rcb = &softs->rcb[tag];

	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
	rcb->tag = tag;
	rcb->req_pending = true;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, &request);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	/* Block until the completion callback clears req_pending (or the
	 * wait times out). */
	ret = pqisrc_wait_on_condition(softs, rcb);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Passthru IOCTL cmd timed out !!\n");
		goto err_out;
	}

	memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));


	/* Non-zero rcb->status: translate firmware error info into the
	 * CCISS error/sense fields, clamping the sense length to both
	 * the source and destination buffer sizes. */
	if (rcb->status) {
		size_t sense_data_length;

		memcpy(&error_info, rcb->error_info, sizeof(error_info));
		iocommand->error_info.ScsiStatus = error_info.status;
		sense_data_length = error_info.sense_data_len;

		/* Fall back to response data when no sense data came back. */
		if (!sense_data_length)
			sense_data_length = error_info.resp_data_len;

		if (sense_data_length &&
			(sense_data_length > sizeof(error_info.data)))
				sense_data_length = sizeof(error_info.data);

		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand->error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand->error_info.SenseInfo);
			memcpy (iocommand->error_info.SenseInfo,
					error_info.data, sense_data_length);
			iocommand->error_info.SenseLen = sense_data_length;
		}

		/* An underflow is not a failure: the command succeeded,
		 * just with less data than requested. */
		if (error_info.data_out_result ==
				PQI_RAID_DATA_IN_OUT_UNDERFLOW){
			rcb->status = REQUEST_SUCCESS;
		}
	}

	/* On success with a read phase, copy the bounce buffer back out
	 * to the user's buffer. */
	if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
		(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {

		if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
			(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
				DBG_ERR("Failed to copy the response\n");
				goto err_out;
		}
	}

	/* Success path: release the tag and the DMA buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);
	if (iocommand->buf_size > 0)
			os_dma_mem_free(softs,&ioctl_dma_buf);

	DBG_FUNC("OUT\n");
	return ret;
err_out:
	/* Tag was allocated: release it, then fall through to free the
	 * DMA buffer. */
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, request.request_id);

free_mem:
	if (iocommand->buf_size > 0)
		os_dma_mem_free(softs, &ioctl_dma_buf);

out:
	DBG_FUNC("Failed OUT\n");
	return PQI_STATUS_FAILURE;
}