/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Management interface for smartpqi driver
 */
33 #include "smartpqi_includes.h"
/*
 * Wrapper function to copy to user from kernel.
 *
 * softs: controller softstate (unused here; kept for the OSAL interface).
 * dest_buf: user-space destination address.
 * src_buf:  kernel-space source buffer.
 * size:     number of bytes to copy.
 * mode:     ioctl mode flags (unused on FreeBSD; kept for the OSAL interface).
 *
 * Returns 0 on success, or an errno value from copyout() on fault.
 */
int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	return(copyout(src_buf, dest_buf, size));
}
/*
 * Wrapper function to copy from user to kernel.
 *
 * softs: controller softstate (unused here; kept for the OSAL interface).
 * dest_buf: kernel-space destination buffer.
 * src_buf:  user-space source address.
 * size:     number of bytes to copy.
 * mode:     ioctl mode flags (unused on FreeBSD; kept for the OSAL interface).
 *
 * Returns 0 on success, or an errno value from copyin() on fault.
 */
int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
		void *src_buf, int size, int mode)
{
	return(copyin(src_buf, dest_buf, size));
}
54 * Device open function for ioctl entry
56 static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
59 int error = PQI_STATUS_SUCCESS;
65 * Device close function for ioctl entry
67 static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
70 int error = PQI_STATUS_SUCCESS;
76 * ioctl for getting driver info
78 static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
80 struct pqisrc_softstate *softs = cdev->si_drv1;
81 pdriver_info driver_info = (pdriver_info)udata;
83 DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
85 driver_info->major_version = PQISRC_DRIVER_MAJOR;
86 driver_info->minor_version = PQISRC_DRIVER_MINOR;
87 driver_info->release_version = PQISRC_DRIVER_RELEASE;
88 driver_info->build_revision = PQISRC_DRIVER_REVISION;
89 driver_info->max_targets = PQI_MAX_DEVICES - 1;
90 driver_info->max_io = softs->max_io_for_scsi_ml;
91 driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
97 * ioctl for getting controller info
99 static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
101 struct pqisrc_softstate *softs = cdev->si_drv1;
102 device_t dev = softs->os_specific.pqi_dev;
103 pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata;
104 uint32_t sub_vendor = 0;
105 uint32_t sub_device = 0;
109 DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
111 pci_info->bus = pci_get_bus(dev);
112 pci_info->dev_fn = pci_get_function(dev);
113 pci_info->domain = pci_get_domain(dev);
114 sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
115 sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2);
116 pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor;
117 vendor = pci_get_vendor(dev);
118 device = pci_get_device(dev);
119 pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor;
125 * ioctl entry point for user
127 static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
128 int flags, struct thread *td)
130 int error = PQI_STATUS_SUCCESS;
131 struct pqisrc_softstate *softs = cdev->si_drv1;
133 DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
136 DBG_ERR("udata is null !!\n");
139 if (pqisrc_ctrl_offline(softs)){
140 DBG_ERR("Controller s offline !!\n");
145 case CCISS_GETDRIVVER:
146 smartpqi_get_driver_info_ioctl(udata, cdev);
148 case CCISS_GETPCIINFO:
149 smartpqi_get_pci_info_ioctl(udata, cdev);
151 case SMARTPQI_PASS_THRU:
153 error = pqisrc_passthru_ioctl(softs, udata, 0);
154 error = PQI_STATUS_SUCCESS;
157 error = pqisrc_scan_devices(softs);
160 DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
165 DBG_FUNC("OUT error = %d\n", error);
169 static struct cdevsw smartpqi_cdevsw =
171 .d_version = D_VERSION,
172 .d_open = smartpqi_open,
173 .d_close = smartpqi_close,
174 .d_ioctl = smartpqi_ioctl,
175 .d_name = "smartpqi",
179 * Function to create device node for ioctl
181 int create_char_dev(struct pqisrc_softstate *softs, int card_index)
183 int error = PQI_STATUS_SUCCESS;
185 DBG_FUNC("IN idx = %d\n", card_index);
187 softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index,
188 UID_ROOT, GID_OPERATOR, 0640,
189 "smartpqi%u", card_index);
190 if(softs->os_specific.cdev) {
191 softs->os_specific.cdev->si_drv1 = softs;
193 error = PQI_STATUS_FAILURE;
196 DBG_FUNC("OUT error = %d\n", error);
201 * Function to destroy device node for ioctl
203 void destroy_char_dev(struct pqisrc_softstate *softs)
206 if (softs->os_specific.cdev) {
207 destroy_dev(softs->os_specific.cdev);
208 softs->os_specific.cdev = NULL;
214 * Function used to send passthru commands to adapter
215 * to support management tools. For eg. ssacli, sscon.
218 pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
220 int ret = PQI_STATUS_SUCCESS;
221 char *drv_buf = NULL;
223 IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg;
224 dma_mem_t ioctl_dma_buf;
225 pqisrc_raid_req_t request;
226 raid_path_error_info_elem_t error_info;
227 ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
228 ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
231 memset(&request, 0, sizeof(request));
232 memset(&error_info, 0, sizeof(error_info));
236 if (pqisrc_ctrl_offline(softs))
237 return PQI_STATUS_FAILURE;
240 return (PQI_STATUS_FAILURE);
242 if (iocommand->buf_size < 1 &&
243 iocommand->Request.Type.Direction != PQIIOCTL_NONE)
244 return PQI_STATUS_FAILURE;
245 if (iocommand->Request.CDBLen > sizeof(request.cdb))
246 return PQI_STATUS_FAILURE;
248 switch (iocommand->Request.Type.Direction) {
252 case PQIIOCTL_BIDIRECTIONAL:
255 return PQI_STATUS_FAILURE;
258 if (iocommand->buf_size > 0) {
259 memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem));
260 ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer";
261 ioctl_dma_buf.size = iocommand->buf_size;
262 ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
263 /* allocate memory */
264 ret = os_dma_mem_alloc(softs, &ioctl_dma_buf);
266 DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret);
267 ret = PQI_STATUS_FAILURE;
271 DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
272 DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
274 drv_buf = (char *)ioctl_dma_buf.virt_addr;
275 if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
276 if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
277 iocommand->buf_size, mode)) != 0) {
278 ret = PQI_STATUS_FAILURE;
284 request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
285 request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
286 PQI_REQUEST_HEADER_LENGTH;
287 memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
288 sizeof(request.lun_number));
289 memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
290 request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
292 switch (iocommand->Request.Type.Direction) {
294 request.data_direction = SOP_DATA_DIR_NONE;
297 request.data_direction = SOP_DATA_DIR_FROM_DEVICE;
300 request.data_direction = SOP_DATA_DIR_TO_DEVICE;
302 case PQIIOCTL_BIDIRECTIONAL:
303 request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL;
307 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
308 if (iocommand->buf_size > 0) {
309 request.buffer_length = iocommand->buf_size;
310 request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr;
311 request.sg_descriptors[0].len = iocommand->buf_size;
312 request.sg_descriptors[0].flags = SG_FLAG_LAST;
314 tag = pqisrc_get_tag(&softs->taglist);
315 if (INVALID_ELEM == tag) {
316 DBG_ERR("Tag not available\n");
317 ret = PQI_STATUS_FAILURE;
320 request.request_id = tag;
321 request.response_queue_id = ob_q->q_id;
322 request.error_index = request.request_id;
323 rcb = &softs->rcb[tag];
325 rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
326 rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
328 rcb->req_pending = true;
330 ret = pqisrc_submit_cmnd(softs, ib_q, &request);
331 if (ret != PQI_STATUS_SUCCESS) {
332 DBG_ERR("Unable to submit command\n");
336 ret = pqisrc_wait_on_condition(softs, rcb);
337 if (ret != PQI_STATUS_SUCCESS) {
338 DBG_ERR("Passthru IOCTL cmd timed out !!\n");
342 memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));
346 size_t sense_data_length;
348 memcpy(&error_info, rcb->error_info, sizeof(error_info));
349 iocommand->error_info.ScsiStatus = error_info.status;
350 sense_data_length = error_info.sense_data_len;
352 if (!sense_data_length)
353 sense_data_length = error_info.resp_data_len;
355 if (sense_data_length &&
356 (sense_data_length > sizeof(error_info.data)))
357 sense_data_length = sizeof(error_info.data);
359 if (sense_data_length) {
360 if (sense_data_length >
361 sizeof(iocommand->error_info.SenseInfo))
363 sizeof(iocommand->error_info.SenseInfo);
364 memcpy (iocommand->error_info.SenseInfo,
365 error_info.data, sense_data_length);
366 iocommand->error_info.SenseLen = sense_data_length;
369 if (error_info.data_out_result ==
370 PQI_RAID_DATA_IN_OUT_UNDERFLOW){
371 rcb->status = REQUEST_SUCCESS;
375 if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
376 (iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
378 if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
379 (void*)drv_buf, iocommand->buf_size, mode)) != 0) {
380 DBG_ERR("Failed to copy the response\n");
386 pqisrc_put_tag(&softs->taglist, request.request_id);
387 if (iocommand->buf_size > 0)
388 os_dma_mem_free(softs,&ioctl_dma_buf);
394 pqisrc_put_tag(&softs->taglist, request.request_id);
397 if (iocommand->buf_size > 0)
398 os_dma_mem_free(softs, &ioctl_dma_buf);
401 DBG_FUNC("Failed OUT\n");
402 return PQI_STATUS_FAILURE;