/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CAM interface for smartpqi driver
 */

#include "smartpqi_includes.h"

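/*
 * This file is the CAM glue for the driver: it registers a SIM per
 * controller, translates incoming CCBs into request control blocks
 * (rcb_t), DMA-maps their data buffers, and routes completions and
 * errors back to CAM via xpt_done().
 */
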
/*
 * Set cam sim properties of the smartpqi adapter.
 */
static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);

	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;
}

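/*
 * Note on cpi->maxio: in the worst case each page of a misaligned buffer
 * needs its own scatter/gather element, and a misaligned buffer can span
 * one more page than its length divided by PAGE_SIZE. Capping transfers
 * at (max_sg_elem - 1) * PAGE_SIZE therefore guarantees the request fits
 * in max_sg_elem elements.
 */
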
69 * Get transport settings of the smartpqi adapter
71 static void get_transport_settings(struct pqisrc_softstate *softs,
72 struct ccb_trans_settings *cts)
74 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
75 struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
76 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
80 cts->protocol = PROTO_SCSI;
81 cts->protocol_version = SCSI_REV_SPC4;
82 cts->transport = XPORT_SPI;
83 cts->transport_version = 2;
84 spi->valid = CTS_SPI_VALID_DISC;
85 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
86 scsi->valid = CTS_SCSI_VALID_TQ;
87 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
88 sas->valid = CTS_SAS_VALID_SPEED;
89 cts->ccb_h.status = CAM_REQ_CMP;
/*
 * Add the target to the CAM layer and rescan, when a new device is found.
 */
void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
	union ccb *ccb;

	if(softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		xpt_rescan(ccb);
	}
}

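/*
 * xpt_rescan() takes ownership of the CCB; the rescan completion path
 * frees it (and the path created above), so neither is freed here.
 */
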
/*
 * Remove the device from the CAM layer when deleted or hot removed.
 */
void os_remove_device(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device) {
	struct cam_path *tmppath;

	if(softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		pqisrc_free_device(softs, device);
	}
}

/*
 * Function to release the frozen simq.
 */
static void pqi_release_camq(rcb_t *rcb)
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}

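/*
 * PQI_FLAG_BUSY is set (and the simq frozen) when the driver runs out of
 * command tags in pqisrc_io_start(); the first completion afterwards thaws
 * the queue here, either directly or by tagging the CCB with
 * CAM_RELEASE_SIMQ so CAM releases it on xpt_done().
 */
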
/*
 * Function to dma-unmap the completed request.
 */
static void pqi_unmap_request(void *arg)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	DBG_IO("IN rcb = %p\n", arg);

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0) {
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if(rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void *)rcb->sgt,
			rcb->nseg * sizeof(sgt_t));

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_IO("OUT\n");
}

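/*
 * pqisrc_put_tag() returns the command tag to the free list; since the
 * rcb is indexed by that tag and may be reused immediately, this must be
 * the last point in the completion path that touches the rcb.
 */
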
/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if(cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];

		/* Let the disks be probed and dealt with via CAM. Only for LDs,
		   let it fall through and the inquiry be tweaked. */
		if( !device || !pqisrc_is_logical_device(device) ||
			(device->devtype != DISK_DEVICE) ||
			pqisrc_is_external_raid_device(device)) {
			return;
		}

		strncpy(inq->vendor, "MSCC",
			SID_VENDOR_SIZE);
		strncpy(inq->product,
			pqisrc_raidlevel_to_string(device->raid_level),
			SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline ? "OFF" : "OK",
			SID_REVISION_SIZE);
	}
}

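/*
 * Only standard (non-EVPD) INQUIRY responses for direct-attached logical
 * volumes are rewritten, so tools such as camcontrol report the RAID
 * level as the product string and the volume's online state as the
 * revision; physical disks and external RAID devices pass through
 * unmodified.
 */
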
/*
 * Handle completion of a command - pass results back through the CCB.
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);

	DBG_IO("OUT\n");
}

/*
 * Error response handling for raid IO.
 */
void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	ASSERT(err_info != NULL);
	csio->scsi_status = err_info->status;
	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch(csio->scsi_status) {
		case PQI_RAID_STATUS_QUEUE_FULL:
			csio->ccb_h.status = CAM_REQ_CMP;
			DBG_ERR("Queue Full error\n");
			break;
		/* check condition, sense data included */
		case PQI_RAID_STATUS_CHECK_CONDITION:
		{
			uint16_t sense_data_len =
				LE_16(err_info->sense_data_len);
			uint8_t *sense_data = NULL;

			if (sense_data_len)
				sense_data = err_info->data;
			memset(&csio->sense_data, 0, csio->sense_len);
			sense_data_len = (sense_data_len >
				csio->sense_len) ? csio->sense_len :
				sense_data_len;
			if (sense_data)
				memcpy(&csio->sense_data, sense_data,
					sense_data_len);
			if (csio->sense_len > sense_data_len)
				csio->sense_resid = csio->sense_len
						- sense_data_len;
			else
				csio->sense_resid = 0;
			csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
						| CAM_AUTOSNS_VALID
						| CAM_REQ_CMP_ERR;
			break;
		}
		case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
		{
			uint32_t resid = 0;

			resid = rcb->bcount - err_info->data_out_transferred;
			csio->resid = resid;
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		default:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
	}

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
}

/*
 * Error response handling for aio.
 */
void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	switch (err_info->service_resp) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		csio->ccb_h.status = err_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch(err_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			csio->ccb_h.status = CAM_REQ_ABORTED;
			DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			csio->resid =
				LE_32(err_info->resd_count);
			break;
		case PQI_AIO_STATUS_OVERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			DBG_WARN_BTL(rcb->dvp, "AIO Path Disabled\n");
			rcb->dvp->offload_enabled = false;
			csio->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		case PQI_AIO_STATUS_IO_NO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
		default:
			DBG_WARN_BTL(rcb->dvp, "IO Error/Invalid/No device\n");
			csio->ccb_h.status |=
				CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		csio->ccb_h.status = CAM_REQ_CMP;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
		DBG_WARN_BTL(rcb->dvp, "TMF rejected/Incorrect Lun\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	default:
		DBG_WARN_BTL(rcb->dvp, "Scsi Status Error\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	}

	if(err_info->data_pres == DATA_PRESENT_SENSE_DATA) {
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		memset(&csio->sense_data, 0, csio->sense_len);
		if (sense_data)
			memcpy(&csio->sense_data, sense_data, ((sense_data_len >
				csio->sense_len) ? csio->sense_len : sense_data_len));
		if (csio->sense_len > sense_data_len)
			csio->sense_resid = csio->sense_len - sense_data_len;
		else
			csio->sense_resid = 0;
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

	smartpqi_fix_ld_inquiry(softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if( error || nseg > softs->pqi_cap.max_sg_elem ) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ|
						CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	/*
	 * Element size is sizeof(sgt_t), matching the free in
	 * pqi_unmap_request(); allocating sizeof(rcb_t) here would be a
	 * size mismatch.
	 */
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
	rcb->nseg = nseg;
	if (rcb->sgt != NULL) {
		for (int i = 0; i < nseg; i++) {
			rcb->sgt[i].addr = segs[i].ds_addr;
			rcb->sgt[i].len = segs[i].ds_len;
			rcb->sgt[i].flags = 0;
		}
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		rcb->cm_ccb->ccb_h.status |= (CAM_REQUEUE_REQ
						|CAM_RELEASE_SIMQ);
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
}

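/*
 * bus_dmamap_load_ccb() may run this callback synchronously, or defer it
 * and return EINPROGRESS when the mapping has to wait for bounce
 * resources; pqisrc_io_start() treats EINPROGRESS as a non-error for
 * exactly that reason.
 */
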
/*
 * Function to dma-map the request buffer.
 */
static int pqi_map_request(rcb_t *rcb)
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	DBG_FUNC("IN\n");

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return(0);
	rcb->cm_flags |= PQI_CMD_MAPPED;

	if (rcb->bcount) {
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (error != 0) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
				error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller. If there are no
		 * data buffers associated with the command then it can bypass
		 * the S/G list.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		error = pqisrc_build_send_io(softs, rcb);
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Function to clear the request control block.
 */
void os_reset_rcb(rcb_t *rcb)
{
	rcb->error_info = NULL;
	rcb->req = NULL;
	rcb->status = -1;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->cdbp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
}

/*
 * Callback function for the lun rescan.
 */
static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun.
 */
static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
		int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("rescan failed (can't allocate CCB)\n");
		return;
	}
	status = xpt_create_path(&path, NULL,
			cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP\n",
			status);
		xpt_free_ccb(ccb);
		return;
	}

	bzero(ccb, sizeof(union ccb));
	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);
}

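/*
 * The scan runs asynchronously: xpt_action() queues the XPT_SCAN_LUN
 * request and smartpqi_lunrescan_cb() frees the path and CCB once the
 * probe completes.
 */
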
629 * Function to rescan the lun under each target
631 void smartpqi_target_rescan(struct pqisrc_softstate *softs)
633 int target = 0, lun = 0;
637 for(target = 0; target < PQI_MAX_DEVICES; target++){
638 for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
639 if(softs->device_list[target][lun]){
640 smartpqi_lun_rescan(softs, target, lun);
/*
 * Set the mode of tagged command queueing for the current task.
 */
uint8_t os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch(ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands.
 */
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;

	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		if(prcb->req_pending && prcb->cm_ccb) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			xpt_done((union ccb *)prcb->cm_ccb);
			prcb->cm_ccb = NULL;
		}
	}
}

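/*
 * The walk starts at tag 1, which suggests tag 0 is never handed out by
 * the tag allocator - an assumption based on this file alone; the tag
 * list implementation lives elsewhere in the driver.
 */
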
/*
 * IO handling functionality entry point.
 */
static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error = PQI_STATUS_FAILURE;
	pqi_scsi_dev_t *dvp;

	DBG_FUNC("IN\n");

	if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device reset */
	if (DEV_RESET(dvp)) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return error;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return error;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	if( tag == INVALID_ELEM ) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return PQI_STATUS_FAILURE;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case CAM_DIR_OUT:
		rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
		break;
	case CAM_DIR_NONE:
		no_transfer = 1;
		break;
	default:
		DBG_ERR("Unknown Dir\n");
		break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}
	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != 0) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		if (error == EINPROGRESS) {
			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
			error = 0;
		} else {
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
		}
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}

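/*
 * On success the CCB completes later from the response path
 * (os_io_response_success() or the error handlers above); a zero return
 * tells smartpqi_cam_action() not to call xpt_done() on this CCB itself.
 */
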
/*
 * Abort a task, task management functionality.
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t abort_tag = rcb->tag;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;
	uint16_t qid;

	DBG_FUNC("IN\n");

	qid = (uint16_t)rcb->resp_qid;

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->resp_qid = qid;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
		if (REQUEST_SUCCESS == rval) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		}
	}
	pqisrc_put_tag(&softs->taglist, abort_tag);
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a task set, task management functionality.
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	if (rval == PQI_STATUS_SUCCESS) {
		rval = rcb->status;
	}

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality.
 */
static int
pqisrc_target_reset(pqisrc_softstate_t *softs, union ccb *ccb)
{
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	if (devp == NULL) {
		DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;
	rval = pqisrc_send_tmf(softs, devp, rcb, 0,
		SOP_TASK_MANAGEMENT_LUN_RESET);
	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
	}
	devp->reset_in_progress = false;
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return ((rval == REQUEST_SUCCESS) ?
		PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}

/*
 * cam entry point of the smartpqi module.
 */
static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	DBG_FUNC("IN\n");

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:
		if(!pqisrc_io_start(sim, ccb)) {
			return;
		}
		break;
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		cam_calc_geometry(ccg, /* extended */ 1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_PATH_INQ:
		update_sim_properties(sim, &ccb->cpi);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_GET_TRAN_SETTINGS:
		get_transport_settings(softs, &ccb->cts);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_ABORT:
		if(pqisrc_scsi_abort_task(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			DBG_ERR("Abort task failed on %d\n",
				ccb->ccb_h.target_id);
			return;
		}
		break;
	case XPT_TERM_IO:
		if (pqisrc_scsi_abort_task_set(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Abort task set failed on %d\n",
				ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		}
		break;
	case XPT_RESET_DEV:
		if(pqisrc_target_reset(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Target reset failed on %d\n",
				ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	default:
		DBG_WARN("UNSUPPORTED FUNC CODE\n");
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

	DBG_FUNC("OUT\n");
}

/*
 * Function to poll for responses when interrupts are unavailable;
 * this also supports taking crash dumps.
 */
static void smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;

	for (i = 1; i < softs->intr_count; i++)
		pqisrc_process_response_queue(softs, i);
}

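/*
 * The loop deliberately skips queue index 0, presumably because it is
 * reserved for event/admin responses rather than I/O completions - an
 * assumption; the queue layout is defined outside this file.
 */
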
1013 * Function to adjust the queue depth of a device
1015 void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
1017 struct ccb_relsim crs;
1021 xpt_setup_ccb(&crs.ccb_h, path, 5);
1022 crs.ccb_h.func_code = XPT_REL_SIMQ;
1023 crs.ccb_h.flags = CAM_DEV_QFREEZE;
1024 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1025 crs.openings = queue_depth;
1026 xpt_action((union ccb *)&crs);
1027 if(crs.ccb_h.status != CAM_REQ_CMP) {
1028 printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
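/*
 * XPT_REL_SIMQ with RELSIM_ADJUST_OPENINGS changes the number of
 * concurrent openings CAM allows on this path; CAM_DEV_QFREEZE appears
 * intended to hold the device queue steady while the change takes
 * effect.
 */
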
1035 * Function to register async callback for setting queue depth
1038 smartpqi_async(void *callback_arg, u_int32_t code,
1039 struct cam_path *path, void *arg)
1041 struct pqisrc_softstate *softs;
1042 softs = (struct pqisrc_softstate*)callback_arg;
1047 case AC_FOUND_DEVICE:
1049 struct ccb_getdev *cgd;
1050 cgd = (struct ccb_getdev *)arg;
1054 uint32_t t_id = cgd->ccb_h.target_id;
1056 if (t_id <= (PQI_CTLR_INDEX - 1)) {
1057 if (softs != NULL) {
1058 pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
1059 smartpqi_adjust_queue_depth(path,
/*
 * Function to register the sim with the CAM layer for the smartpqi driver.
 */
int register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int error = 0;
	int max_transactions;
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return PQI_STATUS_FAILURE;
	}

	sim = cam_sim_alloc(smartpqi_cam_action, \
				smartpqi_poll, "smartpqi", softs, \
				card_index, &softs->os_specific.cam_lock, \
				1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (status != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed status=%d\n", status);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		/* Drop the lock taken above before bailing out. */
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}
	/*
	 * Callback to set the queue depth per target which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);

	return error;
}

/*
 * Function to deregister the smartpqi sim from the CAM layer.
 */
void deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	xpt_release_simq(softs->os_specific.sim, 0);

	xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
	softs->os_specific.sim_registered = FALSE;

	if (softs->os_specific.sim) {
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}
	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);
}