/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * CAM interface for smartpqi driver
 */

#include "smartpqi_includes.h"

/*
 * Set the CAM SIM properties of the smartpqi adapter.
 */
static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);

	cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
	cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
	cpi->max_lun = PQI_MAX_MULTILUN;
	cpi->max_target = 1088;
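	/*
	 * The largest I/O the SIM accepts is bounded by the adapter's
	 * scatter/gather limit; one element is subtracted below, presumably
	 * reserved for chaining overhead (an assumption - the reservation
	 * is not explained in the surrounding code).
	 */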
	cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
	cpi->initiator_id = 255;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);
	cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC4;
	cpi->transport = XPORT_SPI;
	cpi->transport_version = 2;
	cpi->ccb_h.status = CAM_REQ_CMP;
}

/*
 * Get the transport settings of the smartpqi adapter.
 */
static void get_transport_settings(struct pqisrc_softstate *softs,
		struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
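	/*
	 * Note: the transport is reported as parallel SCSI (XPORT_SPI)
	 * rather than SAS, yet both the spi and sas capability fields are
	 * populated below; presumably this keeps legacy CAM consumers
	 * working against this SAS/RAID adapter (assumption - the choice
	 * is not explained here).
	 */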

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC4;
	cts->transport = XPORT_SPI;
	cts->transport_version = 2;
	spi->valid = CTS_SPI_VALID_DISC;
	spi->flags = CTS_SPI_FLAGS_DISC_ENB;
	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
	sas->valid = CTS_SAS_VALID_SPEED;
	cts->ccb_h.status = CAM_REQ_CMP;
}

/*
 * Add the target to the CAM layer and rescan, when a new device is found.
 */
void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
	union ccb *ccb;

	if(softs->os_specific.sim_registered) {
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
			DBG_ERR("rescan failed (can't allocate CCB)\n");
			return;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("rescan failed (can't create path)\n");
			xpt_free_ccb(ccb);
			return;
		}
		xpt_rescan(ccb);
	}
}

/*
 * Remove the device from the CAM layer when deleted or hot removed.
 */
void os_remove_device(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device) {
	struct cam_path *tmppath;

	if(softs->os_specific.sim_registered) {
		if (xpt_create_path(&tmppath, NULL,
			cam_sim_path(softs->os_specific.sim),
			device->target, device->lun) != CAM_REQ_CMP) {
			DBG_ERR("unable to create path for async event\n");
			return;
		}
		xpt_async(AC_LOST_DEVICE, tmppath, NULL);
		xpt_free_path(tmppath);
		pqisrc_free_device(softs, device);
	}
}

/*
 * Function to release the frozen simq
 */
static void pqi_release_camq( rcb_t *rcb )
{
	pqisrc_softstate_t *softs;
	struct ccb_scsiio *csio;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	softs = rcb->softs;
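	/*
	 * PQI_FLAG_BUSY is set in pqisrc_io_start() when the tag pool runs
	 * dry and the simq gets frozen. On the first completion after that,
	 * either release the simq directly (if the CCB already carries
	 * CAM_RELEASE_SIMQ) or tag this CCB so CAM releases it on xpt_done().
	 */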
	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}

/*
 * Function to dma-unmap the completed request
 */
static void pqi_unmap_request(void *arg)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	DBG_IO("IN rcb = %p\n", arg);

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if (!(rcb->cm_flags & PQI_CMD_MAPPED))
		return;

	if (rcb->bcount != 0 ) {
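		/*
		 * Sync for the direction the data actually moved before
		 * unloading the map: POSTREAD makes device writes visible
		 * to the CPU, POSTWRITE completes the write-side bookkeeping.
		 */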
		if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
			bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap,
					BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
					rcb->cm_datamap);
	}
	rcb->cm_flags &= ~PQI_CMD_MAPPED;

	if(rcb->sgt && rcb->nseg)
		os_mem_free(rcb->softs, (void*)rcb->sgt,
			rcb->nseg*sizeof(sgt_t));

	pqisrc_put_tag(&softs->taglist, rcb->tag);
}

/*
 * Construct a meaningful LD name for the volume here.
 */
static void
smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
{
	struct scsi_inquiry_data *inq = NULL;
	uint8_t *cdb = NULL;
	pqi_scsi_dev_t *device = NULL;

	cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
		(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
	if(cdb[0] == INQUIRY &&
		(cdb[1] & SI_EVPD) == 0 &&
		(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
		csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {

		inq = (struct scsi_inquiry_data *)csio->data_ptr;

		device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
		/* Let the disks be probed and dealt with via CAM. Only for LD
		   let it fall through and the inquiry be tweaked. */
		if( !device || !pqisrc_is_logical_device(device) ||
				(device->devtype != DISK_DEVICE) ||
				pqisrc_is_external_raid_device(device)) {
			return;
		}

		strncpy(inq->vendor, "MSCC",
			SID_VENDOR_SIZE);
		strncpy(inq->product,
			pqisrc_raidlevel_to_string(device->raid_level),
			SID_PRODUCT_SIZE);
		strncpy(inq->revision, device->volume_offline?"OFF":"OK",
			SID_REVISION_SIZE);
	}
}

/*
 * Handle completion of a command - pass results back through the CCB.
 */
void
os_io_response_success(rcb_t *rcb)
{
	struct ccb_scsiio *csio;

	DBG_IO("IN rcb = %p\n", rcb);

	if (rcb == NULL)
		panic("rcb is null");

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	rcb->status = REQUEST_SUCCESS;
	csio->ccb_h.status = CAM_REQ_CMP;

	smartpqi_fix_ld_inquiry(rcb->softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
}

/*
 * Error response handling for raid IO
 */
void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;

	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	ASSERT(err_info != NULL);
	csio->scsi_status = err_info->status;
	csio->ccb_h.status = CAM_REQ_CMP_ERR;

	if (csio->ccb_h.func_code == XPT_SCSI_IO) {
		/*
		 * Handle specific SCSI status values.
		 */
		switch(csio->scsi_status) {
		case PQI_RAID_STATUS_QUEUE_FULL:
			csio->ccb_h.status = CAM_REQ_CMP;
			DBG_ERR("Queue Full error\n");
			break;
		/* check condition, sense data included */
		case PQI_RAID_STATUS_CHECK_CONDITION:
		{
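			/*
			 * Copy as much sense data as the CCB can hold and
			 * report any shortfall in sense_resid, so upper
			 * layers know if the sense buffer was truncated.
			 */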
			uint16_t sense_data_len =
				LE_16(err_info->sense_data_len);
			uint8_t *sense_data = NULL;

			sense_data = err_info->data;
			memset(&csio->sense_data, 0, csio->sense_len);
			sense_data_len = (sense_data_len >
					csio->sense_len) ?
					csio->sense_len :
					sense_data_len;
			memcpy(&csio->sense_data, sense_data,
					sense_data_len);
			if (csio->sense_len > sense_data_len)
				csio->sense_resid = csio->sense_len
						- sense_data_len;
			else
				csio->sense_resid = 0;
			csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
					| CAM_AUTOSNS_VALID
					| CAM_REQ_CMP_ERR;
			break;
		}
		case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
		{
			uint32_t resid = 0;

			resid = rcb->bcount-err_info->data_out_transferred;
			csio->resid = resid;
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		default:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
		}
	}

	if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
		softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
		if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
			xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
		else
			csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}

	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
}

/*
 * Error response handling for aio.
 */
void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
	struct ccb_scsiio *csio;
	pqisrc_softstate_t *softs;

	if (rcb == NULL)
		panic("rcb is null");

	rcb->status = REQUEST_SUCCESS;
	csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
	if (csio == NULL)
		panic("csio is null");

	softs = rcb->softs;

	switch (err_info->service_resp) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		csio->ccb_h.status = err_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch(err_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			csio->ccb_h.status = CAM_REQ_ABORTED;
			DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			csio->resid =
				LE_32(err_info->resd_count);
			break;
		case PQI_AIO_STATUS_OVERRUN:
			csio->ccb_h.status = CAM_REQ_CMP;
			break;
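		/*
		 * Firmware turned off the accelerated I/O (AIO) bypass path
		 * for this device: disable offload and requeue so the
		 * request is retried, presumably down the normal RAID path
		 * once offload_enabled is false.
		 */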
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
			rcb->dvp->offload_enabled = false;
			csio->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		case PQI_AIO_STATUS_IO_NO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
		default:
			DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
			csio->ccb_h.status |=
				CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		csio->ccb_h.status = CAM_REQ_CMP;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
		DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	default:
		DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
		csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		break;
	}

	if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
		csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
		uint8_t *sense_data = NULL;
		unsigned sense_data_len = LE_16(err_info->data_len);

		if (sense_data_len)
			sense_data = err_info->data;
		DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
			sense_data_len);
		memset(&csio->sense_data, 0, csio->sense_len);
		if (sense_data)
			memcpy(&csio->sense_data, sense_data, ((sense_data_len >
				csio->sense_len) ? csio->sense_len : sense_data_len));
		if (csio->sense_len > sense_data_len)
			csio->sense_resid = csio->sense_len - sense_data_len;
		else
			csio->sense_resid = 0;
		csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
	}

	smartpqi_fix_ld_inquiry(softs, csio);
	pqi_release_camq(rcb);
	pqi_unmap_request(rcb);
	xpt_done((union ccb *)csio);
}

static void
pqi_freeze_ccb(union ccb *ccb)
{
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	pqisrc_softstate_t *softs;
	rcb_t *rcb;

	rcb = (rcb_t *)arg;
	softs = rcb->softs;

	if( error || nseg > softs->pqi_cap.max_sg_elem ) {
		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		pqi_freeze_ccb(rcb->cm_ccb);
		DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
			error, nseg, softs->pqi_cap.max_sg_elem);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}

	/* Allocate sgt_t entries, not rcb_t: each element holds one
	   segment's address/length/flags (matches the free in
	   pqi_unmap_request()). */
	rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
	if (rcb->sgt == NULL) {
		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		pqi_freeze_ccb(rcb->cm_ccb);
		DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
	rcb->nseg = nseg;
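	/*
	 * Translate the busdma segment list into the controller's S/G
	 * element format: one addr/len pair per element, with flags
	 * cleared here (presumably filled in later by the request-building
	 * path as needed).
	 */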
	for (int i = 0; i < nseg; i++) {
		rcb->sgt[i].addr = segs[i].ds_addr;
		rcb->sgt[i].len = segs[i].ds_len;
		rcb->sgt[i].flags = 0;
	}

	if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREREAD);
	if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
		bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, BUS_DMASYNC_PREWRITE);

	/* Call IO functions depending on pd or ld */
	rcb->status = REQUEST_PENDING;

	error = pqisrc_build_send_io(softs, rcb);

	if (error) {
		rcb->req_pending = false;
		rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		pqi_freeze_ccb(rcb->cm_ccb);
		DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
		pqi_unmap_request(rcb);
		xpt_done((union ccb *)rcb->cm_ccb);
		return;
	}
}

/*
 * Function to dma-map the request buffer
 */
static int pqi_map_request( rcb_t *rcb )
{
	pqisrc_softstate_t *softs = rcb->softs;
	int error = PQI_STATUS_SUCCESS;
	union ccb *ccb = rcb->cm_ccb;

	/* check that mapping is necessary */
	if (rcb->cm_flags & PQI_CMD_MAPPED)
		return error;

	rcb->cm_flags |= PQI_CMD_MAPPED;
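	/*
	 * bus_dmamap_load_ccb() may invoke pqi_request_map_helper()
	 * synchronously, or return EINPROGRESS and defer the callback until
	 * mapping resources free up; the caller (pqisrc_io_start()) treats
	 * EINPROGRESS as "submitted, completion will follow".
	 */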
	if (rcb->bcount) {
		error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
			rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
		if (error) {
			DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
				error, rcb->bcount);
			return error;
		}
	} else {
		/*
		 * Set up the command to go to the controller. If there are no
		 * data buffers associated with the command then it can bypass
		 * the busdma mapping step.
		 */
		/* Call IO functions depending on pd or ld */
		rcb->status = REQUEST_PENDING;

		error = pqisrc_build_send_io(softs, rcb);
	}

	DBG_FUNC("OUT error = %d\n", error);

	return error;
}

/*
 * Function to clear the request control block
 */
void os_reset_rcb( rcb_t *rcb )
{
	rcb->error_info = NULL;
	rcb->tag = INVALID_ELEM;
	rcb->dvp = NULL;
	rcb->softs = NULL;
	rcb->cm_flags = 0;
	rcb->cm_data = NULL;
	rcb->bcount = 0;
	rcb->nseg = 0;
	rcb->sgt = NULL;
	rcb->cm_ccb = NULL;
	rcb->encrypt_enable = false;
	rcb->ioaccel_handle = 0;
	rcb->resp_qid = 0;
	rcb->req_pending = false;
}

/*
 * Callback function for the lun rescan
 */
static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(ccb);
}

/*
 * Function to rescan the lun
 */
static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
			int lun)
{
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct cam_path *path = NULL;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		return;
	}
	status = xpt_create_path(&path, NULL,
		cam_sim_path(softs->os_specific.sim), target, lun);
	if (status != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
			status);
		xpt_free_ccb(ccb);
		return;
	}

	xpt_setup_ccb(&ccb->ccb_h, path, 5);
	ccb->ccb_h.func_code = XPT_SCAN_LUN;
	ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
	ccb->crcn.flags = CAM_FLAG_NONE;

	xpt_action(ccb);
}

/*
 * Function to rescan the lun under each target
 */
void smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
	int target = 0, lun = 0;

	for(target = 0; target < PQI_MAX_DEVICES; target++){
		for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
			if(softs->device_list[target][lun]){
				smartpqi_lun_rescan(softs, target, lun);
			}
		}
	}
}

/*
 * Set the mode of tagged command queueing for the current task.
 */
uint8_t os_get_task_attr(rcb_t *rcb)
{
	union ccb *ccb = rcb->cm_ccb;
	uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;

	switch(ccb->csio.tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
		break;
	case MSG_ORDERED_Q_TAG:
		tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
		break;
	case MSG_SIMPLE_Q_TAG:
	default:
		tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
		break;
	}
	return tag_action;
}

/*
 * Complete all outstanding commands
 */
void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
	int tag = 0;

	for (tag = 1; tag < softs->max_outstanding_io; tag++) {
		rcb_t *prcb = &softs->rcb[tag];
		if(prcb->req_pending && prcb->cm_ccb ) {
			prcb->req_pending = false;
			prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
			xpt_done((union ccb *)prcb->cm_ccb);
			prcb->cm_ccb = NULL;
		}
	}
}

/*
 * IO handling functionality entry point
 */
static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
	rcb_t *rcb;
	uint32_t tag, no_transfer = 0;
	pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
					cam_sim_softc(sim);
	int32_t error = PQI_STATUS_FAILURE;
	pqi_scsi_dev_t *dvp;

	if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	/* Check controller state */
	if (IN_PQI_RESET(softs)) {
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET
					| CAM_BUSY | CAM_REQ_INPROG;
		DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device state */
	if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
		DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
		return error;
	}
	/* Check device reset */
	if (dvp->reset_in_progress) {
		ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
		DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
		return error;
	}

	if (dvp->expose_device == false) {
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
		return error;
	}
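	/*
	 * Each in-flight request is identified by a tag that doubles as an
	 * index into softs->rcb[]. If the pool is exhausted, freeze the simq
	 * and flag the controller busy; pqi_release_camq() releases it again
	 * on a later completion.
	 */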
	tag = pqisrc_get_tag(&softs->taglist);
	if( tag == INVALID_ELEM ) {
		DBG_ERR("Get Tag failed\n");
		xpt_freeze_simq(softs->os_specific.sim, 1);
		softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
		ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
		return PQI_STATUS_FAILURE;
	}

	DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);

	rcb = &softs->rcb[tag];
	os_reset_rcb(rcb);
	rcb->tag = tag;
	rcb->softs = softs;
	rcb->cmdlen = ccb->csio.cdb_len;
	ccb->ccb_h.sim_priv.entries[0].ptr = rcb;

	switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
		break;
	case CAM_DIR_OUT:
		rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
		break;
	case CAM_DIR_NONE:
		no_transfer = 1;
		break;
	default:
		DBG_ERR("Unknown Dir\n");
		break;
	}
	rcb->cm_ccb = ccb;
	rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];

	if (!no_transfer) {
		rcb->cm_data = (void *)ccb->csio.data_ptr;
		rcb->bcount = ccb->csio.dxfer_len;
	} else {
		rcb->cm_data = NULL;
		rcb->bcount = 0;
	}

	/*
	 * Submit the request to the adapter.
	 *
	 * Note that this may fail if we're unable to map the request (and
	 * if we ever learn a transport layer other than simple, may fail
	 * if the adapter rejects the command).
	 */
	if ((error = pqi_map_request(rcb)) != 0) {
		rcb->req_pending = false;
		xpt_freeze_simq(softs->os_specific.sim, 1);
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		if (error == EINPROGRESS) {
			DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
			error = 0;
		} else {
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			DBG_WARN("Requeue req error = %d target = %d\n", error,
				ccb->ccb_h.target_id);
			pqi_unmap_request(rcb);
		}
	}

	DBG_FUNC("OUT error = %d\n", error);
	return error;
}

/*
 * Abort a task, task management functionality
 */
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
	uint32_t abort_tag = rcb->tag;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;
	uint16_t qid;

	qid = (uint16_t)rcb->resp_qid;
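	/*
	 * The TMF itself needs its own request slot: grab a fresh tag and
	 * rcb for the abort request, while abort_tag still names the
	 * original command being aborted.
	 */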
	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;
	rcb->resp_qid = qid;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);

	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
		if (REQUEST_SUCCESS == rval) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		}
	}
	pqisrc_put_tag(&softs->taglist, abort_tag);
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Abort a task set, task management functionality
 */
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
		SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);

	if (rval == PQI_STATUS_SUCCESS) {
		rval = rcb->status;
	}

	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return rval;
}

/*
 * Target reset task management functionality
 */
static int
pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
{
	pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
	rcb_t *rcb = NULL;
	uint32_t tag = 0;
	int rval = PQI_STATUS_SUCCESS;

	if (devp == NULL) {
		DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
		return PQI_STATUS_FAILURE;
	}

	tag = pqisrc_get_tag(&softs->taglist);
	rcb = &softs->rcb[tag];
	rcb->tag = tag;

	devp->reset_in_progress = true;
	rval = pqisrc_send_tmf(softs, devp, rcb, 0,
		SOP_TASK_MANAGEMENT_LUN_RESET);
	if (PQI_STATUS_SUCCESS == rval) {
		rval = rcb->status;
	}
	devp->reset_in_progress = false;
	pqisrc_put_tag(&softs->taglist, rcb->tag);

	DBG_FUNC("OUT rval = %d\n", rval);

	return ((rval == REQUEST_SUCCESS) ?
		PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}

/*
 * cam entry point of the smartpqi module.
 */
static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:
	{
		if(!pqisrc_io_start(sim, ccb)) {
			return;
		}
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		cam_calc_geometry(ccg, /* extended */ 1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_PATH_INQ:
	{
		update_sim_properties(sim, &ccb->cpi);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
		get_transport_settings(softs, &ccb->cts);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_ABORT:
		if(pqisrc_scsi_abort_task(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			DBG_ERR("Abort task failed on %d\n",
				ccb->ccb_h.target_id);
			return;
		}
		break;
	case XPT_TERM_IO:
		if (pqisrc_scsi_abort_task_set(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Abort task set failed on %d\n",
				ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		}
		break;
	case XPT_RESET_DEV:
		if(pqisrc_target_reset(softs, ccb)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			DBG_ERR("Target reset failed on %d\n",
				ccb->ccb_h.target_id);
			xpt_done(ccb);
			return;
		} else {
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		break;
	case XPT_RESET_BUS:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	default:
		DBG_WARN("UNSUPPORTED FUNC CODE\n");
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);
}

/*
 * Function to poll for responses when interrupts are unavailable;
 * this also supports taking a crash dump.
 */
static void smartpqi_poll(struct cam_sim *sim)
{
	struct pqisrc_softstate *softs = cam_sim_softc(sim);
	int i;
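	/*
	 * Walk the response queues. The loop starts at index 1 because
	 * queue 0 appears to be reserved (likely the event queue) and is
	 * handled elsewhere - an assumption based on the loop bounds, not
	 * stated in this file.
	 */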
	for (i = 1; i < softs->intr_count; i++ )
		pqisrc_process_response_queue(softs, i);
}

/*
 * Function to adjust the queue depth of a device
 */
void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, 5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = queue_depth;
	xpt_action((union ccb *)&crs);
	if(crs.ccb_h.status != CAM_REQ_CMP) {
		printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
	}
}

/*
 * Async callback, registered from register_sim(), that sets the queue
 * depth when a new device is reported.
 */
static void
smartpqi_async(void *callback_arg, u_int32_t code,
		struct cam_path *path, void *arg)
{
	struct pqisrc_softstate *softs;
	softs = (struct pqisrc_softstate*)callback_arg;

	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL) {
			break;
		}
		uint32_t t_id = cgd->ccb_h.target_id;

		if (t_id <= (PQI_CTLR_INDEX - 1)) {
			if (softs != NULL) {
				pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
				smartpqi_adjust_queue_depth(path,
						dvp->queue_depth);
			}
		}
		break;
	}
	default:
		break;
	}
}

/*
 * Function to register the sim with the CAM layer for the smartpqi driver
 */
int register_sim(struct pqisrc_softstate *softs, int card_index)
{
	int max_transactions;
	union ccb *ccb = NULL;
	cam_status status = 0;
	struct ccb_setasync csa;
	struct cam_sim *sim;

	max_transactions = softs->max_io_for_scsi_ml;
	softs->os_specific.devq = cam_simq_alloc(max_transactions);
	if (softs->os_specific.devq == NULL) {
		DBG_ERR("cam_simq_alloc failed txns = %d\n",
			max_transactions);
		return PQI_STATUS_FAILURE;
	}
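	/*
	 * cam_sim_alloc(action, poll, name, softc, unit, mtx,
	 * max_dev_transactions, max_tagged_dev_transactions, devq):
	 * one untagged opening and max_transactions tagged openings are
	 * advertised per device here.
	 */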
	sim = cam_sim_alloc(smartpqi_cam_action, \
				smartpqi_poll, "smartpqi", softs, \
				card_index, &softs->os_specific.cam_lock, \
				1, max_transactions, softs->os_specific.devq);
	if (sim == NULL) {
		DBG_ERR("cam_sim_alloc failed txns = %d\n",
			max_transactions);
		cam_simq_free(softs->os_specific.devq);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim = sim;
	mtx_lock(&softs->os_specific.cam_lock);
	status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
	if (status != CAM_SUCCESS) {
		DBG_ERR("xpt_bus_register failed status=%d\n", status);
		cam_sim_free(softs->os_specific.sim, FALSE);
		cam_simq_free(softs->os_specific.devq);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	softs->os_specific.sim_registered = TRUE;
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		DBG_ERR("xpt_alloc_ccb_nowait failed\n");
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL,
			cam_sim_path(softs->os_specific.sim),
			CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		DBG_ERR("xpt_create_path failed\n");
		xpt_free_ccb(ccb);
		xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
		cam_sim_free(softs->os_specific.sim, TRUE);
		mtx_unlock(&softs->os_specific.cam_lock);
		return PQI_STATUS_FAILURE;
	}

	/*
	 * Register a callback to set the queue depth per target, which is
	 * derived from the FW.
	 */
	softs->os_specific.path = ccb->ccb_h.path;
	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		DBG_ERR("Unable to register smartpqi_async handler: %d!\n",
			csa.ccb_h.status);
	}

	mtx_unlock(&softs->os_specific.cam_lock);
	return PQI_STATUS_SUCCESS;
}

/*
 * Function to deregister the smartpqi sim from the CAM layer
 */
void deregister_sim(struct pqisrc_softstate *softs)
{
	struct ccb_setasync csa;

	if (softs->os_specific.mtx_init) {
		mtx_lock(&softs->os_specific.cam_lock);
	}

	xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = smartpqi_async;
	csa.callback_arg = softs;
	xpt_action((union ccb *)&csa);
	xpt_free_path(softs->os_specific.path);

	xpt_release_simq(softs->os_specific.sim, 0);

	xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
	softs->os_specific.sim_registered = FALSE;

	if (softs->os_specific.sim) {
		cam_sim_free(softs->os_specific.sim, FALSE);
		softs->os_specific.sim = NULL;
	}
	if (softs->os_specific.mtx_init) {
		mtx_unlock(&softs->os_specific.cam_lock);
	}
	if (softs->os_specific.devq != NULL) {
		cam_simq_free(softs->os_specific.devq);
	}
	if (softs->os_specific.mtx_init) {
		mtx_destroy(&softs->os_specific.cam_lock);
		softs->os_specific.mtx_init = FALSE;
	}

	mtx_destroy(&softs->os_specific.map_lock);
}