2 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * CAM interface for smartpqi driver
31 #include "smartpqi_includes.h"
34 * Set cam sim properties of the smartpqi adapter.
/*
 * Fill in a CCB path-inquiry (XPT_PATH_INQ) response describing this SIM:
 * HBA capabilities, addressing limits, transfer sizes, and PCI identity.
 * NOTE(review): interior lines of this chunk are elided; braces/returns
 * are not visible here.
 */
37 update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
40 pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
43 device_t dev = softs->os_specific.pqi_dev;
/* Advertise sync transfers, tagged queueing and 16-bit wide support. */
48 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
/* No bus-reset support; unmapped (bio) buffers are accepted. */
50 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
52 cpi->max_lun = PQI_MAX_MULTILUN;
53 cpi->max_target = 1088;
/* Largest single I/O: one page per s/g element, minus one for alignment. */
54 cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
55 cpi->initiator_id = 255;
56 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
57 strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
58 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
59 cpi->unit_number = cam_sim_unit(sim);
60 cpi->bus_id = cam_sim_bus(sim);
61 cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
62 cpi->protocol = PROTO_SCSI;
63 cpi->protocol_version = SCSI_REV_SPC4;
64 cpi->transport = XPORT_SPI;
65 cpi->transport_version = 2;
66 cpi->ccb_h.status = CAM_REQ_CMP;
/* Report the adapter's real PCI IDs so userland tools can identify it. */
67 cpi->hba_vendor = pci_get_vendor(dev);
68 cpi->hba_device = pci_get_device(dev);
69 cpi->hba_subvendor = pci_get_subvendor(dev);
70 cpi->hba_subdevice = pci_get_subdevice(dev);
77 * Get transport settings of the smartpqi adapter
/*
 * Answer an XPT_GET_TRAN_SETTINGS CCB: report fixed SPC-4/SPI transport
 * parameters with disconnect and tagged queueing enabled.
 */
80 get_transport_settings(struct pqisrc_softstate *softs,
81 struct ccb_trans_settings *cts)
83 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
84 struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas;
85 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
89 cts->protocol = PROTO_SCSI;
90 cts->protocol_version = SCSI_REV_SPC4;
91 cts->transport = XPORT_SPI;
92 cts->transport_version = 2;
/* Disconnect allowed on the (virtual) parallel-SCSI transport. */
93 spi->valid = CTS_SPI_VALID_DISC;
94 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
/* Tagged command queueing enabled. */
95 scsi->valid = CTS_SCSI_VALID_TQ;
96 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
97 sas->valid = CTS_SAS_VALID_SPEED;
98 cts->ccb_h.status = CAM_REQ_CMP;
104 * Add the target to CAM layer and rescan, when a new device is found
/*
 * Announce a newly discovered device to CAM by allocating a CCB and a
 * path to (target, lun), then rescanning it.  Only done once the SIM is
 * registered.  NOTE(review): the rescan issue itself is in elided lines.
 */
107 os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
113 if(softs->os_specific.sim_registered) {
/* Non-sleeping allocation: may legitimately fail under memory pressure. */
114 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
115 DBG_ERR("rescan failed (can't allocate CCB)\n");
119 if (xpt_create_path(&ccb->ccb_h.path, NULL,
120 cam_sim_path(softs->os_specific.sim),
121 device->target, device->lun) != CAM_REQ_CMP) {
122 DBG_ERR("rescan failed (can't create path)\n");
133 * Remove the device from CAM layer when deleted or hot removed
/*
 * Tear down a device: notify CAM via AC_LOST_DEVICE on a temporary path,
 * clear the driver's device-list slot, and free the device structure.
 */
136 os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
138 struct cam_path *tmppath;
142 if(softs->os_specific.sim_registered) {
143 if (xpt_create_path(&tmppath, NULL,
144 cam_sim_path(softs->os_specific.sim),
145 device->target, device->lun) != CAM_REQ_CMP) {
146 DBG_ERR("unable to create path for async event");
/* Tell CAM the device is gone, then release the temporary path. */
149 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
150 xpt_free_path(tmppath);
/* Drop our reference before freeing so no one can look it up again. */
151 softs->device_list[device->target][device->lun] = NULL;
152 pqisrc_free_device(softs, device);
160 * Function to release the frozen simq
/*
 * If the SIM queue was frozen because the driver was out of resources
 * (PQI_FLAG_BUSY), release it now that a command has completed.
 */
163 pqi_release_camq(rcb_t *rcb)
165 pqisrc_softstate_t *softs;
166 struct ccb_scsiio *csio;
168 csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
173 if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
174 softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
/* Only unfreeze if this CCB hasn't already been told to release the simq. */
175 if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
176 xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
178 csio->ccb_h.status |= CAM_RELEASE_SIMQ;
/*
 * Post-I/O DMA bookkeeping for a completed request: sync the data map in
 * the direction the transfer ran, unload it, and free the scatter/gather
 * table.  No-op if the request was never mapped.
 */
185 pqi_synch_request(rcb_t *rcb)
187 pqisrc_softstate_t *softs = rcb->softs;
189 DBG_IO("IN rcb = %p\n", rcb);
/* Nothing to do unless bus_dmamap_load actually mapped this command. */
191 if (!(rcb->cm_flags & PQI_CMD_MAPPED))
194 if (rcb->bcount != 0 ) {
/* POSTREAD for device-to-host, POSTWRITE for host-to-device transfers. */
195 if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
196 bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
198 BUS_DMASYNC_POSTREAD);
199 if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
200 bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
202 BUS_DMASYNC_POSTWRITE);
203 bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
206 rcb->cm_flags &= ~PQI_CMD_MAPPED;
/* Free the s/g table allocated in pqi_request_map_helper(). */
208 if(rcb->sgt && rcb->nseg)
209 os_mem_free(rcb->softs, (void*)rcb->sgt,
210 rcb->nseg*sizeof(sgt_t));
216 * Function to dma-unmap the completed request
/*
 * Undo the DMA mapping for a finished request and return its command tag
 * to the free-tag list.
 */
219 pqi_unmap_request(rcb_t *rcb)
221 DBG_IO("IN rcb = %p\n", rcb);
223 pqi_synch_request(rcb);
224 pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);
230 * Construct meaningful LD name for volume here.
/*
 * Rewrite the INQUIRY response of a logical volume so that the vendor,
 * product and revision strings describe the RAID volume (RAID level,
 * OK/OFF state) rather than the raw device.  Physical disks and external
 * RAID devices are left untouched so CAM probes them normally.
 */
233 smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
235 struct scsi_inquiry_data *inq = NULL;
237 pqi_scsi_dev_t *device = NULL;
241 if (pqisrc_ctrl_offline(softs))
/* CDB may be inline in the CCB or referenced through a pointer. */
244 cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
245 (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
/* Only standard (non-EVPD) INQUIRY reads with a full-size buffer qualify. */
246 if(cdb[0] == INQUIRY &&
247 (cdb[1] & SI_EVPD) == 0 &&
248 (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
249 csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
251 inq = (struct scsi_inquiry_data *)csio->data_ptr;
253 device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
255 /* Let the disks be probed and dealt with via CAM. Only for LD
256 let it fall through and inquiry be tweaked */
257 if (!device || !pqisrc_is_logical_device(device) ||
258 (device->devtype != DISK_DEVICE) ||
259 pqisrc_is_external_raid_device(device)) {
/* NOTE(review): strncpy here relies on elided length arguments; the
 * inquiry string fields are fixed-width and not NUL-terminated by spec. */
263 strncpy(inq->vendor, device->vendor,
265 strncpy(inq->product,
266 pqisrc_raidlevel_to_string(device->raid_level),
268 strncpy(inq->revision, device->volume_offline?"OFF":"OK",
/*
 * Common completion path for a SCSI I/O: unmap DMA, fix up LD inquiry
 * data, release a frozen simq if needed, free the tag, and hand the CCB
 * back to CAM with xpt_done().
 */
276 pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
278 uint32_t release_tag;
279 pqisrc_softstate_t *softs = rcb->softs;
281 DBG_IO("IN scsi io = %p\n", csio);
283 pqi_synch_request(rcb);
284 smartpqi_fix_ld_inquiry(rcb->softs, csio);
285 pqi_release_camq(rcb);
/* Save the tag before elided cleanup may reset the rcb. */
286 release_tag = rcb->tag;
288 pqisrc_put_tag(&softs->taglist, release_tag);
289 xpt_done((union ccb *)csio);
295 * Handle completion of a command - pass results back through the CCB
/*
 * Success-path completion handler: mark the request and CCB as completed
 * and run the common completion routine.  Panics on a NULL rcb/csio
 * because that indicates driver state corruption.
 */
298 os_io_response_success(rcb_t *rcb)
300 struct ccb_scsiio *csio;
302 DBG_IO("IN rcb = %p\n", rcb);
305 panic("rcb is null");
307 csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
310 panic("csio is null");
312 rcb->status = REQUEST_SUCCESS;
313 csio->ccb_h.status = CAM_REQ_CMP;
315 pqi_complete_scsi_io(csio, rcb);
/*
 * Copy controller-supplied sense data into the CCB's sense buffer,
 * truncating to the buffer size and recording any residual length.
 */
321 copy_sense_data_to_csio(struct ccb_scsiio *csio,
322 uint8_t *sense_data, uint16_t sense_data_len)
324 DBG_IO("IN csio = %p\n", csio);
326 memset(&csio->sense_data, 0, csio->sense_len);
/* Clamp to the CCB's sense buffer so we never overrun it. */
328 sense_data_len = (sense_data_len > csio->sense_len) ?
329 csio->sense_len : sense_data_len;
332 memcpy(&csio->sense_data, sense_data, sense_data_len);
/* sense_resid = bytes of the sense buffer left unfilled. */
334 if (csio->sense_len > sense_data_len)
335 csio->sense_resid = csio->sense_len - sense_data_len;
337 csio->sense_resid = 0;
343 * Error response handling for raid IO
/*
 * Map a RAID-path error response from the firmware onto CAM status codes:
 * queue-full is retried as complete, check-condition carries sense data,
 * underflow adjusts the residual.  NOTE(review): several case bodies and
 * break statements are elided from this view.
 */
346 os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
348 struct ccb_scsiio *csio;
349 pqisrc_softstate_t *softs;
353 csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
356 panic("csio is null");
/* Default to a generic completion error until proven otherwise. */
360 csio->ccb_h.status = CAM_REQ_CMP_ERR;
362 if (!err_info || !rcb->dvp) {
363 DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
368 csio->scsi_status = err_info->status;
370 if (csio->ccb_h.func_code == XPT_SCSI_IO) {
372 * Handle specific SCSI status values.
374 switch(csio->scsi_status) {
375 case PQI_RAID_STATUS_QUEUE_FULL:
376 csio->ccb_h.status = CAM_REQ_CMP;
377 DBG_ERR("Queue Full error\n");
379 /* check condition, sense data included */
380 case PQI_RAID_STATUS_CHECK_CONDITION:
382 uint16_t sense_data_len =
383 LE_16(err_info->sense_data_len);
384 uint8_t *sense_data = NULL;
386 sense_data = err_info->data;
387 copy_sense_data_to_csio(csio, sense_data, sense_data_len);
388 csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
/* Underflow: compute how much data the device actually moved. */
395 case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
398 resid = rcb->bcount-err_info->data_out_transferred;
400 csio->ccb_h.status = CAM_REQ_CMP;
404 csio->ccb_h.status = CAM_REQ_CMP;
410 pqi_complete_scsi_io(csio, rcb);
416 * Error response handling for aio.
/*
 * Map an AIO-path (accelerated I/O) error response onto CAM status codes.
 * Handles completion/failure service responses, TMF success/rejection,
 * underrun/overrun, and the AIO-path-disabled case where the device is
 * demoted back to the RAID path and the request is requeued.
 * NOTE(review): many break statements and brace lines are elided here.
 */
419 os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
421 struct ccb_scsiio *csio;
422 pqisrc_softstate_t *softs;
427 panic("rcb is null");
429 rcb->status = REQUEST_SUCCESS;
430 csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
432 panic("csio is null");
436 if (!err_info || !rcb->dvp) {
437 csio->ccb_h.status = CAM_REQ_CMP_ERR;
438 DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
443 switch (err_info->service_resp) {
444 case PQI_AIO_SERV_RESPONSE_COMPLETE:
445 csio->ccb_h.status = err_info->status;
447 case PQI_AIO_SERV_RESPONSE_FAILURE:
448 switch(err_info->status) {
449 case PQI_AIO_STATUS_IO_ABORTED:
450 csio->ccb_h.status = CAM_REQ_ABORTED;
451 DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
/* Underrun is a successful transfer with a residual count. */
453 case PQI_AIO_STATUS_UNDERRUN:
454 csio->ccb_h.status = CAM_REQ_CMP;
456 LE_32(err_info->resd_count);
458 case PQI_AIO_STATUS_OVERRUN:
459 csio->ccb_h.status = CAM_REQ_CMP;
461 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
462 DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
463 /* Timed out TMF response comes here */
465 rcb->req_pending = false;
466 rcb->status = REQUEST_SUCCESS;
467 DBG_ERR("AIO Disabled for TMF\n");
/* Fall back to the RAID path for this device and retry the I/O. */
470 rcb->dvp->aio_enabled = false;
471 rcb->dvp->offload_enabled = false;
472 csio->ccb_h.status |= CAM_REQUEUE_REQ;
474 case PQI_AIO_STATUS_IO_ERROR:
475 case PQI_AIO_STATUS_IO_NO_DEVICE:
476 case PQI_AIO_STATUS_INVALID_DEVICE:
478 DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
479 csio->ccb_h.status |=
480 CAM_SCSI_STATUS_ERROR;
484 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
485 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
486 DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
487 (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
488 rcb->status = REQUEST_SUCCESS;
489 rcb->req_pending = false;
491 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
492 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
493 DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
494 (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
495 rcb->status = REQUEST_FAILED;
496 rcb->req_pending = false;
499 DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
500 csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
/* If the firmware attached sense bytes, surface them via autosense. */
504 if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
505 csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
506 uint8_t *sense_data = NULL;
507 unsigned sense_data_len = LE_16(err_info->data_len);
509 sense_data = err_info->data;
510 DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
512 copy_sense_data_to_csio(csio, sense_data, sense_data_len);
513 csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
517 pqi_complete_scsi_io(csio, rcb);
/*
 * Freeze the device queue for this CCB exactly once: the CAM_DEV_QFRZN
 * flag guards against freezing the same devq twice for one CCB.
 */
522 pqi_freeze_ccb(union ccb *ccb)
524 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
525 ccb->ccb_h.status |= CAM_DEV_QFRZN;
526 xpt_freeze_devq(ccb->ccb_h.path, 1);
531 * Command-mapping helper function - populate this command's s/g table.
/*
 * bus_dmamap_load_ccb() callback: copy the DMA segment list into the
 * rcb's scatter/gather table, pre-sync the data map, and submit the I/O
 * to the firmware.  On any failure the CCB is failed with
 * CAM_RESRC_UNAVAIL and the request is unmapped.
 */
534 pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
536 rcb_t *rcb = (rcb_t *)arg;
537 pqisrc_softstate_t *softs = rcb->softs;
/* Reject mapping errors and s/g lists longer than the adapter supports. */
540 if (error || nseg > softs->pqi_cap.max_sg_elem) {
541 DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
542 error, nseg, softs->pqi_cap.max_sg_elem);
546 rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
549 DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
554 for (int i = 0; i < nseg; i++) {
555 rcb->sgt[i].addr = segs[i].ds_addr;
556 rcb->sgt[i].len = segs[i].ds_len;
557 rcb->sgt[i].flags = 0;
/* PREREAD/PREWRITE sync depending on transfer direction. */
560 if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
561 bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
562 rcb->cm_datamap, BUS_DMASYNC_PREREAD);
563 if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
564 bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
565 rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
567 /* Call IO functions depending on pd or ld */
568 rcb->status = REQUEST_PENDING;
570 error = pqisrc_build_send_io(softs, rcb);
573 rcb->req_pending = false;
574 DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
576 /* Successfully IO was submitted to the device. */
/* Error exit: fail the CCB and undo the DMA mapping. */
582 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
584 pqi_unmap_request(rcb);
590 * Function to dma-map the request buffer
/*
 * DMA-map a request's data buffer.  bus_dmamap_load_ccb() may complete
 * synchronously (callback runs inline) or return EINPROGRESS for a
 * deferred callback; both are success here.  Requests with no data
 * bypass mapping and are submitted directly.
 */
593 pqi_map_request(rcb_t *rcb)
595 pqisrc_softstate_t *softs = rcb->softs;
596 int bsd_status = BSD_SUCCESS;
597 union ccb *ccb = rcb->cm_ccb;
601 /* check that mapping is necessary */
602 if (rcb->cm_flags & PQI_CMD_MAPPED)
605 rcb->cm_flags |= PQI_CMD_MAPPED;
608 bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
609 rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
/* EINPROGRESS means the callback will run later once resources free up. */
610 if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
611 DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
612 bsd_status, rcb->bcount);
617 * Set up the command to go to the controller. If there are no
618 * data buffers associated with the command then it can bypass
621 /* Call IO functions depending on pd or ld */
622 rcb->status = REQUEST_PENDING;
624 if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
629 DBG_FUNC("OUT error = %d\n", bsd_status);
635 * Function to clear the request control block
/*
 * Reset a request control block to its idle state so it can be reused
 * for the next command.  NOTE(review): additional field resets sit on
 * elided lines between the ones shown.
 */
638 os_reset_rcb(rcb_t *rcb)
640 rcb->error_info = NULL;
643 rcb->tag = INVALID_ELEM;
653 rcb->encrypt_enable = false;
654 rcb->ioaccel_handle = 0;
656 rcb->req_pending = false;
661 * Callback function for the lun rescan
/*
 * Completion callback for the XPT_SCAN_LUN CCB issued by
 * smartpqi_lun_rescan(): just release the path created for the scan.
 */
664 smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
666 xpt_free_path(ccb->ccb_h.path);
672 * Function to rescan the lun
/*
 * Kick off an asynchronous LUN rescan: allocate a CCB and a path to
 * (target, lun), then issue XPT_SCAN_LUN with smartpqi_lunrescan_cb as
 * the completion handler (which frees the path).
 */
675 smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
678 union ccb *ccb = NULL;
679 cam_status status = 0;
680 struct cam_path *path = NULL;
684 ccb = xpt_alloc_ccb_nowait();
686 DBG_ERR("Unable to alloc ccb for lun rescan\n");
690 status = xpt_create_path(&path, NULL,
691 cam_sim_path(softs->os_specific.sim), target, lun);
692 if (status != CAM_REQ_CMP) {
693 DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
/* Priority 5 matches the other setup CCBs in this driver. */
699 bzero(ccb, sizeof(union ccb));
700 xpt_setup_ccb(&ccb->ccb_h, path, 5);
701 ccb->ccb_h.func_code = XPT_SCAN_LUN;
702 ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
703 ccb->crcn.flags = CAM_FLAG_NONE;
711 * Function to rescan the lun under each target
/*
 * Walk the whole device table and rescan every populated (target, lun)
 * slot.
 */
714 smartpqi_target_rescan(struct pqisrc_softstate *softs)
716 int target = 0, lun = 0;
720 for(target = 0; target < PQI_MAX_DEVICES; target++){
721 for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
722 if(softs->device_list[target][lun]){
723 smartpqi_lun_rescan(softs, target, lun);
732 * Set the mode of tagged command queueing for the current task.
/*
 * Translate the CCB's CAM tag action into the SOP task attribute used by
 * the firmware; unknown/simple tags default to SIMPLE.
 */
735 os_get_task_attr(rcb_t *rcb)
737 union ccb *ccb = rcb->cm_ccb;
738 uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
740 switch(ccb->csio.tag_action) {
741 case MSG_HEAD_OF_Q_TAG:
742 tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
744 case MSG_ORDERED_Q_TAG:
745 tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
747 case MSG_SIMPLE_Q_TAG:
749 tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
756 * Complete all outstanding commands
/*
 * Abort every in-flight request (e.g. on controller teardown): walk the
 * rcb table by tag, complete each pending command back to CAM as
 * aborted, and drop the device's active-I/O count.
 */
759 os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
762 pqi_scsi_dev_t *dvp = NULL;
/* Tag 0 is not used; valid tags run 1..max_outstanding_io. */
766 for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
767 rcb_t *prcb = &softs->rcb[tag];
769 if(prcb->req_pending && prcb->cm_ccb ) {
770 prcb->req_pending = false;
771 prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
772 pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
774 pqisrc_decrement_device_active_io(softs, dvp);
783 * IO handling functionality entry point
/*
 * Entry point for XPT_SCSI_IO: validate the target/controller state,
 * allocate a tag and rcb, record the CDB, data direction and buffer,
 * then map and submit the request.  On resource exhaustion the simq is
 * frozen and the CCB is requeued.  NOTE(review): several return paths
 * and rcb field assignments lie on elided lines.
 */
786 pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
789 uint32_t tag, no_transfer = 0;
790 pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
797 if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
798 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
799 DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
803 dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
804 /* Check controller state */
805 if (IN_PQI_RESET(softs)) {
806 ccb->ccb_h.status = CAM_SCSI_BUS_RESET
807 | CAM_BUSY | CAM_REQ_INPROG;
808 DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
811 /* Check device state */
812 if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
813 ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
814 DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
817 /* Check device reset */
818 if (DEVICE_RESET(dvp)) {
819 ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
820 DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
824 if (dvp->expose_device == false) {
825 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
826 DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
/* Out of tags: freeze the simq and ask CAM to requeue this CCB. */
830 tag = pqisrc_get_tag(&softs->taglist);
831 if (tag == INVALID_ELEM) {
832 DBG_ERR("Get Tag failed\n");
833 xpt_freeze_simq(softs->os_specific.sim, 1);
834 softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
835 ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
839 DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
841 rcb = &softs->rcb[tag];
845 rcb->cmdlen = ccb->csio.cdb_len;
/* Stash the rcb in the CCB so TMF (abort) handlers can find it later. */
846 ccb->ccb_h.sim_priv.entries[0].ptr = rcb;
848 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
850 rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
853 rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
859 DBG_ERR("Unknown Dir\n");
863 rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
866 rcb->cm_data = (void *)ccb->csio.data_ptr;
867 rcb->bcount = ccb->csio.dxfer_len;
873 * Submit the request to the adapter.
875 * Note that this may fail if we're unable to map the request (and
876 * if we ever learn a transport layer other than simple, may fail
877 * if the adapter rejects the command).
879 if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
880 xpt_freeze_simq(softs->os_specific.sim, 1);
881 if (error == EINPROGRESS) {
882 /* Release simq in the completion */
883 softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
886 rcb->req_pending = false;
887 ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
888 DBG_WARN("Requeue req error = %d target = %d\n", error,
889 ccb->ccb_h.target_id);
890 pqi_unmap_request(rcb);
895 DBG_FUNC("OUT error = %d\n", error);
/*
 * Collapse a firmware TMF status and the rcb's request status into a
 * single BSD-style result: success only when both succeeded.
 */
901 pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
903 if (PQI_STATUS_SUCCESS == pqi_status &&
904 REQUEST_SUCCESS == rcb->status)
911 * Abort a task, task management functionality
/*
 * XPT_ABORT handler: retrieve the rcb of the command being aborted from
 * the CCB's sim_priv slot, allocate a fresh tag/rcb for the TMF itself,
 * and send an ABORT_TASK to the firmware.  The TMF tag is returned to
 * the pool before exit.
 */
914 pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
916 struct ccb_hdr *ccb_h = &ccb->ccb_h;
/* prcb = the rcb of the I/O we are asked to abort (set in pqisrc_io_start). */
918 rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
924 tag = pqisrc_get_tag(&softs->taglist);
925 rcb = &softs->rcb[tag];
929 DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
936 rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
937 SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
939 if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
940 ccb->ccb_h.status = CAM_REQ_ABORTED;
944 pqisrc_put_tag(&softs->taglist, tag);
946 DBG_FUNC("OUT rval = %d\n", rval);
952 * Abort a taskset, task management functionality
/*
 * XPT_ABORT-task-set handler: allocate a TMF tag/rcb and send an
 * ABORT_TASK_SET to the firmware for the addressed device; no specific
 * victim rcb is passed (NULL).
 */
955 pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
957 struct ccb_hdr *ccb_h = &ccb->ccb_h;
964 tag = pqisrc_get_tag(&softs->taglist);
965 rcb = &softs->rcb[tag];
969 DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
976 rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
977 SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);
979 rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
983 pqisrc_put_tag(&softs->taglist, tag);
985 DBG_FUNC("OUT rval = %d\n", rval);
991 * Target reset task management functionality
/*
 * XPT_RESET_DEV handler: send a LUN_RESET TMF to the addressed device,
 * bracketing the operation with the device's reset_in_progress flag so
 * new I/O is held off (see DEVICE_RESET check in pqisrc_io_start).
 */
994 pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
996 struct ccb_hdr *ccb_h = &ccb->ccb_h;
997 pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
1005 DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
1009 tag = pqisrc_get_tag(&softs->taglist);
1010 rcb = &softs->rcb[tag];
1013 devp->reset_in_progress = true;
1017 rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
1018 SOP_TASK_MANAGEMENT_LUN_RESET);
1020 rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
1021 devp->reset_in_progress = false;
1024 pqisrc_put_tag(&softs->taglist, tag);
1026 DBG_FUNC("OUT rval = %d\n", rval);
1033 * cam entry point of the smartpqi module.
/*
 * Main CAM action dispatcher: routes each CCB function code to the
 * matching handler (I/O start, geometry, path/transport inquiry, TMFs)
 * and sets the CCB status.  NOTE(review): several case labels, breaks,
 * and the xpt_done() calls are on elided lines.
 */
1036 smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
1038 struct pqisrc_softstate *softs = cam_sim_softc(sim);
1039 struct ccb_hdr *ccb_h = &ccb->ccb_h;
1043 switch (ccb_h->func_code) {
1046 if(!pqisrc_io_start(sim, ccb)) {
1051 case XPT_CALC_GEOMETRY:
1053 struct ccb_calc_geometry *ccg;
/* A zero block size cannot yield a valid geometry. */
1055 if (ccg->block_size == 0) {
1056 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1057 ccb->ccb_h.status |= CAM_REQ_INVALID;
1060 cam_calc_geometry(ccg, /* extended */ 1);
1061 ccb->ccb_h.status = CAM_REQ_CMP;
1066 update_sim_properties(sim, &ccb->cpi);
1067 ccb->ccb_h.status = CAM_REQ_CMP;
1070 case XPT_GET_TRAN_SETTINGS:
1071 get_transport_settings(softs, &ccb->cts);
1072 ccb->ccb_h.status = CAM_REQ_CMP;
1075 if(pqisrc_scsi_abort_task(softs, ccb)) {
1076 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1078 DBG_ERR("Abort task failed on %d\n",
1079 ccb->ccb_h.target_id);
1084 if (pqisrc_scsi_abort_task_set(softs, ccb)) {
1085 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1086 DBG_ERR("Abort task set failed on %d\n",
1087 ccb->ccb_h.target_id);
1093 if(pqisrc_target_reset(softs, ccb)) {
1094 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1095 DBG_ERR("Target reset failed on %d\n",
1096 ccb->ccb_h.target_id);
1100 ccb->ccb_h.status = CAM_REQ_CMP;
1104 ccb->ccb_h.status = CAM_REQ_CMP;
/* Transport settings are fixed; refuse attempts to change them. */
1106 case XPT_SET_TRAN_SETTINGS:
1107 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1110 DBG_WARN("UNSUPPORTED FUNC CODE\n");
1111 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1120 * Function to poll the response, when interrupts are unavailable
1121 * This also serves supporting crash dump.
/*
 * Polled-mode completion path: drain every per-vector response queue.
 * Queue index starts at 1 — presumably index 0 is the event/admin queue;
 * TODO confirm against the queue-setup code.
 */
1124 smartpqi_poll(struct cam_sim *sim)
1126 struct pqisrc_softstate *softs = cam_sim_softc(sim);
1129 for (i = 1; i < softs->intr_count; i++ )
1130 pqisrc_process_response_queue(softs, i);
1134 * Function to adjust the queue depth of a device
/*
 * Set a device's queue depth by issuing an XPT_REL_SIMQ CCB with
 * RELSIM_ADJUST_OPENINGS; logs but does not propagate failure.
 */
1137 smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
1139 struct ccb_relsim crs;
1143 memset(&crs, 0, sizeof(crs));
1144 xpt_setup_ccb(&crs.ccb_h, path, 5);
1145 crs.ccb_h.func_code = XPT_REL_SIMQ;
1146 crs.ccb_h.flags = CAM_DEV_QFREEZE;
1147 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1148 crs.openings = queue_depth;
/* Synchronous xpt_action: status is valid immediately on return. */
1149 xpt_action((union ccb *)&crs);
1150 if(crs.ccb_h.status != CAM_REQ_CMP) {
1151 printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
1158 * Function to register async callback for setting queue depth
/*
 * CAM async-event callback (registered via XPT_SASYNC_CB): on
 * AC_FOUND_DEVICE, look up the device and set its CAM queue depth.
 * NOTE(review): the queue-depth argument to smartpqi_adjust_queue_depth
 * is on an elided line.
 */
1161 smartpqi_async(void *callback_arg, u_int32_t code,
1162 struct cam_path *path, void *arg)
1164 struct pqisrc_softstate *softs;
1165 softs = (struct pqisrc_softstate*)callback_arg;
1170 case AC_FOUND_DEVICE:
1172 struct ccb_getdev *cgd;
1173 cgd = (struct ccb_getdev *)arg;
1177 uint32_t t_id = cgd->ccb_h.target_id;
/* Ignore the controller's own target slot (PQI_CTLR_INDEX and above). */
1179 if (t_id <= (PQI_CTLR_INDEX - 1)) {
1180 if (softs != NULL) {
1181 pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
1183 DBG_ERR("Target is null, target id=%d\n", t_id);
1186 smartpqi_adjust_queue_depth(path,
1200 * Function to register sim with CAM layer for smartpqi driver
/*
 * Attach this adapter to CAM: allocate the SIM queue and SIM, register
 * the bus, create a wildcard path, and hook smartpqi_async() for
 * AC_FOUND_DEVICE events.  Each failure path unwinds the resources
 * acquired so far (simq, sim, bus registration).
 */
1203 register_sim(struct pqisrc_softstate *softs, int card_index)
1205 int max_transactions;
1206 union ccb *ccb = NULL;
1208 struct ccb_setasync csa;
1209 struct cam_sim *sim;
/* Size the SIM queue to the max I/Os the core layer will accept. */
1213 max_transactions = softs->max_io_for_scsi_ml;
1214 softs->os_specific.devq = cam_simq_alloc(max_transactions);
1215 if (softs->os_specific.devq == NULL) {
1216 DBG_ERR("cam_simq_alloc failed txns = %d\n",
1221 sim = cam_sim_alloc(smartpqi_cam_action, \
1222 smartpqi_poll, "smartpqi", softs, \
1223 card_index, &softs->os_specific.cam_lock, \
1224 1, max_transactions, softs->os_specific.devq)
1226 DBG_ERR("cam_sim_alloc failed txns = %d\n",
1228 cam_simq_free(softs->os_specific.devq);
1232 softs->os_specific.sim = sim;
/* xpt_bus_register must be called with the SIM lock held. */
1233 mtx_lock(&softs->os_specific.cam_lock);
1234 error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
1235 if (error != CAM_SUCCESS) {
1236 DBG_ERR("xpt_bus_register failed errno %d\n", error);
1237 cam_sim_free(softs->os_specific.sim, FALSE);
1238 cam_simq_free(softs->os_specific.devq);
1239 mtx_unlock(&softs->os_specific.cam_lock);
1243 softs->os_specific.sim_registered = TRUE;
1244 ccb = xpt_alloc_ccb_nowait();
1246 DBG_ERR("xpt_create_path failed\n");
/* Wildcard path: receives async events for every target/lun on the bus. */
1250 if (xpt_create_path(&ccb->ccb_h.path, NULL,
1251 cam_sim_path(softs->os_specific.sim),
1252 CAM_TARGET_WILDCARD,
1253 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1254 DBG_ERR("xpt_create_path failed\n");
1256 xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1257 cam_sim_free(softs->os_specific.sim, TRUE);
1258 mtx_unlock(&softs->os_specific.cam_lock);
1262 * Callback to set the queue depth per target which is
1263 * derived from the FW.
1265 softs->os_specific.path = ccb->ccb_h.path;
1266 memset(&csa, 0, sizeof(csa));
1267 xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1268 csa.ccb_h.func_code = XPT_SASYNC_CB;
1269 csa.event_enable = AC_FOUND_DEVICE;
1270 csa.callback = smartpqi_async;
1271 csa.callback_arg = softs;
1272 xpt_action((union ccb *)&csa);
1273 if (csa.ccb_h.status != CAM_REQ_CMP) {
1274 DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n",
1278 mtx_unlock(&softs->os_specific.cam_lock);
1285 * Function to deregister smartpqi sim from cam layer
/*
 * Detach from CAM: unhook the async callback (event_enable = 0), free
 * the wildcard path, release and deregister the SIM and its queue, then
 * destroy the driver mutexes.  Every step is guarded so this is safe to
 * call from partial-attach error paths.
 */
1288 deregister_sim(struct pqisrc_softstate *softs)
1290 struct ccb_setasync csa;
1294 if (softs->os_specific.mtx_init) {
1295 mtx_lock(&softs->os_specific.cam_lock);
/* Re-issue XPT_SASYNC_CB with no events to unregister smartpqi_async. */
1299 memset(&csa, 0, sizeof(csa));
1300 xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1301 csa.ccb_h.func_code = XPT_SASYNC_CB;
1302 csa.event_enable = 0;
1303 csa.callback = smartpqi_async;
1304 csa.callback_arg = softs;
1305 xpt_action((union ccb *)&csa);
1306 xpt_free_path(softs->os_specific.path);
1308 if (softs->os_specific.sim) {
1309 xpt_release_simq(softs->os_specific.sim, 0);
1310 xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1311 softs->os_specific.sim_registered = FALSE;
1312 cam_sim_free(softs->os_specific.sim, FALSE);
1313 softs->os_specific.sim = NULL;
1316 if (softs->os_specific.mtx_init) {
1317 mtx_unlock(&softs->os_specific.cam_lock);
1319 if (softs->os_specific.devq != NULL) {
1320 cam_simq_free(softs->os_specific.devq);
1322 if (softs->os_specific.mtx_init) {
1323 mtx_destroy(&softs->os_specific.cam_lock);
1324 softs->os_specific.mtx_init = FALSE;
1327 mtx_destroy(&softs->os_specific.map_lock);
/*
 * Fire an AC_INQ_CHANGED async event for a device whose inquiry data may
 * have changed, prompting CAM to re-probe it; clears the device's
 * scsi_rescan flag afterwards.
 */
1333 os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1335 struct cam_path *tmppath;
1339 if(softs->os_specific.sim_registered) {
1340 if (xpt_create_path(&tmppath, NULL,
1341 cam_sim_path(softs->os_specific.sim),
1342 device->target, device->lun) != CAM_REQ_CMP) {
1343 DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
1344 device->bus, device->target, device->lun);
1347 xpt_async(AC_INQ_CHANGED, tmppath, NULL);
1348 xpt_free_path(tmppath);
1351 device->scsi_rescan = false;