1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /* $FreeBSD$ */
28 /*
29  * CAM interface for smartpqi driver
30  */
31
32 #include "smartpqi_includes.h"
33
34 /*
35  * Set cam sim properties of the smartpqi adapter.
36  */
37 static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
38 {
39
40         pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
41                                         cam_sim_softc(sim);
42         DBG_FUNC("IN\n");
43
44         cpi->version_num = 1;
45         cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
46         cpi->target_sprt = 0;
47         cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
48         cpi->hba_eng_cnt = 0;
49         cpi->max_lun = PQI_MAX_MULTILUN;
50         cpi->max_target = 1088;
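        /*
         * The largest single I/O is bounded by the scatter/gather elements the
         * firmware reports: one element is held back (presumably for SGL
         * chaining) and each remaining element maps at most one page, e.g. with
         * 65 elements and 4 KB pages this allows 64 * 4096 = 256 KB per request.
         */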
51         cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
52         cpi->initiator_id = 255;
53         strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
54         strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
55         strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
56         cpi->unit_number = cam_sim_unit(sim);
57         cpi->bus_id = cam_sim_bus(sim);
58         cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
59         cpi->protocol = PROTO_SCSI;
60         cpi->protocol_version = SCSI_REV_SPC4;
61         cpi->transport = XPORT_SPI;
62         cpi->transport_version = 2;
63         cpi->ccb_h.status = CAM_REQ_CMP;
64
65         DBG_FUNC("OUT\n");
66 }
67
68 /*
69  * Get transport settings of the smartpqi adapter 
70  */
71 static void get_transport_settings(struct pqisrc_softstate *softs,
72                 struct ccb_trans_settings *cts)
73 {
74         struct ccb_trans_settings_scsi  *scsi = &cts->proto_specific.scsi;
75         struct ccb_trans_settings_sas   *sas = &cts->xport_specific.sas;
76         struct ccb_trans_settings_spi   *spi = &cts->xport_specific.spi;
77
78         DBG_FUNC("IN\n");
79         
80         cts->protocol = PROTO_SCSI;
81         cts->protocol_version = SCSI_REV_SPC4;
82         cts->transport = XPORT_SPI;
83         cts->transport_version = 2;
84         spi->valid = CTS_SPI_VALID_DISC;
85         spi->flags = CTS_SPI_FLAGS_DISC_ENB;
86         scsi->valid = CTS_SCSI_VALID_TQ;
87         scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
88         sas->valid = CTS_SAS_VALID_SPEED;
89         cts->ccb_h.status = CAM_REQ_CMP;
90
91         DBG_FUNC("OUT\n");
92 }
93
94 /*
95  *  Add the target to the CAM layer and trigger a rescan when a new device is found
96  */
97 void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
98         union ccb                       *ccb;
99
100         DBG_FUNC("IN\n");
101
102         if(softs->os_specific.sim_registered) { 
103                 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) {
104                         DBG_ERR("rescan failed (can't allocate CCB)\n");
105                         return;
106                 }
107
108                 if (xpt_create_path(&ccb->ccb_h.path, NULL,
109                         cam_sim_path(softs->os_specific.sim),
110                         device->target, device->lun) != CAM_REQ_CMP) {
111                         DBG_ERR("rescan failed (can't create path)\n");
112                         xpt_free_ccb(ccb);
113                         return;
114                 }
115                 xpt_rescan(ccb);
116         }
117
118         DBG_FUNC("OUT\n");
119 }
120
121 /*
122  * Remove the device from the CAM layer when it is deleted or hot-removed
123  */
124 void os_remove_device(pqisrc_softstate_t *softs,
125         pqi_scsi_dev_t *device) {
126         struct cam_path *tmppath;
127
128         DBG_FUNC("IN\n");
129         
130         if(softs->os_specific.sim_registered) {
131                 if (xpt_create_path(&tmppath, NULL, 
132                         cam_sim_path(softs->os_specific.sim),
133                         device->target, device->lun) != CAM_REQ_CMP) {
134                         DBG_ERR("unable to create path for async event");
135                         return;
136                 }
137                 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
138                 xpt_free_path(tmppath);
139                 pqisrc_free_device(softs, device);
140         }
141
142         DBG_FUNC("OUT\n");
143
144 }
145
146 /*
147  * Function to release the frozen simq
148  */
149 static void pqi_release_camq( rcb_t *rcb )
150 {
151         pqisrc_softstate_t *softs;
152         struct ccb_scsiio *csio;
153
154         csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
155         softs = rcb->softs;
156
157         DBG_FUNC("IN\n");
158
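        /*
         * PQI_FLAG_BUSY means the SIM queue was frozen earlier (e.g. when no
         * command tag was available).  If the completing CCB already carries
         * CAM_RELEASE_SIMQ, release the queue here; otherwise set the flag so
         * CAM releases it when this CCB is completed through xpt_done().
         */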
159         if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
160                 softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
161                 if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
162                         xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
163                 else
164                         csio->ccb_h.status |= CAM_RELEASE_SIMQ;
165         }
166
167         DBG_FUNC("OUT\n");
168 }
169
170 /*
171  * Function to dma-unmap the completed request
172  */
173 static void pqi_unmap_request(void *arg)
174 {
175         pqisrc_softstate_t *softs;
176         rcb_t *rcb;
177
178         DBG_IO("IN rcb = %p\n", arg);
179
180         rcb = (rcb_t *)arg;
181         softs = rcb->softs;
182
183         if (!(rcb->cm_flags & PQI_CMD_MAPPED))
184                 return;
185
186         if (rcb->bcount != 0 ) {
187                 if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
188                         bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
189                                         rcb->cm_datamap,
190                                         BUS_DMASYNC_POSTREAD);
191                 if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
192                         bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
193                                         rcb->cm_datamap,
194                                         BUS_DMASYNC_POSTWRITE);
195                 bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat,
196                                         rcb->cm_datamap);
197         }
198         rcb->cm_flags &= ~PQI_CMD_MAPPED;
199
200         if(rcb->sgt && rcb->nseg)
201                 os_mem_free(rcb->softs, (void*)rcb->sgt,
202                         rcb->nseg*sizeof(sgt_t));
203
204         pqisrc_put_tag(&softs->taglist, rcb->tag);
205
206         DBG_IO("OUT\n");
207 }
208
209 /*
210  * Fix up the INQUIRY response for a logical volume so it reports a meaningful vendor, RAID level and volume state.
211  */
212 static void
213 smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
214 {
215         struct scsi_inquiry_data *inq = NULL;
216         uint8_t *cdb = NULL;
217         pqi_scsi_dev_t *device = NULL;
218
219         DBG_FUNC("IN\n");
220
221         cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
222                 (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
223         if(cdb[0] == INQUIRY && 
224                 (cdb[1] & SI_EVPD) == 0 &&
225                 (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
226                 csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
227
228                 inq = (struct scsi_inquiry_data *)csio->data_ptr;
229
230                 device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
231
232                 /* Let physical disks be probed and dealt with via CAM as-is; only
233                    for logical drives do we fall through and tweak the inquiry data. */
234                 if( !device ||  !pqisrc_is_logical_device(device) ||
235                                 (device->devtype != DISK_DEVICE)  || 
236                                 pqisrc_is_external_raid_device(device)) {
237                         return;
238                 }
239
240                 strncpy(inq->vendor, "MSCC",
241                         SID_VENDOR_SIZE);
242                 strncpy(inq->product, 
243                         pqisrc_raidlevel_to_string(device->raid_level),
244                         SID_PRODUCT_SIZE);
245                 strncpy(inq->revision, device->volume_offline?"OFF":"OK",
246                         SID_REVISION_SIZE);
247         }
248
249         DBG_FUNC("OUT\n");
250 }
251
252 /*
253  * Handle completion of a command - pass results back through the CCB
254  */
255 void
256 os_io_response_success(rcb_t *rcb)
257 {
258         struct ccb_scsiio               *csio;
259
260         DBG_IO("IN rcb = %p\n", rcb);
261
262         if (rcb == NULL) 
263                 panic("rcb is null");
264
265         csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
266         
267         if (csio == NULL) 
268                 panic("csio is null");
269
270         rcb->status = REQUEST_SUCCESS;
271         csio->ccb_h.status = CAM_REQ_CMP;
272
273         smartpqi_fix_ld_inquiry(rcb->softs, csio);
274         pqi_release_camq(rcb);
275         pqi_unmap_request(rcb);
276         xpt_done((union ccb *)csio);
277
278         DBG_IO("OUT\n");
279 }
280
281 /*
282  * Error response handling for RAID path I/O
283  */
284 void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
285 {
286         struct ccb_scsiio *csio;
287         pqisrc_softstate_t *softs;
288
289         DBG_IO("IN\n");
290
291         csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
292
293         if (csio == NULL)
294                 panic("csio is null");
295
296         softs = rcb->softs;
297
298         ASSERT(err_info != NULL);
299         csio->scsi_status = err_info->status;
300         csio->ccb_h.status = CAM_REQ_CMP_ERR;
301
302         if (csio->ccb_h.func_code == XPT_SCSI_IO) {
303                 /*
304                  * Handle specific SCSI status values.
305                  */
306                 switch(csio->scsi_status) {
307                         case PQI_RAID_STATUS_QUEUE_FULL:
308                                 csio->ccb_h.status = CAM_REQ_CMP;
309                                 DBG_ERR("Queue Full error");
310                                 break;
311                                 /* check condition, sense data included */
312                         case PQI_RAID_STATUS_CHECK_CONDITION:
313                                 {
314                                 uint16_t sense_data_len = 
315                                         LE_16(err_info->sense_data_len);
316                                 uint8_t *sense_data = NULL;
317                                 if (sense_data_len)
318                                         sense_data = err_info->data;
319                                 memset(&csio->sense_data, 0, csio->sense_len);
320                                 sense_data_len = (sense_data_len >
321                                                 csio->sense_len) ?
322                                                 csio->sense_len :
323                                                 sense_data_len;
324                                 if (sense_data)
325                                         memcpy(&csio->sense_data, sense_data,
326                                                 sense_data_len);
327                                 if (csio->sense_len > sense_data_len)
328                                         csio->sense_resid = csio->sense_len
329                                                         - sense_data_len;
330                                 else
331                                         csio->sense_resid = 0;
332                                 csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
333                                                         | CAM_AUTOSNS_VALID
334                                                         | CAM_REQ_CMP_ERR;
335
336                                 }
337                                 break;
338
339                         case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
340                                 {
341                                 uint32_t resid = 0;
342                                 resid = rcb->bcount-err_info->data_out_transferred;
343                                 csio->resid  = resid;
344                                 csio->ccb_h.status = CAM_REQ_CMP;
345                                 break;
346                                 }
347                         default:
348                                 csio->ccb_h.status = CAM_REQ_CMP;
349                                 break;
350                 }
351         }
352
353         if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
354                 softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
355                 if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
356                         xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
357                 else
358                         csio->ccb_h.status |= CAM_RELEASE_SIMQ;
359         }
360
361         pqi_unmap_request(rcb);
362         xpt_done((union ccb *)csio);
363
364         DBG_IO("OUT\n");
365 }
366
367
368 /*
369  * Error response handling for AIO (accelerated I/O path) requests.
370  */
371 void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
372 {
373         struct ccb_scsiio *csio;
374         pqisrc_softstate_t *softs;
375
376         DBG_IO("IN\n");
377
378         if (rcb == NULL)
379                 panic("rcb is null");
380
381         rcb->status = REQUEST_SUCCESS;
382         csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
383         if (csio == NULL)
384                 panic("csio is null");
385
386         softs = rcb->softs;
387
388         switch (err_info->service_resp) {
389                 case PQI_AIO_SERV_RESPONSE_COMPLETE:
390                         csio->ccb_h.status = err_info->status;
391                         break;
392                 case PQI_AIO_SERV_RESPONSE_FAILURE:
393                         switch(err_info->status) {
394                                 case PQI_AIO_STATUS_IO_ABORTED:
395                                         csio->ccb_h.status = CAM_REQ_ABORTED;
396                                         DBG_WARN_BTL(rcb->dvp, "IO aborted\n");
397                                         break;
398                                 case PQI_AIO_STATUS_UNDERRUN:
399                                         csio->ccb_h.status = CAM_REQ_CMP;
400                                         csio->resid =
401                                                 LE_32(err_info->resd_count);
402                                         break;
403                                 case PQI_AIO_STATUS_OVERRUN:
404                                         csio->ccb_h.status = CAM_REQ_CMP;
405                                         break;
406                                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
407                                         DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
408                                         rcb->dvp->offload_enabled = false;
409                                         csio->ccb_h.status |= CAM_REQUEUE_REQ;
410                                         break;
411                                 case PQI_AIO_STATUS_IO_ERROR:
412                                 case PQI_AIO_STATUS_IO_NO_DEVICE:
413                                 case PQI_AIO_STATUS_INVALID_DEVICE:
414                                 default:
415                                         DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n");
416                                         csio->ccb_h.status |=
417                                                 CAM_SCSI_STATUS_ERROR;
418                                         break;
419                         }
420                         break;
421                 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
422                 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
423                         csio->ccb_h.status = CAM_REQ_CMP;
424                         break;
425                 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
426                 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
427                         DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
428                         csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
429                         break;
430                 default:
431                         DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
432                         csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
433                         break;
434         }
435         if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
436                 csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
437                 uint8_t *sense_data = NULL;
438                 unsigned sense_data_len = LE_16(err_info->data_len);
439                 if (sense_data_len)
440                         sense_data = err_info->data;
441                 DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND  sense size %u\n",
442                         sense_data_len);
443                 memset(&csio->sense_data, 0, csio->sense_len);
444                 if (sense_data)
445                         memcpy(&csio->sense_data, sense_data, ((sense_data_len >
446                                 csio->sense_len) ? csio->sense_len : sense_data_len));
447                 if (csio->sense_len > sense_data_len)
448                         csio->sense_resid = csio->sense_len - sense_data_len;
449                 else
450                         csio->sense_resid = 0;
451                 csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
452         }
453
454         smartpqi_fix_ld_inquiry(softs, csio);
455         pqi_release_camq(rcb);
456         pqi_unmap_request(rcb);
457         xpt_done((union ccb *)csio);
458         DBG_IO("OUT\n");
459 }
460
461 static void
462 pqi_freeze_ccb(union ccb *ccb)
463 {
464         if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
465                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
466                 xpt_freeze_devq(ccb->ccb_h.path, 1);
467         }
468 }
469
470 /*
471  * Command-mapping helper function - populate this command's s/g table.
472  */
473 static void
474 pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
475 {
476         pqisrc_softstate_t *softs;
477         rcb_t *rcb;
478
479         rcb = (rcb_t *)arg;
480         softs = rcb->softs;
481
482         if(  error || nseg > softs->pqi_cap.max_sg_elem )
483         {
484                 rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
485                 pqi_freeze_ccb(rcb->cm_ccb);
486                 DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", 
487                         error, nseg, softs->pqi_cap.max_sg_elem);
488                 pqi_unmap_request(rcb);
489                 xpt_done((union ccb *)rcb->cm_ccb);
490                 return;
491         }
492
493         rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
494         if (rcb->sgt == NULL) {
495                 rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
496                 pqi_freeze_ccb(rcb->cm_ccb);
497                 DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
498                 pqi_unmap_request(rcb);
499                 xpt_done((union ccb *)rcb->cm_ccb);
500                 return;
501         }
502
503         rcb->nseg = nseg;
504         for (int i = 0; i < nseg; i++) {
505                 rcb->sgt[i].addr = segs[i].ds_addr;
506                 rcb->sgt[i].len = segs[i].ds_len;
507                 rcb->sgt[i].flags = 0;
508         }
509
510         if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE)
511                 bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
512                         rcb->cm_datamap, BUS_DMASYNC_PREREAD);
513         if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE)
514                 bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat,
515                         rcb->cm_datamap, BUS_DMASYNC_PREWRITE);
516
517         /* Call IO functions depending on pd or ld */
518         rcb->status = REQUEST_PENDING;
519
520         error = pqisrc_build_send_io(softs, rcb);
521
522         if (error) {
523                 rcb->req_pending = false;
524                 rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
525                 pqi_freeze_ccb(rcb->cm_ccb);
526                 DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
527                 pqi_unmap_request(rcb);
528                 xpt_done((union ccb *)rcb->cm_ccb);
529                 return;
530         }
531 }
532
533 /*
534  * Function to dma-map the request buffer 
535  */
536 static int pqi_map_request( rcb_t *rcb )
537 {
538         pqisrc_softstate_t *softs = rcb->softs;
539         int error = PQI_STATUS_SUCCESS;
540         union ccb *ccb = rcb->cm_ccb;
541
542         DBG_FUNC("IN\n");
543
544         /* check that mapping is necessary */
545         if (rcb->cm_flags & PQI_CMD_MAPPED)
546                 return(0);
547         rcb->cm_flags |= PQI_CMD_MAPPED;
548
549         if (rcb->bcount) {
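                /*
                 * bus_dmamap_load_ccb() may invoke pqi_request_map_helper()
                 * immediately or defer it until DMA resources are available; a
                 * deferred load is reported as EINPROGRESS, which the caller
                 * (pqisrc_io_start) treats as an in-progress success.
                 */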
550                 error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
551                         rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
552                 if (error != 0){
553                         DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n", 
554                                         error, rcb->bcount);
555                         return error;
556                 }
557         } else {
558                 /*
559                  * Set up the command to go to the controller.  If there are no
560                  * data buffers associated with the command then it can bypass
561                  * busdma.
562                  */
563                 /* Call IO functions depending on pd or ld */
564                 rcb->status = REQUEST_PENDING;
565
566                 error = pqisrc_build_send_io(softs, rcb);
567
568         }
569
570         DBG_FUNC("OUT error = %d\n", error);
571
572         return error;
573 }
574
575 /*
576  * Function to clear the request control block
577  */
578 void os_reset_rcb( rcb_t *rcb )
579 {
580         rcb->error_info = NULL;
581         rcb->req = NULL;
582         rcb->status = -1;
583         rcb->tag = INVALID_ELEM;
584         rcb->dvp = NULL;
585         rcb->cdbp = NULL;
586         rcb->softs = NULL;
587         rcb->cm_flags = 0;
588         rcb->cm_data = NULL;
589         rcb->bcount = 0;        
590         rcb->nseg = 0;
591         rcb->sgt = NULL;
592         rcb->cm_ccb = NULL;
593         rcb->encrypt_enable = false;
594         rcb->ioaccel_handle = 0;
595         rcb->resp_qid = 0;
596         rcb->req_pending = false;
597 }
598
599 /*
600  * Callback function for the lun rescan
601  */
602 static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
603 {
604         xpt_free_path(ccb->ccb_h.path);
605         xpt_free_ccb(ccb);
606 }
607
608
609 /*
610  * Function to rescan the lun
611  */
612 static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, 
613                         int lun)
614 {
615         union ccb   *ccb = NULL;
616         cam_status  status = 0;
617         struct cam_path     *path = NULL;       
618
619         DBG_FUNC("IN\n");
620
621         ccb = xpt_alloc_ccb_nowait();
            if (ccb == NULL) {
                    DBG_ERR("rescan failed (can't allocate CCB)\n");
                    return;
            }
622         status = xpt_create_path(&path, NULL,
623                                 cam_sim_path(softs->os_specific.sim), target, lun);
624         if (status != CAM_REQ_CMP) {
625                 DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n",
626                                  status);
627                 xpt_free_ccb(ccb);
628                 return;
629         }
630
631         bzero(ccb, sizeof(union ccb));
632         xpt_setup_ccb(&ccb->ccb_h, path, 5);
633         ccb->ccb_h.func_code = XPT_SCAN_LUN;
634         ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
635         ccb->crcn.flags = CAM_FLAG_NONE;
636
637         xpt_action(ccb);
638
639         DBG_FUNC("OUT\n");
640 }
641
642 /*
643  * Function to rescan the lun under each target
644  */
645 void smartpqi_target_rescan(struct pqisrc_softstate *softs)
646 {
647         int target = 0, lun = 0;
648
649         DBG_FUNC("IN\n");
650
651         for(target = 0; target < PQI_MAX_DEVICES; target++){
652                 for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){
653                         if(softs->device_list[target][lun]){
654                                 smartpqi_lun_rescan(softs, target, lun);
655                         }
656                 }
657         }
658
659         DBG_FUNC("OUT\n");
660 }
661
662 /*
663  * Return the SOP task attribute for the tagged command queueing mode requested in the CCB.
664  */
665 uint8_t os_get_task_attr(rcb_t *rcb) 
666 {
667         union ccb *ccb = rcb->cm_ccb;
668         uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
669
670         switch(ccb->csio.tag_action) {
671         case MSG_HEAD_OF_Q_TAG:
672                 tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE;
673                 break;
674         case MSG_ORDERED_Q_TAG:
675                 tag_action = SOP_TASK_ATTRIBUTE_ORDERED;
676                 break;
677         case MSG_SIMPLE_Q_TAG:
678         default:
679                 tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
680                 break;
681         }
682         return tag_action;
683 }
684
685 /*
686  * Complete all outstanding commands
687  */
688 void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
689 {
690         int tag = 0;
691
692         DBG_FUNC("IN\n");
693
694         for (tag = 1; tag < softs->max_outstanding_io; tag++) {
695                 rcb_t *prcb = &softs->rcb[tag];
696                 if(prcb->req_pending && prcb->cm_ccb ) {
697                         prcb->req_pending = false;
698                         prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
699                         xpt_done((union ccb *)prcb->cm_ccb);
700                         prcb->cm_ccb = NULL;
701                 }
702         }
703
704         DBG_FUNC("OUT\n");
705 }
706
707 /*
708  * IO handling functionality entry point
709  */
710 static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
711 {
712         rcb_t *rcb;
713         uint32_t tag, no_transfer = 0;
714         pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
715                                         cam_sim_softc(sim);
716         int32_t error = PQI_STATUS_FAILURE;
717         pqi_scsi_dev_t *dvp;
718
719         DBG_FUNC("IN\n");
720         
721         if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
722                 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
723                 DBG_INFO("Device  = %d not there\n", ccb->ccb_h.target_id);
724                 return PQI_STATUS_FAILURE;
725         }
726
727         dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
728         /* Check  controller state */
729         if (IN_PQI_RESET(softs)) {
730                 ccb->ccb_h.status = CAM_SCSI_BUS_RESET
731                                         | CAM_BUSY | CAM_REQ_INPROG;
732                 DBG_WARN("Device  = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
733                 return error;
734         }
735         /* Check device state */
736         if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
737                 ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
738                 DBG_WARN("Device  = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
739                 return error;
740         }
741         /* Check device reset */
742         if (dvp->reset_in_progress) {
743                 ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
744                 DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
745                 return error;
746         }
747
748         if (dvp->expose_device == false) {
749                 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
750                 DBG_INFO("Device  = %d not exposed\n", ccb->ccb_h.target_id);
751                 return error;
752         }
753
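        /*
         * Every outstanding request needs a driver tag that indexes its rcb.
         * If the tag list is exhausted, freeze the SIM queue and ask CAM to
         * requeue this CCB once the queue is released.
         */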
754         tag = pqisrc_get_tag(&softs->taglist);
755         if( tag == INVALID_ELEM ) {
756                 DBG_ERR("Get Tag failed\n");
757                 xpt_freeze_simq(softs->os_specific.sim, 1);
758                 softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
759                 ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
760                 return PQI_STATUS_FAILURE;
761         }
762
763         DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
764
765         rcb = &softs->rcb[tag];
766         os_reset_rcb( rcb );
767         rcb->tag = tag;
768         rcb->softs = softs;
769         rcb->cmdlen = ccb->csio.cdb_len;
770         ccb->ccb_h.sim_priv.entries[0].ptr = rcb;
771
772         switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
773                 case CAM_DIR_IN:
774                         rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE;
775                         break;
776                 case CAM_DIR_OUT:
777                         rcb->data_dir = SOP_DATA_DIR_TO_DEVICE;
778                         break;
779                 case CAM_DIR_NONE:
780                         no_transfer = 1;
781                         break;
782                 default:
783                         DBG_ERR("Unknown Dir\n");
784                         break;
785         }
786         rcb->cm_ccb = ccb;
787         rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
788
789         if (!no_transfer) {
790                 rcb->cm_data = (void *)ccb->csio.data_ptr;
791                 rcb->bcount = ccb->csio.dxfer_len;
792         } else {
793                 rcb->cm_data = NULL;
794                 rcb->bcount = 0;
795         }
796         /*
797          * Submit the request to the adapter.
798          *
799          * Note that this may fail if we're unable to map the request (and
800          * if we ever learn a transport layer other than simple, may fail
801          * if the adapter rejects the command).
802          */
803         if ((error = pqi_map_request(rcb)) != 0) {
804                 rcb->req_pending = false;
805                 xpt_freeze_simq(softs->os_specific.sim, 1);
806                 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
807                 if (error == EINPROGRESS) {
808                         DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
809                         error = 0;
810                 } else {
811                         ccb->ccb_h.status |= CAM_REQUEUE_REQ;
812                         DBG_WARN("Requeue req error = %d target = %d\n", error,
813                                 ccb->ccb_h.target_id);
814                         pqi_unmap_request(rcb);
815                 }
816         }
817
818         DBG_FUNC("OUT error = %d\n", error);
819         return error;
820 }
821
822 /*
823  * Abort a task, task management functionality
824  */
825 static int
826 pqisrc_scsi_abort_task(pqisrc_softstate_t *softs,  union ccb *ccb)
827 {
828         rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
829         uint32_t abort_tag = rcb->tag;
830         uint32_t tag = 0;
831         int rval = PQI_STATUS_SUCCESS;
832         uint16_t qid;
833
834         DBG_FUNC("IN\n");
835
836         qid = (uint16_t)rcb->resp_qid;
837
838         tag = pqisrc_get_tag(&softs->taglist);
839         rcb = &softs->rcb[tag];
840         rcb->tag = tag;
841         rcb->resp_qid = qid;
842
843         rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
844                 SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
845
846         if (PQI_STATUS_SUCCESS == rval) {
847                 rval = rcb->status;
848                 if (REQUEST_SUCCESS == rval) {
849                         ccb->ccb_h.status = CAM_REQ_ABORTED;
850                 }
851         }
852         pqisrc_put_tag(&softs->taglist, abort_tag);
853         pqisrc_put_tag(&softs->taglist,rcb->tag);
854
855         DBG_FUNC("OUT rval = %d\n", rval);
856
857         return rval;
858 }
859
860 /*
861  * Abort a taskset, task management functionality
862  */
863 static int
864 pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
865 {
866         rcb_t *rcb = NULL;
867         uint32_t tag = 0;
868         int rval = PQI_STATUS_SUCCESS;
869
870         DBG_FUNC("IN\n");
871
872         tag = pqisrc_get_tag(&softs->taglist);
873         rcb = &softs->rcb[tag];
874         rcb->tag = tag;
875
876         rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
877                         SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);
878
879         if (rval == PQI_STATUS_SUCCESS) {
880                 rval = rcb->status;
881         }
882
883         pqisrc_put_tag(&softs->taglist,rcb->tag);
884
885         DBG_FUNC("OUT rval = %d\n", rval);
886
887         return rval;
888 }
889
890 /*
891  * Target reset task management functionality
892  */
893 static int
894 pqisrc_target_reset( pqisrc_softstate_t *softs,  union ccb *ccb)
895 {
896         pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
897         rcb_t *rcb = NULL;
898         uint32_t tag = 0;
899         int rval = PQI_STATUS_SUCCESS;
900
901         DBG_FUNC("IN\n");
902
903         if (devp == NULL) {
904                 DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
905                 return (-1);
906         }
907
908         tag = pqisrc_get_tag(&softs->taglist);
909         rcb = &softs->rcb[tag];
910         rcb->tag = tag;
911
912         devp->reset_in_progress = true;
913         rval = pqisrc_send_tmf(softs, devp, rcb, 0,
914                 SOP_TASK_MANAGEMENT_LUN_RESET);
915         if (PQI_STATUS_SUCCESS == rval) {
916                 rval = rcb->status;
917         }
918         devp->reset_in_progress = false;
919         pqisrc_put_tag(&softs->taglist,rcb->tag);
920
921         DBG_FUNC("OUT rval = %d\n", rval);
922
923         return ((rval == REQUEST_SUCCESS) ?
924                 PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
925 }
926
927 /*
928  * CAM action entry point of the smartpqi driver.
929  */
930 static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
931 {
932         struct pqisrc_softstate *softs = cam_sim_softc(sim);
933         struct ccb_hdr  *ccb_h = &ccb->ccb_h;
934
935         DBG_FUNC("IN\n");
936
937         switch (ccb_h->func_code) {
938                 case XPT_SCSI_IO:
939                 {
940                         if(!pqisrc_io_start(sim, ccb)) {
941                                 return;
942                         }
943                         break;
944                 }
945                 case XPT_CALC_GEOMETRY:
946                 {
947                         struct ccb_calc_geometry *ccg;
948                         ccg = &ccb->ccg;
949                         if (ccg->block_size == 0) {
950                                 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
951                                 ccb->ccb_h.status = CAM_REQ_INVALID;
952                                 break;
953                         }
954                         cam_calc_geometry(ccg, /* extended */ 1);
955                         ccb->ccb_h.status = CAM_REQ_CMP;
956                         break;
957                 }
958                 case XPT_PATH_INQ:
959                 {
960                         update_sim_properties(sim, &ccb->cpi);
961                         ccb->ccb_h.status = CAM_REQ_CMP;
962                         break;
963                 }
964                 case XPT_GET_TRAN_SETTINGS:
965                         get_transport_settings(softs, &ccb->cts);
966                         ccb->ccb_h.status = CAM_REQ_CMP;
967                         break;
968                 case XPT_ABORT:
969                         if(pqisrc_scsi_abort_task(softs,  ccb)) {
970                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
971                                 xpt_done(ccb);
972                                 DBG_ERR("Abort task failed on %d\n",
973                                         ccb->ccb_h.target_id);
974                                 return;
975                         }
976                         break;
977                 case XPT_TERM_IO:
978                         if (pqisrc_scsi_abort_task_set(softs,  ccb)) {
979                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
980                                 DBG_ERR("Abort task set failed on %d\n",
981                                         ccb->ccb_h.target_id);
982                                 xpt_done(ccb);
983                                 return;
984                         }
985                         break;
986                 case XPT_RESET_DEV:
987                         if(pqisrc_target_reset(softs,  ccb)) {
988                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
989                                 DBG_ERR("Target reset failed on %d\n",
990                                         ccb->ccb_h.target_id);
991                                 xpt_done(ccb);
992                                 return;
993                         } else {
994                                 ccb->ccb_h.status = CAM_REQ_CMP;
995                         }
996                         break;
997                 case XPT_RESET_BUS:
998                         ccb->ccb_h.status = CAM_REQ_CMP;
999                         break;
1000                 case XPT_SET_TRAN_SETTINGS:
1001                         ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1002                         return;
1003                 default:
1004                         DBG_WARN("UNSUPPORTED FUNC CODE\n");
1005                         ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1006                         break;
1007         }
1008         xpt_done(ccb);
1009
1010         DBG_FUNC("OUT\n");
1011 }
1012
1013 /*
1014  * Function to poll for responses when interrupts are unavailable;
1015  * this also supports crash dumps.
1016  */
1017 static void smartpqi_poll(struct cam_sim *sim)
1018 {
1019         struct pqisrc_softstate *softs = cam_sim_softc(sim);
1020         int i;
1021
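        /*
         * Drain completions from each operational response queue.  The loop
         * starts at index 1; queue 0 appears to be reserved for other traffic
         * and is skipped here.
         */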
1022         for (i = 1; i < softs->intr_count; i++ )
1023                 pqisrc_process_response_queue(softs, i);
1024 }
1025
1026 /*
1027  * Function to adjust the queue depth of a device
1028  */
1029 void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
1030 {
1031         struct ccb_relsim crs;
1032
1033         DBG_INFO("IN\n");
1034
1035         xpt_setup_ccb(&crs.ccb_h, path, 5);
1036         crs.ccb_h.func_code = XPT_REL_SIMQ;
1037         crs.ccb_h.flags = CAM_DEV_QFREEZE;
1038         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1039         crs.openings = queue_depth;
1040         xpt_action((union ccb *)&crs);
1041         if(crs.ccb_h.status != CAM_REQ_CMP) {
1042                 printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status);
1043         }
1044
1045         DBG_INFO("OUT\n");
1046 }
1047
1048 /*
1049  * Function to register async callback for setting queue depth
1050  */
1051 static void
1052 smartpqi_async(void *callback_arg, u_int32_t code,
1053                 struct cam_path *path, void *arg)
1054 {
1055         struct pqisrc_softstate *softs;
1056         softs = (struct pqisrc_softstate*)callback_arg;
1057
1058         DBG_FUNC("IN\n");
1059
1060         switch (code) {
1061                 case AC_FOUND_DEVICE:
1062                 {
1063                         struct ccb_getdev *cgd;
1064                         cgd = (struct ccb_getdev *)arg;
1065                         if (cgd == NULL) {
1066                                 break;
1067                         }
1068                         uint32_t t_id = cgd->ccb_h.target_id;
1069
1070                         if (t_id <= (PQI_CTLR_INDEX - 1)) {
1071                                 if (softs != NULL) {
1072                                         pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
1073                                         smartpqi_adjust_queue_depth(path,
1074                                                         dvp->queue_depth);
1075                                 }
1076                         }
1077                         break;
1078                 }
1079                 default:
1080                         break;
1081         }
1082
1083         DBG_FUNC("OUT\n");
1084 }
1085
1086 /*
1087  * Function to register sim with CAM layer for smartpqi driver
1088  */
1089 int register_sim(struct pqisrc_softstate *softs, int card_index)
1090 {
1091         int error = 0;
1092         int max_transactions;
1093         union ccb   *ccb = NULL;
1094         cam_status status = 0;
1095         struct ccb_setasync csa;
1096         struct cam_sim *sim;
1097
1098         DBG_FUNC("IN\n");
1099
1100         max_transactions = softs->max_io_for_scsi_ml;
1101         softs->os_specific.devq = cam_simq_alloc(max_transactions);
1102         if (softs->os_specific.devq == NULL) {
1103                 DBG_ERR("cam_simq_alloc failed txns = %d\n",
1104                         max_transactions);
1105                 return PQI_STATUS_FAILURE;
1106         }
1107
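        /*
         * cam_sim_alloc() is given one non-tagged transaction per device and up
         * to max_transactions tagged transactions, all drawn from the devq
         * allocated above.
         */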
1108         sim = cam_sim_alloc(smartpqi_cam_action, \
1109                                 smartpqi_poll, "smartpqi", softs, \
1110                                 card_index, &softs->os_specific.cam_lock, \
1111                                 1, max_transactions, softs->os_specific.devq);
1112         if (sim == NULL) {
1113                 DBG_ERR("cam_sim_alloc failed txns = %d\n",
1114                         max_transactions);
1115                 cam_simq_free(softs->os_specific.devq);
1116                 return PQI_STATUS_FAILURE;
1117         }
1118
1119         softs->os_specific.sim = sim;
1120         mtx_lock(&softs->os_specific.cam_lock);
1121         status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0);
1122         if (status != CAM_SUCCESS) {
1123                 DBG_ERR("xpt_bus_register failed status=%d\n", status);
1124                 cam_sim_free(softs->os_specific.sim, FALSE);
1125                 cam_simq_free(softs->os_specific.devq);
1126                 mtx_unlock(&softs->os_specific.cam_lock);
1127                 return PQI_STATUS_FAILURE;
1128         }
1129
1130         softs->os_specific.sim_registered = TRUE;
1131         ccb = xpt_alloc_ccb_nowait();
1132         if (ccb == NULL) {
1133                 DBG_ERR("xpt_alloc_ccb_nowait() failed\n");
                     xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
                     cam_sim_free(softs->os_specific.sim, TRUE);
                     mtx_unlock(&softs->os_specific.cam_lock);
1134                 return PQI_STATUS_FAILURE;
1135         }
1136
1137         if (xpt_create_path(&ccb->ccb_h.path, NULL,
1138                         cam_sim_path(softs->os_specific.sim),
1139                         CAM_TARGET_WILDCARD,
1140                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1141                 DBG_ERR("xpt_create_path failed\n");
1142                 xpt_free_ccb(ccb);
1143                 xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1144                 cam_sim_free(softs->os_specific.sim, TRUE);
1145                 mtx_unlock(&softs->os_specific.cam_lock);
1146                 return PQI_STATUS_FAILURE;
1147         }
1148         /*
1149          * Callback to set the queue depth per target which is 
1150          * derived from the FW.
1151          */
1152         softs->os_specific.path = ccb->ccb_h.path;
1153         xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1154         csa.ccb_h.func_code = XPT_SASYNC_CB;
1155         csa.event_enable = AC_FOUND_DEVICE;
1156         csa.callback = smartpqi_async;
1157         csa.callback_arg = softs;
1158         xpt_action((union ccb *)&csa);
1159         if (csa.ccb_h.status != CAM_REQ_CMP) {
1160                 DBG_ERR("Unable to register smartpqi_async handler: %d!\n", 
1161                         csa.ccb_h.status);
1162         }
1163
1164         mtx_unlock(&softs->os_specific.cam_lock);
1165         DBG_INFO("OUT\n");
1166         return error;
1167 }
1168
1169 /*
1170  * Function to deregister smartpqi sim from cam layer
1171  */
1172 void deregister_sim(struct pqisrc_softstate *softs)
1173 {
1174         struct ccb_setasync csa;
1175         
1176         DBG_FUNC("IN\n");
1177
1178         if (softs->os_specific.mtx_init) {
1179                 mtx_lock(&softs->os_specific.cam_lock);
1180         }
1181
1182
1183         xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
1184         csa.ccb_h.func_code = XPT_SASYNC_CB;
1185         csa.event_enable = 0;
1186         csa.callback = smartpqi_async;
1187         csa.callback_arg = softs;
1188         xpt_action((union ccb *)&csa);
1189         xpt_free_path(softs->os_specific.path);
1190
1191         xpt_release_simq(softs->os_specific.sim, 0);
1192
1193         xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
1194         softs->os_specific.sim_registered = FALSE;
1195
1196         if (softs->os_specific.sim) {
1197                 cam_sim_free(softs->os_specific.sim, FALSE);
1198                 softs->os_specific.sim = NULL;
1199         }
1200         if (softs->os_specific.mtx_init) {
1201                 mtx_unlock(&softs->os_specific.cam_lock);
1202         }
1203         if (softs->os_specific.devq != NULL) {
1204                 cam_simq_free(softs->os_specific.devq);
1205         }
1206         if (softs->os_specific.mtx_init) {
1207                 mtx_destroy(&softs->os_specific.cam_lock);
1208                 softs->os_specific.mtx_init = FALSE;
1209         }
1210
1211         mtx_destroy(&softs->os_specific.map_lock);
1212
1213         DBG_FUNC("OUT\n");
1214 }