/*-
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <sys/malloc.h>

#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <dev/isci/scil/sci_memory_descriptor_list.h>
#include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>

#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_library.h>
#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_task_request.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_domain.h>
#include <dev/isci/scil/scif_user_callback.h>
void isci_action(struct cam_sim *sim, union ccb *ccb);
void isci_poll(struct cam_sim *sim);
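/*
 * Descriptive note: CAM reserves the sim_priv scratch area in each CCB
 * header for SIM drivers.  This macro aliases its first pointer slot so
 * the driver can remember which SIM a CCB belongs to (see its use in the
 * XPT_PATH_INQ handler below).
 */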
#define ccb_sim_ptr sim_priv.entries[0].ptr
/**
 * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
 *        disable interrupts, and wait for current ongoing processing to
 *        complete.  Subsequently, the user should reset the controller.
 *
 * @param[in] controller This parameter specifies the controller that had
 *            an error.
 *
 * @return none
 */
void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
    SCI_CONTROLLER_ERROR error)
{
        isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
            error);
}
/**
 * @brief This user callback will inform the user that the controller has
 *        finished the start process.
 *
 * @param[in] controller This parameter specifies the controller that was
 *            started.
 * @param[in] completion_status This parameter specifies the results of
 *            the start operation.  SCI_SUCCESS indicates successful
 *            completion.
 *
 * @return none
 */
void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
        uint32_t index;
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        isci_controller->is_started = TRUE;

        /* Set bits for all domains.  We will clear them one-by-one once
         * the domains complete discovery, or return an error when calling
         * scif_domain_discover.  Once all bits are clear, we will register
         * the controller with CAM.
         */
        isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;
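        /*
         * Illustrative values (assuming SCI_MAX_DOMAINS == 4, per the SCIL
         * headers): the mask starts at 0xf, and one bit is cleared as each
         * domain's discovery finishes or fails to start.
         */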
        for (index = 0; index < SCI_MAX_DOMAINS; index++) {
                SCI_STATUS status;
                SCI_DOMAIN_HANDLE_T domain =
                    isci_controller->domain[index].sci_object;

                status = scif_domain_discover(
                    domain,
                    scif_domain_get_suggested_discover_timeout(domain),
                    DEVICE_TIMEOUT);

                if (status != SCI_SUCCESS) {
                        isci_controller_domain_discovery_complete(
                            isci_controller, &isci_controller->domain[index]);
                }
        }
}
/**
 * @brief This user callback will inform the user that the controller has
 *        finished the stop process.  Note that after the user calls
 *        scif_controller_stop(), and until this stop-complete callback is
 *        received, the user should not expect any other callbacks from the
 *        framework, such as scif_cb_domain_change_notification().
 *
 * @param[in] controller This parameter specifies the controller that was
 *            stopped.
 * @param[in] completion_status This parameter specifies the results of
 *            the stop operation.  SCI_SUCCESS indicates successful
 *            completion.
 *
 * @return none
 */
void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        isci_controller->is_started = FALSE;
}
static void
isci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        SCI_PHYSICAL_ADDRESS *phys_addr = arg;

        *phys_addr = seg[0].ds_addr;
}
/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in] controller This parameter represents the controller
 *            object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        /*
         * Note this routine is only used for buffers needed to translate
         * SCSI UNMAP commands to ATA DSM commands for SATA disks.
         *
         * We first try to pull a buffer from the controller's pool, and only
         * call contigmalloc if one isn't there.
         */
        if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) {
                sci_pool_get(isci_controller->unmap_buffer_pool,
                    mde->virtual_address);
        } else {
                mde->virtual_address = contigmalloc(PAGE_SIZE,
                    M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
                    mde->constant_memory_alignment, 0);

                if (mde->virtual_address != NULL)
                        bus_dmamap_load(isci_controller->buffer_dma_tag,
                            NULL, mde->virtual_address, PAGE_SIZE,
                            isci_single_map, &mde->physical_address,
                            BUS_DMA_NOWAIT);
        }
}
/**
 * @brief This method will be invoked to free memory that was dynamically
 *        allocated via scif_cb_controller_allocate_memory().
 *
 * @param[in] controller This parameter represents the controller
 *            object for which to free memory.
 * @param[in] mde This parameter represents the memory descriptor that
 *            references the memory to be freed.
 *
 * @return none
 */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        /*
         * Put the buffer back into the controller's buffer pool, rather
         * than invoking contigfree.  This reduces the chance that we won't
         * have buffers available when the system is under memory pressure.
         */
        sci_pool_put(isci_controller->unmap_buffer_pool,
            mde->virtual_address);
}
void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
        SCI_CONTROLLER_HANDLE_T scif_controller_handle;

        scif_library_allocate_controller(isci->sci_library_handle,
            &scif_controller_handle);

        scif_controller_construct(isci->sci_library_handle,
            scif_controller_handle, NULL);

        controller->isci = isci;
        controller->scif_controller_handle = scif_controller_handle;

        /* This allows us to later use
         *   sci_object_get_association(scif_controller_handle)
         * inside of a callback routine to get our struct ISCI_CONTROLLER
         * object.
         */
        sci_object_set_association(scif_controller_handle, (void *)controller);

        controller->is_started = FALSE;
        controller->is_frozen = FALSE;
        controller->release_queued_ccbs = FALSE;
        controller->sim = NULL;
        controller->initial_discovery_mask = 0;

        sci_fast_list_init(&controller->pending_device_reset_list);

        mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

        uint32_t domain_index;

        for (domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
                isci_domain_construct(&controller->domain[domain_index],
                    domain_index, controller);
        }

        controller->timer_memory = malloc(
            sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
            M_NOWAIT | M_ZERO);

        sci_pool_initialize(controller->timer_pool);
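        /*
         * Descriptive note: sci_pool_* are SCIL's fixed-size pool macros
         * (sci_pool.h).  The loop below pre-seeds the timer pool with all
         * SCI_MAX_TIMERS entries carved from timer_memory, so later pool
         * gets never have to hit the allocator.
         */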
        struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
            controller->timer_memory;

        for (int i = 0; i < SCI_MAX_TIMERS; i++) {
                sci_pool_put(controller->timer_pool, timer++);
        }

        sci_pool_initialize(controller->unmap_buffer_pool);
}
SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
        SCIC_USER_PARAMETERS_T scic_user_parameters;
        SCI_CONTROLLER_HANDLE_T scic_controller_handle;
        unsigned long tunable;
        int i;

        scic_controller_handle =
            scif_controller_get_scic_handle(controller->scif_controller_handle);

        if (controller->isci->oem_parameters_found == TRUE)
        {
                scic_oem_parameters_set(
                    scic_controller_handle,
                    &controller->oem_parameters,
                    (uint8_t)(controller->oem_parameters_version));
        }

        scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

        if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
                scic_user_parameters.sds1.no_outbound_task_timeout =
                    (uint8_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
                scic_user_parameters.sds1.ssp_max_occupancy_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
                scic_user_parameters.sds1.stp_max_occupancy_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
                scic_user_parameters.sds1.ssp_inactivity_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
                scic_user_parameters.sds1.stp_inactivity_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
                for (i = 0; i < SCI_MAX_PHYS; i++)
                        scic_user_parameters.sds1.phys[i].max_speed_generation =
                            (uint8_t)tunable;

        scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);

        /* A scheduler bug in the SCU requires SCIL to reserve some task
         * contexts as a workaround - one per domain.
         */
        controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

        if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
            &controller->queue_depth)) {
                controller->queue_depth = max(1, min(controller->queue_depth,
                    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
        }

        /* Reserve one request so that we can ensure we have one available TC
         * to do internal device resets.
         */
        controller->sim_queue_depth = controller->queue_depth - 1;
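        /*
         * Illustrative arithmetic (assuming SCI_MAX_IO_REQUESTS == 256 and
         * SCI_MAX_DOMAINS == 4; check the SCIL headers for the real values):
         * queue_depth defaults to 256 - 4 = 252 and sim_queue_depth to 251.
         */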
        /* Although we save one TC to do internal device resets, it is possible
         * we could end up using several TCs for simultaneous device resets
         * while at the same time having CAM fill our controller queue.  To
         * simulate this condition, and how our driver handles it, we can set
         * this io_shortage parameter, which will tell CAM that we have a
         * larger queue depth than we really do.
         */
        uint32_t io_shortage = 0;
        TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
        controller->sim_queue_depth += io_shortage;
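        /*
         * For example, setting hw.isci.io_shortage=8 in loader.conf(5)
         * advertises eight more queue slots to CAM than the controller can
         * actually service, exercising the driver's internal CCB queuing.
         * This is a test knob, not a tuning recommendation.
         */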
        /* Attach to CAM using xpt_bus_register now, then immediately freeze
         * the simq.  It will get released later when initial domain discovery
         * is complete.
         */
        controller->has_been_scanned = FALSE;
        mtx_lock(&controller->lock);
        isci_controller_attach_to_cam(controller);
        xpt_freeze_simq(controller->sim, 1);
        mtx_unlock(&controller->lock);

        return (scif_controller_initialize(controller->scif_controller_handle));
}
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
        int error;
        device_t device = controller->isci->device;
        uint32_t max_segment_size = isci_io_request_get_max_io_size();
        uint32_t status = 0;
        struct ISCI_MEMORY *uncached_controller_memory =
            &controller->uncached_controller_memory;
        struct ISCI_MEMORY *cached_controller_memory =
            &controller->cached_controller_memory;
        struct ISCI_MEMORY *request_memory =
            &controller->request_memory;
        POINTER_UINT virtual_address;
        bus_addr_t physical_address;

        controller->mdl = sci_controller_get_memory_descriptor_list_handle(
            controller->scif_controller_handle);

        uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
            controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

        error = isci_allocate_dma_buffer(device, uncached_controller_memory);

        if (error != 0)
                return (error);

        sci_mdl_decorator_assign_memory(controller->mdl,
            SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
            uncached_controller_memory->virtual_address,
            uncached_controller_memory->physical_address);

        cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
            controller->mdl,
            SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
        );

        error = isci_allocate_dma_buffer(device, cached_controller_memory);

        if (error != 0)
                return (error);

        sci_mdl_decorator_assign_memory(controller->mdl,
            SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
            cached_controller_memory->virtual_address,
            cached_controller_memory->physical_address);

        request_memory->size =
            controller->queue_depth * isci_io_request_get_object_size();

        error = isci_allocate_dma_buffer(device, request_memory);

        if (error != 0)
                return (error);

        /* For STP PIO testing, we want to ensure we can force multiple SGLs
         * since this has been a problem area in SCIL.  This tunable parameter
         * will allow us to force DMA segments to a smaller size, ensuring
         * that even if a physically contiguous buffer is attached to this
         * I/O, the DMA subsystem will pass us multiple segments in our DMA
         * load callback.
         */
        TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);
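        /*
         * For example, booting with hw.isci.max_segment_size=4096 caps each
         * DMA segment at one page, so even a physically contiguous 64 KB
         * buffer arrives as sixteen segments (illustrative values only).
         */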
        /* Create DMA tag for our I/O requests.  Then we can create DMA maps
         * based off of this tag and store them in each of our ISCI_IO_REQUEST
         * objects.  This will enable better performance than creating the
         * DMA maps every time we get an I/O.
         */
        status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            isci_io_request_get_max_io_size(),
            SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
            &controller->buffer_dma_tag);

        sci_pool_initialize(controller->request_pool);

        virtual_address = request_memory->virtual_address;
        physical_address = request_memory->physical_address;

        for (int i = 0; i < controller->queue_depth; i++) {
                struct ISCI_REQUEST *request =
                    (struct ISCI_REQUEST *)virtual_address;

                isci_request_construct(request,
                    controller->scif_controller_handle,
                    controller->buffer_dma_tag, physical_address);

                sci_pool_put(controller->request_pool, request);

                virtual_address += isci_request_get_object_size();
                physical_address += isci_request_get_object_size();
        }
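        /*
         * The loop above carves the single DMA-safe request buffer into
         * queue_depth fixed-size ISCI_REQUEST objects; the virtual and
         * physical cursors advance in lockstep, so each request records the
         * physical address corresponding to its own virtual placement.
         */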
        uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
            scif_remote_device_get_object_size();

        controller->remote_device_memory = (uint8_t *)malloc(
            remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
            M_NOWAIT | M_ZERO);

        sci_pool_initialize(controller->remote_device_pool);

        uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

        for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
                struct ISCI_REMOTE_DEVICE *remote_device =
                    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

                controller->remote_device[i] = NULL;
                remote_device->index = i;
                remote_device->is_resetting = FALSE;
                remote_device->frozen_lun_mask = 0;
                sci_fast_list_element_init(remote_device,
                    &remote_device->pending_device_reset_element);
                TAILQ_INIT(&remote_device->queued_ccbs);
                remote_device->release_queued_ccb = FALSE;
                remote_device->queued_ccb_in_progress = NULL;

                /*
                 * For the first SCI_MAX_DOMAINS device objects, do not put
                 * them in the pool, rather assign them to each domain.  This
                 * ensures that any device attached directly to port "i" will
                 * always get CAM target id "i".
                 */
                if (i < SCI_MAX_DOMAINS)
                        controller->domain[i].da_remote_device = remote_device;
                else
                        sci_pool_put(controller->remote_device_pool,
                            remote_device);

                remote_device_memory_ptr += remote_device_size;
        }

        return (0);
}
void isci_controller_start(void *controller_handle)
{
        struct ISCI_CONTROLLER *controller =
            (struct ISCI_CONTROLLER *)controller_handle;
        SCI_CONTROLLER_HANDLE_T scif_controller_handle =
            controller->scif_controller_handle;

        scif_controller_start(scif_controller_handle,
            scif_controller_get_suggested_start_timeout(scif_controller_handle));

        scic_controller_enable_interrupts(
            scif_controller_get_scic_handle(controller->scif_controller_handle));
}
void isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
        if (!isci_controller->has_been_scanned) {
                /* Controller has not been scanned yet.  We'll clear
                 * the discovery bit for this domain, then check if all bits
                 * are now clear.  That would indicate that all domains are
                 * done with discovery and we can then proceed with the
                 * initial scan.
                 */
                isci_controller->initial_discovery_mask &=
                    ~(1 << isci_domain->index);
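                /*
                 * For example (assuming SCI_MAX_DOMAINS == 4), when domain 2
                 * finishes discovery the mask goes from 0b1111 to 0b1011;
                 * the initial CAM scan proceeds only once it reaches zero.
                 */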
                if (isci_controller->initial_discovery_mask == 0) {
                        struct isci_softc *driver = isci_controller->isci;
                        uint8_t next_index = isci_controller->index + 1;

                        isci_controller->has_been_scanned = TRUE;

                        /* Unfreeze simq to allow initial scan to proceed. */
                        xpt_release_simq(isci_controller->sim, TRUE);

#if __FreeBSD_version < 800000
                        /* When the driver is loaded after boot, we need to
                         * explicitly rescan here for versions <8.0, because
                         * CAM only automatically scans new buses at boot
                         * time.
                         */
                        union ccb *ccb = xpt_alloc_ccb_nowait();

                        xpt_create_path(&ccb->ccb_h.path, NULL,
                            cam_sim_path(isci_controller->sim),
                            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

                        xpt_rescan(ccb);
#endif

                        if (next_index < driver->controller_count) {
                                /* There are more controllers that need to
                                 * start.  So start the next one.
                                 */
                                isci_controller_start(
                                    &driver->controllers[next_index]);
                        } else {
                                /* All controllers have been started and have
                                 * completed discovery.  Disestablish the
                                 * config hook, which will signal to the
                                 * kernel during boot that it is safe to try
                                 * to find and mount the root partition.
                                 */
                                config_intrhook_disestablish(
                                    &driver->config_hook);
                        }
                }
        }
}
int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
{
        struct isci_softc *isci = controller->isci;
        device_t parent = device_get_parent(isci->device);
        int unit = device_get_unit(isci->device);
        struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);

        if (isci_devq == NULL) {
                isci_log_message(0, "ISCI", "isci_devq is NULL\n");
                return (-1);
        }

        controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
            controller, unit, &controller->lock, controller->sim_queue_depth,
            controller->sim_queue_depth, isci_devq);

        if (controller->sim == NULL) {
                isci_log_message(0, "ISCI", "cam_sim_alloc failed\n");
                cam_simq_free(isci_devq);
                return (-1);
        }

        if (xpt_bus_register(controller->sim, parent, controller->index)
            != CAM_SUCCESS) {
                isci_log_message(0, "ISCI", "xpt_bus_register failed\n");
                cam_sim_free(controller->sim, TRUE);
                mtx_unlock(&controller->lock);
                return (-1);
        }

        if (xpt_create_path(&controller->path, NULL,
            cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
            CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                isci_log_message(0, "ISCI", "xpt_create_path failed\n");
                xpt_bus_deregister(cam_sim_path(controller->sim));
                cam_sim_free(controller->sim, TRUE);
                mtx_unlock(&controller->lock);
                return (-1);
        }

        return (0);
}
void isci_poll(struct cam_sim *sim)
{
        struct ISCI_CONTROLLER *controller =
            (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

        isci_interrupt_poll_handler(controller);
}
void isci_action(struct cam_sim *sim, union ccb *ccb)
{
        struct ISCI_CONTROLLER *controller =
            (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

        switch (ccb->ccb_h.func_code) {
        case XPT_PATH_INQ:
        {
                struct ccb_pathinq *cpi = &ccb->cpi;
                int bus = cam_sim_bus(sim);
                ccb->ccb_h.ccb_sim_ptr = sim;
                cpi->version_num = 1;
                cpi->hba_inquiry = PI_TAG_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN |
                    PIM_UNMAPPED;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
                cpi->max_lun = ISCI_MAX_LUN;
#if __FreeBSD_version >= 800102
                cpi->maxio = isci_io_request_get_max_io_size();
#endif
                cpi->unit_number = cam_sim_unit(sim);
                cpi->bus_id = bus;
                cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
                cpi->base_transfer_speed = 300000;
                strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
                strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->transport = XPORT_SAS;
                cpi->transport_version = 0;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_SPC2;
                cpi->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
        }
                break;
        case XPT_GET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *general_settings = &ccb->cts;
                struct ccb_trans_settings_sas *sas_settings =
                    &general_settings->xport_specific.sas;
                struct ccb_trans_settings_scsi *scsi_settings =
                    &general_settings->proto_specific.scsi;
                struct ISCI_REMOTE_DEVICE *remote_device;

                remote_device = controller->remote_device[ccb->ccb_h.target_id];

                if (remote_device == NULL) {
                        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                        ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                        xpt_done(ccb);
                        break;
                }

                general_settings->protocol = PROTO_SCSI;
                general_settings->transport = XPORT_SAS;
                general_settings->protocol_version = SCSI_REV_SPC2;
                general_settings->transport_version = 0;
                scsi_settings->valid = CTS_SCSI_VALID_TQ;
                scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_REQ_CMP;

                sas_settings->bitrate =
                    isci_remote_device_get_bitrate(remote_device);

                if (sas_settings->bitrate != 0)
                        sas_settings->valid = CTS_SAS_VALID_SPEED;

                xpt_done(ccb);
        }
                break;
        case XPT_SCSI_IO:
                isci_io_request_execute_scsi_io(ccb, controller);
                break;
#if __FreeBSD_version >= 900026
        case XPT_SMP_IO:
                isci_io_request_execute_smp_io(ccb, controller);
                break;
#endif
        case XPT_SET_TRAN_SETTINGS:
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        case XPT_CALC_GEOMETRY:
                cam_calc_geometry(&ccb->ccg, /*extended*/1);
                xpt_done(ccb);
                break;
        case XPT_RESET_DEV:
        {
                struct ISCI_REMOTE_DEVICE *remote_device =
                    controller->remote_device[ccb->ccb_h.target_id];

                if (remote_device != NULL)
                        isci_remote_device_reset(remote_device, ccb);
                else {
                        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                        ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                        xpt_done(ccb);
                }
        }
                break;
        case XPT_RESET_BUS:
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        default:
                isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
                    ccb->ccb_h.func_code);
                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}
/*
 * Unfortunately, SCIL doesn't cleanly handle retry conditions.
 * CAM_REQUEUE_REQ works only when no one is using the pass(4) interface.  So
 * when SCIL denotes an I/O needs to be retried (typically because of mixing
 * tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
 * these I/Os internally.  Once SCIL completes an I/O to this device, or we
 * get a ready notification, we will retry the first I/O on the queue.
 * Unfortunately, SCIL also doesn't cleanly handle starting the new I/O within
 * the context of the completion handler, so we need to retry these I/Os after
 * the completion handler is done executing.
 */
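/*
 * Descriptive note, inferred from the flags used below: the completion path
 * is expected to set controller->release_queued_ccbs and the per-device
 * release_queued_ccb flag, then invoke this routine outside the completion
 * handler's context; queued_ccb_in_progress ensures only one queued CCB per
 * device is retried at a time.
 */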
static void
isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
{
        struct ISCI_REMOTE_DEVICE *dev;
        struct ccb_hdr *ccb_h;
        int dev_idx;

        KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));

        controller->release_queued_ccbs = FALSE;
        for (dev_idx = 0;
            dev_idx < SCI_MAX_REMOTE_DEVICES;
            dev_idx++) {

                dev = controller->remote_device[dev_idx];
                if (dev != NULL &&
                    dev->release_queued_ccb == TRUE &&
                    dev->queued_ccb_in_progress == NULL) {
                        dev->release_queued_ccb = FALSE;
                        ccb_h = TAILQ_FIRST(&dev->queued_ccbs);

                        if (ccb_h == NULL)
                                continue;

                        isci_log_message(1, "ISCI", "release %p %x\n", ccb_h,
                            ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]);

                        dev->queued_ccb_in_progress = (union ccb *)ccb_h;
                        isci_io_request_execute_scsi_io(
                            (union ccb *)ccb_h, controller);
                }
        }
}