/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <sys/conf.h>
#include <sys/malloc.h>

#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <dev/isci/scil/sci_memory_descriptor_list.h>
#include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>

#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_library.h>
#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_task_request.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_domain.h>
#include <dev/isci/scil/scif_user_callback.h>
#include <dev/isci/scil/scic_sgpio.h>

#include <dev/led/led.h>

void isci_action(struct cam_sim *sim, union ccb *ccb);
void isci_poll(struct cam_sim *sim);

#define ccb_sim_ptr sim_priv.entries[0].ptr
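/*
 * Alias for the first SIM-private pointer slot in a CCB header
 * (sim_priv.entries[0].ptr).  isci_action() uses it to stash the
 * struct cam_sim pointer in the CCB so the SIM can later be recovered
 * from the CCB alone.
 */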

/**
 * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
 *        disable interrupts, and wait for current ongoing processing to
 *        complete.  Subsequently, the user should reset the controller.
 *
 * @param[in]  controller This parameter specifies the controller that had
 *                        an error.
 *
 * @return none
 */
void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
    SCI_CONTROLLER_ERROR error)
{

        isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
            error);
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the start process.
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             started.
 * @param[in]  completion_status This parameter specifies the results of
 *             the start operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
        uint32_t index;
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        isci_controller->is_started = TRUE;

        /* Set bits for all domains.  We will clear them one-by-one once
         *  a domain completes discovery, or immediately if the call to
         *  scif_domain_discover returns an error.  Once all bits are clear,
         *  we will register the controller with CAM.
         */
        isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;

        for(index = 0; index < SCI_MAX_DOMAINS; index++) {
                SCI_STATUS status;
                SCI_DOMAIN_HANDLE_T domain =
                    isci_controller->domain[index].sci_object;

                status = scif_domain_discover(
                        domain,
                        scif_domain_get_suggested_discover_timeout(domain),
                        DEVICE_TIMEOUT
                );

                if (status != SCI_SUCCESS)
                {
                        isci_controller_domain_discovery_complete(
                            isci_controller, &isci_controller->domain[index]);
                }
        }
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the stop process.  Note that after the user calls
 *        scif_controller_stop(), and before this controller stop complete
 *        callback is received, the user should not expect any callbacks
 *        from the framework, such as scif_cb_domain_change_notification().
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             stopped.
 * @param[in]  completion_status This parameter specifies the results of
 *             the stop operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        isci_controller->is_started = FALSE;
}

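/*
 * bus_dmamap_load() callback used for the single-segment DMA loads in this
 * file: it records the bus address of the first (and only) segment in the
 * caller-supplied SCI_PHYSICAL_ADDRESS.  Note that nseg and error are not
 * examined; the loads below are page-sized and issued with BUS_DMA_NOWAIT.
 */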
static void
isci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        SCI_PHYSICAL_ADDRESS *phys_addr = arg;

        *phys_addr = seg[0].ds_addr;
}

/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        /*
         * Note this routine is only used for buffers needed to translate
         * SCSI UNMAP commands to ATA DSM commands for SATA disks.
         *
         * We first try to pull a buffer from the controller's pool, and only
         * call contigmalloc if one isn't there.
         */
        if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) {
                sci_pool_get(isci_controller->unmap_buffer_pool,
                    mde->virtual_address);
        } else
                mde->virtual_address = contigmalloc(PAGE_SIZE,
                    M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
                    mde->constant_memory_alignment, 0);

        if (mde->virtual_address != NULL)
                bus_dmamap_load(isci_controller->buffer_dma_tag,
                    NULL, mde->virtual_address, PAGE_SIZE,
                    isci_single_map, &mde->physical_address,
                    BUS_DMA_NOWAIT);
}

/**
 * @brief This method will be invoked to free memory that was dynamically
 *        allocated via scif_cb_controller_allocate_memory().
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which the memory was allocated.
 * @param[in]  mde This parameter represents the memory descriptor that
 *             references the memory to be freed.
 *
 * @return none
 */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T * mde)
{
        struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(controller);

        /*
         * Put the buffer back into the controller's buffer pool, rather
         * than invoking contigfree.  This reduces the chance that we won't
         * have buffers available when the system is under memory pressure.
         */
        sci_pool_put(isci_controller->unmap_buffer_pool,
            mde->virtual_address);
}

void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
        SCI_CONTROLLER_HANDLE_T scif_controller_handle;

        scif_library_allocate_controller(isci->sci_library_handle,
            &scif_controller_handle);

        scif_controller_construct(isci->sci_library_handle,
            scif_controller_handle, NULL);

        controller->isci = isci;
        controller->scif_controller_handle = scif_controller_handle;

        /* This allows us to later use
         *  sci_object_get_association(scif_controller_handle)
         * inside of a callback routine to get our struct ISCI_CONTROLLER object
         */
        sci_object_set_association(scif_controller_handle, (void *)controller);

        controller->is_started = FALSE;
        controller->is_frozen = FALSE;
        controller->release_queued_ccbs = FALSE;
        controller->sim = NULL;
        controller->initial_discovery_mask = 0;

        sci_fast_list_init(&controller->pending_device_reset_list);

        mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

        uint32_t domain_index;

        for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
                isci_domain_construct( &controller->domain[domain_index],
                    domain_index, controller);
        }

        controller->timer_memory = malloc(
            sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
            M_NOWAIT | M_ZERO);

        sci_pool_initialize(controller->timer_pool);

        struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
            controller->timer_memory;

        for ( int i = 0; i < SCI_MAX_TIMERS; i++ ) {
                sci_pool_put(controller->timer_pool, timer++);
        }

        sci_pool_initialize(controller->unmap_buffer_pool);
}
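
/*
 * Rough lifecycle sketch (the authoritative attach sequence lives in isci.c,
 * so treat this as orientation rather than a contract): the attach path
 * constructs each controller with isci_controller_construct(), then calls
 * isci_controller_initialize() and isci_controller_allocate_memory(), and
 * eventually kicks off isci_controller_start().  Start completion arrives
 * via scif_cb_controller_start_complete() above, which begins domain
 * discovery; once every domain has reported in through
 * isci_controller_domain_discovery_complete(), the SIM queue is released
 * and the next controller (if any) is started.
 */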

static void isci_led_fault_func(void *priv, int onoff)
{
        struct ISCI_PHY *phy = priv;

        /* map onoff to the fault LED */
        phy->led_fault = onoff;
        scic_sgpio_update_led_state(phy->handle, 1 << phy->index,
                phy->led_fault, phy->led_locate, 0);
}

static void isci_led_locate_func(void *priv, int onoff)
{
        struct ISCI_PHY *phy = priv;

        /* map onoff to the locate LED */
        phy->led_locate = onoff;
        scic_sgpio_update_led_state(phy->handle, 1 << phy->index,
                phy->led_fault, phy->led_locate, 0);
}
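
/*
 * Both led(4) callbacks update a single phy, selected by the mask argument
 * (1 << phy->index).  Each callback caches its own LED's new state and then
 * hands the current fault and locate values to scic_sgpio_update_led_state()
 * together, so the other LED's state is preserved across updates.
 */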

SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
        SCIC_USER_PARAMETERS_T scic_user_parameters;
        SCI_CONTROLLER_HANDLE_T scic_controller_handle;
        char led_name[64];
        unsigned long tunable;
        uint32_t io_shortage;
        uint32_t fail_on_timeout;
        int i;

        scic_controller_handle =
            scif_controller_get_scic_handle(controller->scif_controller_handle);

        if (controller->isci->oem_parameters_found == TRUE)
        {
                scic_oem_parameters_set(
                    scic_controller_handle,
                    &controller->oem_parameters,
                    (uint8_t)(controller->oem_parameters_version));
        }

        scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

        if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
                scic_user_parameters.sds1.no_outbound_task_timeout =
                    (uint8_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
                scic_user_parameters.sds1.ssp_max_occupancy_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
                scic_user_parameters.sds1.stp_max_occupancy_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
                scic_user_parameters.sds1.ssp_inactivity_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
                scic_user_parameters.sds1.stp_inactivity_timeout =
                    (uint16_t)tunable;

        if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
                for (i = 0; i < SCI_MAX_PHYS; i++)
                        scic_user_parameters.sds1.phys[i].max_speed_generation =
                            (uint8_t)tunable;

        scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);
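
        /*
         * The tunables fetched above come from the kernel environment, so
         * they can be set in /boot/loader.conf before the driver attaches.
         * Illustrative example (values here are made up, not defaults):
         *
         *   hw.isci.max_speed_generation="2"
         *   hw.isci.ssp_inactivity_timeout="500"
         *
         * They are read only at controller initialization time; changing
         * them later has no effect on an already-initialized controller.
         */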

        /* A scheduler bug in the SCU requires SCIL to reserve some task
         *  contexts as a workaround - one per domain.
         */
        controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

        if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
            &controller->queue_depth)) {
                controller->queue_depth = max(1, min(controller->queue_depth,
                    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
        }

        /* Reserve one request so that we can ensure we have one available TC
         *  to do internal device resets.
         */
        controller->sim_queue_depth = controller->queue_depth - 1;

        /* Although we save one TC to do internal device resets, it is possible
         *  we could end up using several TCs for simultaneous device resets
         *  while at the same time having CAM fill our controller queue.  To
         *  simulate this condition, and how our driver handles it, we can set
         *  this io_shortage parameter, which will tell CAM that we have a
         *  larger queue depth than we really do.
         */
        io_shortage = 0;
        TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
        controller->sim_queue_depth += io_shortage;

        fail_on_timeout = 1;
        TUNABLE_INT_FETCH("hw.isci.fail_on_task_timeout", &fail_on_timeout);
        controller->fail_on_task_timeout = fail_on_timeout;

        /* Attach to CAM using xpt_bus_register now, then immediately freeze
         *  the simq.  It will get released later when initial domain discovery
         *  is complete.
         */
        controller->has_been_scanned = FALSE;
        mtx_lock(&controller->lock);
        isci_controller_attach_to_cam(controller);
        xpt_freeze_simq(controller->sim, 1);
        mtx_unlock(&controller->lock);

        for (i = 0; i < SCI_MAX_PHYS; i++) {
                controller->phys[i].handle = scic_controller_handle;
                controller->phys[i].index = i;

                /* fault */
                controller->phys[i].led_fault = 0;
                sprintf(led_name, "isci.bus%d.port%d.fault", controller->index, i);
                controller->phys[i].cdev_fault = led_create(isci_led_fault_func,
                    &controller->phys[i], led_name);

                /* locate */
                controller->phys[i].led_locate = 0;
                sprintf(led_name, "isci.bus%d.port%d.locate", controller->index, i);
                controller->phys[i].cdev_locate = led_create(isci_led_locate_func,
                    &controller->phys[i], led_name);
        }

        return (scif_controller_initialize(controller->scif_controller_handle));
}

int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
        int error;
        device_t device = controller->isci->device;
        uint32_t max_segment_size = isci_io_request_get_max_io_size();
        uint32_t status = 0;
        struct ISCI_MEMORY *uncached_controller_memory =
            &controller->uncached_controller_memory;
        struct ISCI_MEMORY *cached_controller_memory =
            &controller->cached_controller_memory;
        struct ISCI_MEMORY *request_memory =
            &controller->request_memory;
        POINTER_UINT virtual_address;
        bus_addr_t physical_address;

        controller->mdl = sci_controller_get_memory_descriptor_list_handle(
            controller->scif_controller_handle);

        uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
            controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

        error = isci_allocate_dma_buffer(device, controller,
            uncached_controller_memory);

        if (error != 0)
            return (error);

        sci_mdl_decorator_assign_memory( controller->mdl,
            SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
            uncached_controller_memory->virtual_address,
            uncached_controller_memory->physical_address);

        cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
            controller->mdl,
            SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
        );

        error = isci_allocate_dma_buffer(device, controller,
            cached_controller_memory);

        if (error != 0)
            return (error);

        sci_mdl_decorator_assign_memory(controller->mdl,
            SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
            cached_controller_memory->virtual_address,
            cached_controller_memory->physical_address);

        request_memory->size =
            controller->queue_depth * isci_io_request_get_object_size();

        error = isci_allocate_dma_buffer(device, controller, request_memory);

        if (error != 0)
            return (error);

        /* For STP PIO testing, we want to ensure we can force multiple SGLs
         *  since this has been a problem area in SCIL.  This tunable parameter
         *  will allow us to force DMA segments to a smaller size, ensuring
         *  that even if a physically contiguous buffer is attached to this
         *  I/O, the DMA subsystem will pass us multiple segments in our DMA
         *  load callback.
         */
        TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);

        /* Create DMA tag for our I/O requests.  Then we can create DMA maps based off
         *  of this tag and store them in each of our ISCI_IO_REQUEST objects.  This
         *  will enable better performance than creating the DMA maps every time we get
         *  an I/O.
         */
        status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1,
            ISCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
            NULL, NULL, isci_io_request_get_max_io_size(),
            SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0,
            busdma_lock_mutex, &controller->lock,
            &controller->buffer_dma_tag);
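
        /*
         * Descriptive note on the tag above: alignment is 1 byte, segments
         * may not cross ISCI_DMA_BOUNDARY, a single mapping may be as large
         * as the maximum I/O size and may be split into at most
         * SCI_MAX_SCATTER_GATHER_ELEMENTS segments of at most
         * max_segment_size bytes each, and busdma_lock_mutex() takes the
         * controller lock around any deferred-load callbacks.
         */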

        sci_pool_initialize(controller->request_pool);

        virtual_address = request_memory->virtual_address;
        physical_address = request_memory->physical_address;

        for (int i = 0; i < controller->queue_depth; i++) {
                struct ISCI_REQUEST *request =
                    (struct ISCI_REQUEST *)virtual_address;

                isci_request_construct(request,
                    controller->scif_controller_handle,
                    controller->buffer_dma_tag, physical_address);

                sci_pool_put(controller->request_pool, request);

                virtual_address += isci_request_get_object_size();
                physical_address += isci_request_get_object_size();
        }

        uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
            scif_remote_device_get_object_size();

        controller->remote_device_memory = (uint8_t *) malloc(
            remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
            M_NOWAIT | M_ZERO);

        sci_pool_initialize(controller->remote_device_pool);

        uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

        for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
                struct ISCI_REMOTE_DEVICE *remote_device =
                    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

                controller->remote_device[i] = NULL;
                remote_device->index = i;
                remote_device->is_resetting = FALSE;
                remote_device->frozen_lun_mask = 0;
                sci_fast_list_element_init(remote_device,
                    &remote_device->pending_device_reset_element);
                TAILQ_INIT(&remote_device->queued_ccbs);
                remote_device->release_queued_ccb = FALSE;
                remote_device->queued_ccb_in_progress = NULL;

                /*
                 * For the first SCI_MAX_DOMAINS device objects, do not put
                 *  them in the pool, rather assign them to each domain.  This
                 *  ensures that any device attached directly to port "i" will
                 *  always get CAM target id "i".
                 */
                if (i < SCI_MAX_DOMAINS)
                        controller->domain[i].da_remote_device = remote_device;
                else
                        sci_pool_put(controller->remote_device_pool,
                            remote_device);
                remote_device_memory_ptr += remote_device_size;
        }

        return (0);
}

void isci_controller_start(void *controller_handle)
{
        struct ISCI_CONTROLLER *controller =
            (struct ISCI_CONTROLLER *)controller_handle;
        SCI_CONTROLLER_HANDLE_T scif_controller_handle =
            controller->scif_controller_handle;

        scif_controller_start(scif_controller_handle,
            scif_controller_get_suggested_start_timeout(scif_controller_handle));

        scic_controller_enable_interrupts(
            scif_controller_get_scic_handle(controller->scif_controller_handle));
}
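
/*
 * Starting a controller is asynchronous: scif_controller_start() begins the
 * start sequence and interrupts are enabled here, but the framework reports
 * completion later through scif_cb_controller_start_complete() above, which
 * is where domain discovery (and ultimately CAM registration) is kicked off.
 */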

void isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
        if (!isci_controller->has_been_scanned)
        {
                /* Controller has not been scanned yet.  We'll clear
                 *  the discovery bit for this domain, then check if all bits
                 *  are now clear.  That would indicate that all domains are
                 *  done with discovery and we can then proceed with initial
                 *  scan.
                 */

                isci_controller->initial_discovery_mask &=
                    ~(1 << isci_domain->index);

                if (isci_controller->initial_discovery_mask == 0) {
                        struct isci_softc *driver = isci_controller->isci;
                        uint8_t next_index = isci_controller->index + 1;

                        isci_controller->has_been_scanned = TRUE;

                        /* Unfreeze simq to allow initial scan to proceed. */
                        xpt_release_simq(isci_controller->sim, TRUE);

                        if (next_index < driver->controller_count) {
                                /*  There are more controllers that need to
                                 *   start.  So start the next one.
                                 */
                                isci_controller_start(
                                    &driver->controllers[next_index]);
                        }
                        else
                        {
                                /* All controllers have been started and completed discovery.
                                 *  Disestablish the config hook, which will signal to the
                                 *  kernel during boot that it is safe to try to find and
                                 *  mount the root partition.
                                 */
                                config_intrhook_disestablish(
                                    &driver->config_hook);
                        }
                }
        }
}

int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
{
        struct isci_softc *isci = controller->isci;
        device_t parent = device_get_parent(isci->device);
        int unit = device_get_unit(isci->device);
        struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);

        if(isci_devq == NULL) {
                isci_log_message(0, "ISCI", "isci_devq is NULL \n");
                return (-1);
        }

        controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
            controller, unit, &controller->lock, controller->sim_queue_depth,
            controller->sim_queue_depth, isci_devq);

        if(controller->sim == NULL) {
                isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n");
                cam_simq_free(isci_devq);
                return (-1);
        }

        if(xpt_bus_register(controller->sim, parent, controller->index)
            != CAM_SUCCESS) {
                isci_log_message(0, "ISCI", "xpt_bus_register...fails \n");
                cam_sim_free(controller->sim, TRUE);
                mtx_unlock(&controller->lock);
                return (-1);
        }

        if(xpt_create_path(&controller->path, NULL,
            cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
            CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                isci_log_message(0, "ISCI", "xpt_create_path....fails\n");
                xpt_bus_deregister(cam_sim_path(controller->sim));
                cam_sim_free(controller->sim, TRUE);
                mtx_unlock(&controller->lock);
                return (-1);
        }

        return (0);
}

void isci_poll(struct cam_sim *sim)
{
        struct ISCI_CONTROLLER *controller =
            (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

        isci_interrupt_poll_handler(controller);
}

void isci_action(struct cam_sim *sim, union ccb *ccb)
{
        struct ISCI_CONTROLLER *controller =
            (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

        switch ( ccb->ccb_h.func_code ) {
        case XPT_PATH_INQ:
                {
                        struct ccb_pathinq *cpi = &ccb->cpi;
                        int bus = cam_sim_bus(sim);
                        ccb->ccb_h.ccb_sim_ptr = sim;
                        cpi->version_num = 1;
                        cpi->hba_inquiry = PI_TAG_ABLE;
                        cpi->target_sprt = 0;
                        cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN |
                            PIM_UNMAPPED;
                        cpi->hba_eng_cnt = 0;
                        cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
                        cpi->max_lun = ISCI_MAX_LUN;
                        cpi->maxio = isci_io_request_get_max_io_size();
                        cpi->unit_number = cam_sim_unit(sim);
                        cpi->bus_id = bus;
                        cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
                        cpi->base_transfer_speed = 300000;
                        strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                        strlcpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
                        strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                        cpi->transport = XPORT_SAS;
                        cpi->transport_version = 0;
                        cpi->protocol = PROTO_SCSI;
                        cpi->protocol_version = SCSI_REV_SPC2;
                        cpi->ccb_h.status = CAM_REQ_CMP;
                        xpt_done(ccb);
                }
                break;
        case XPT_GET_TRAN_SETTINGS:
                {
                        struct ccb_trans_settings *general_settings = &ccb->cts;
                        struct ccb_trans_settings_sas *sas_settings =
                            &general_settings->xport_specific.sas;
                        struct ccb_trans_settings_scsi *scsi_settings =
                            &general_settings->proto_specific.scsi;
                        struct ISCI_REMOTE_DEVICE *remote_device;

                        remote_device = controller->remote_device[ccb->ccb_h.target_id];

                        if (remote_device == NULL) {
                                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                                ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                                xpt_done(ccb);
                                break;
                        }

                        general_settings->protocol = PROTO_SCSI;
                        general_settings->transport = XPORT_SAS;
                        general_settings->protocol_version = SCSI_REV_SPC2;
                        general_settings->transport_version = 0;
                        scsi_settings->valid = CTS_SCSI_VALID_TQ;
                        scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
                        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                        ccb->ccb_h.status |= CAM_REQ_CMP;

                        sas_settings->bitrate =
                            isci_remote_device_get_bitrate(remote_device);

                        if (sas_settings->bitrate != 0)
                                sas_settings->valid = CTS_SAS_VALID_SPEED;

                        xpt_done(ccb);
                }
                break;
        case XPT_SCSI_IO:
                if (ccb->ccb_h.flags & CAM_CDB_PHYS) {
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        xpt_done(ccb);
                        break;
                }
                isci_io_request_execute_scsi_io(ccb, controller);
                break;
        case XPT_SMP_IO:
                isci_io_request_execute_smp_io(ccb, controller);
                break;
        case XPT_SET_TRAN_SETTINGS:
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        case XPT_CALC_GEOMETRY:
                cam_calc_geometry(&ccb->ccg, /*extended*/1);
                xpt_done(ccb);
                break;
        case XPT_RESET_DEV:
                {
                        struct ISCI_REMOTE_DEVICE *remote_device =
                            controller->remote_device[ccb->ccb_h.target_id];

                        if (remote_device != NULL)
                                isci_remote_device_reset(remote_device, ccb);
                        else {
                                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                                ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
                                xpt_done(ccb);
                        }
                }
                break;
        case XPT_RESET_BUS:
                ccb->ccb_h.status = CAM_REQ_CMP;
                xpt_done(ccb);
                break;
        default:
                isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
                    ccb->ccb_h.func_code);
                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

/*
 * Unfortunately, SCIL doesn't cleanly handle retry conditions.
 *  CAM_REQUEUE_REQ works only when no one is using the pass(4) interface.  So
 *  when SCIL denotes an I/O needs to be retried (typically because of mixing
 *  tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
 *  these I/Os internally.  Once SCIL completes an I/O to this device, or we
 *  get a ready notification, we will retry the first I/O on the queue.
 *  Unfortunately, SCIL also doesn't cleanly handle starting the new I/O within
 *  the context of the completion handler, so we need to retry these I/Os
 *  after the completion handler is done executing.
 */
void
isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
{
        struct ISCI_REMOTE_DEVICE *dev;
        struct ccb_hdr *ccb_h;
        uint8_t *ptr;
        int dev_idx;

        KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));

        controller->release_queued_ccbs = FALSE;
        for (dev_idx = 0;
             dev_idx < SCI_MAX_REMOTE_DEVICES;
             dev_idx++) {

                dev = controller->remote_device[dev_idx];
                if (dev != NULL &&
                    dev->release_queued_ccb == TRUE &&
                    dev->queued_ccb_in_progress == NULL) {
                        dev->release_queued_ccb = FALSE;
                        ccb_h = TAILQ_FIRST(&dev->queued_ccbs);

                        if (ccb_h == NULL)
                                continue;

                        ptr = scsiio_cdb_ptr(&((union ccb *)ccb_h)->csio);
                        isci_log_message(1, "ISCI", "release %p %x\n", ccb_h, *ptr);

                        dev->queued_ccb_in_progress = (union ccb *)ccb_h;
                        isci_io_request_execute_scsi_io(
                            (union ccb *)ccb_h, controller);
                }
        }
}