]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/cam/cam_xpt.c
CAM: Replace random sbuf_printf() with cheaper cat/putc.
[FreeBSD/FreeBSD.git] / sys / cam / cam_xpt.c
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include "opt_printf.h"
33
34 #include <sys/param.h>
35 #include <sys/bio.h>
36 #include <sys/bus.h>
37 #include <sys/systm.h>
38 #include <sys/types.h>
39 #include <sys/malloc.h>
40 #include <sys/kernel.h>
41 #include <sys/time.h>
42 #include <sys/conf.h>
43 #include <sys/fcntl.h>
44 #include <sys/proc.h>
45 #include <sys/sbuf.h>
46 #include <sys/smp.h>
47 #include <sys/taskqueue.h>
48
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/sysctl.h>
52 #include <sys/kthread.h>
53
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_iosched.h>
57 #include <cam/cam_periph.h>
58 #include <cam/cam_queue.h>
59 #include <cam/cam_sim.h>
60 #include <cam/cam_xpt.h>
61 #include <cam/cam_xpt_sim.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_xpt_internal.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_compat.h>
66
67 #include <cam/scsi/scsi_all.h>
68 #include <cam/scsi/scsi_message.h>
69 #include <cam/scsi/scsi_pass.h>
70
71 #include <machine/stdarg.h>     /* for xpt_print below */
72
73 /* Wild guess based on not wanting to grow the stack too much */
74 #define XPT_PRINT_MAXLEN        512
75 #ifdef PRINTF_BUFR_SIZE
76 #define XPT_PRINT_LEN   PRINTF_BUFR_SIZE
77 #else
78 #define XPT_PRINT_LEN   128
79 #endif
80 _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");
81
82 /*
83  * This is the maximum number of high powered commands (e.g. start unit)
84  * that can be outstanding at a particular time.
85  */
86 #ifndef CAM_MAX_HIGHPOWER
87 #define CAM_MAX_HIGHPOWER  4
88 #endif
89
90 /* Datastructures internal to the xpt layer */
91 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
92 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
93 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
94 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
95
96 struct xpt_softc {
97         uint32_t                xpt_generation;
98
99         /* number of high powered commands that can go through right now */
100         struct mtx              xpt_highpower_lock;
101         STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
102         int                     num_highpower;
103
104         /* queue for handling async rescan requests. */
105         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
106         int buses_to_config;
107         int buses_config_done;
108
109         /*
110          * Registered buses
111          *
112          * N.B., "busses" is an archaic spelling of "buses".  In new code
113          * "buses" is preferred.
114          */
115         TAILQ_HEAD(,cam_eb)     xpt_busses;
116         u_int                   bus_generation;
117
118         int                     boot_delay;
119         struct callout          boot_callout;
120         struct task             boot_task;
121         struct root_hold_token  xpt_rootmount;
122
123         struct mtx              xpt_topo_lock;
124         struct taskqueue        *xpt_taskq;
125 };
126
127 typedef enum {
128         DM_RET_COPY             = 0x01,
129         DM_RET_FLAG_MASK        = 0x0f,
130         DM_RET_NONE             = 0x00,
131         DM_RET_STOP             = 0x10,
132         DM_RET_DESCEND          = 0x20,
133         DM_RET_ERROR            = 0x30,
134         DM_RET_ACTION_MASK      = 0xf0
135 } dev_match_ret;
136
137 typedef enum {
138         XPT_DEPTH_BUS,
139         XPT_DEPTH_TARGET,
140         XPT_DEPTH_DEVICE,
141         XPT_DEPTH_PERIPH
142 } xpt_traverse_depth;
143
144 struct xpt_traverse_config {
145         xpt_traverse_depth      depth;
146         void                    *tr_func;
147         void                    *tr_arg;
148 };
149
150 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
151 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
152 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
153 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
154 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
155
156 /* Transport layer configuration information */
157 static struct xpt_softc xsoftc;
158
159 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
160
161 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
162            &xsoftc.boot_delay, 0, "Bus registration wait time");
163 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
164             &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
165
166 struct cam_doneq {
167         struct mtx_padalign     cam_doneq_mtx;
168         STAILQ_HEAD(, ccb_hdr)  cam_doneq;
169         int                     cam_doneq_sleep;
170 };
171
172 static struct cam_doneq cam_doneqs[MAXCPU];
173 static u_int __read_mostly cam_num_doneqs;
174 static struct proc *cam_proc;
175 static struct cam_doneq cam_async;
176
177 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
178            &cam_num_doneqs, 0, "Number of completion queues/threads");
179
180 struct cam_periph *xpt_periph;
181
182 static periph_init_t xpt_periph_init;
183
184 static struct periph_driver xpt_driver =
185 {
186         xpt_periph_init, "xpt",
187         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
188         CAM_PERIPH_DRV_EARLY
189 };
190
191 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
192
193 static d_open_t xptopen;
194 static d_close_t xptclose;
195 static d_ioctl_t xptioctl;
196 static d_ioctl_t xptdoioctl;
197
198 static struct cdevsw xpt_cdevsw = {
199         .d_version =    D_VERSION,
200         .d_flags =      0,
201         .d_open =       xptopen,
202         .d_close =      xptclose,
203         .d_ioctl =      xptioctl,
204         .d_name =       "xpt",
205 };
206
207 /* Storage for debugging datastructures */
208 struct cam_path *cam_dpath;
209 uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
210 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
211         &cam_dflags, 0, "Enabled debug flags");
212 uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
213 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
214         &cam_debug_delay, 0, "Delay in us after each debug message");
215
216 /* Our boot-time initialization hook */
217 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
218
219 static moduledata_t cam_moduledata = {
220         "cam",
221         cam_module_event_handler,
222         NULL
223 };
224
225 static int      xpt_init(void *);
226
227 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
228 MODULE_VERSION(cam, 1);
229
230 static void             xpt_async_bcast(struct async_list *async_head,
231                                         uint32_t async_code,
232                                         struct cam_path *path,
233                                         void *async_arg);
234 static path_id_t xptnextfreepathid(void);
235 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
236 static union ccb *xpt_get_ccb(struct cam_periph *periph);
237 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
238 static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
239 static void      xpt_run_allocq_task(void *context, int pending);
240 static void      xpt_run_devq(struct cam_devq *devq);
241 static callout_func_t xpt_release_devq_timeout;
242 static void      xpt_acquire_bus(struct cam_eb *bus);
243 static void      xpt_release_bus(struct cam_eb *bus);
244 static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
245 static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
246                     int run_queue);
247 static struct cam_et*
248                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
249 static void      xpt_acquire_target(struct cam_et *target);
250 static void      xpt_release_target(struct cam_et *target);
251 static struct cam_eb*
252                  xpt_find_bus(path_id_t path_id);
253 static struct cam_et*
254                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
255 static struct cam_ed*
256                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
257 static void      xpt_config(void *arg);
258 static void      xpt_hold_boot_locked(void);
259 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
260                                  uint32_t new_priority);
261 static xpt_devicefunc_t xptpassannouncefunc;
262 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
263 static void      xptpoll(struct cam_sim *sim);
264 static void      camisr_runqueue(void);
265 static void      xpt_done_process(struct ccb_hdr *ccb_h);
266 static void      xpt_done_td(void *);
267 static void      xpt_async_td(void *);
268 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
269                                     u_int num_patterns, struct cam_eb *bus);
270 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
271                                        u_int num_patterns,
272                                        struct cam_ed *device);
273 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
274                                        u_int num_patterns,
275                                        struct cam_periph *periph);
276 static xpt_busfunc_t    xptedtbusfunc;
277 static xpt_targetfunc_t xptedttargetfunc;
278 static xpt_devicefunc_t xptedtdevicefunc;
279 static xpt_periphfunc_t xptedtperiphfunc;
280 static xpt_pdrvfunc_t   xptplistpdrvfunc;
281 static xpt_periphfunc_t xptplistperiphfunc;
282 static int              xptedtmatch(struct ccb_dev_match *cdm);
283 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
284 static int              xptbustraverse(struct cam_eb *start_bus,
285                                        xpt_busfunc_t *tr_func, void *arg);
286 static int              xpttargettraverse(struct cam_eb *bus,
287                                           struct cam_et *start_target,
288                                           xpt_targetfunc_t *tr_func, void *arg);
289 static int              xptdevicetraverse(struct cam_et *target,
290                                           struct cam_ed *start_device,
291                                           xpt_devicefunc_t *tr_func, void *arg);
292 static int              xptperiphtraverse(struct cam_ed *device,
293                                           struct cam_periph *start_periph,
294                                           xpt_periphfunc_t *tr_func, void *arg);
295 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
296                                         xpt_pdrvfunc_t *tr_func, void *arg);
297 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
298                                             struct cam_periph *start_periph,
299                                             xpt_periphfunc_t *tr_func,
300                                             void *arg);
301 static xpt_busfunc_t    xptdefbusfunc;
302 static xpt_targetfunc_t xptdeftargetfunc;
303 static xpt_devicefunc_t xptdefdevicefunc;
304 static xpt_periphfunc_t xptdefperiphfunc;
305 static void             xpt_finishconfig_task(void *context, int pending);
306 static void             xpt_dev_async_default(uint32_t async_code,
307                                               struct cam_eb *bus,
308                                               struct cam_et *target,
309                                               struct cam_ed *device,
310                                               void *async_arg);
311 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
312                                                  struct cam_et *target,
313                                                  lun_id_t lun_id);
314 static xpt_devicefunc_t xptsetasyncfunc;
315 static xpt_busfunc_t    xptsetasyncbusfunc;
316 static cam_status       xptregister(struct cam_periph *periph,
317                                     void *arg);
318
319 static __inline int
320 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
321 {
322         int     retval;
323
324         mtx_assert(&devq->send_mtx, MA_OWNED);
325         if ((dev->ccbq.queue.entries > 0) &&
326             (dev->ccbq.dev_openings > 0) &&
327             (dev->ccbq.queue.qfrozen_cnt == 0)) {
328                 /*
329                  * The priority of a device waiting for controller
330                  * resources is that of the highest priority CCB
331                  * enqueued.
332                  */
333                 retval =
334                     xpt_schedule_dev(&devq->send_queue,
335                                      &dev->devq_entry,
336                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
337         } else {
338                 retval = 0;
339         }
340         return (retval);
341 }
342
343 static __inline int
344 device_is_queued(struct cam_ed *device)
345 {
346         return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
347 }
348
349 static void
350 xpt_periph_init(void)
351 {
352         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
353 }
354
355 static int
356 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
357 {
358
359         /*
360          * Only allow read-write access.
361          */
362         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
363                 return(EPERM);
364
365         /*
366          * We don't allow nonblocking access.
367          */
368         if ((flags & O_NONBLOCK) != 0) {
369                 printf("%s: can't do nonblocking access\n", devtoname(dev));
370                 return(ENODEV);
371         }
372
373         return(0);
374 }
375
376 static int
377 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
378 {
379
380         return(0);
381 }
382
383 /*
384  * Don't automatically grab the xpt softc lock here even though this is going
385  * through the xpt device.  The xpt device is really just a back door for
386  * accessing other devices and SIMs, so the right thing to do is to grab
387  * the appropriate SIM lock once the bus/SIM is located.
388  */
389 static int
390 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
391 {
392         int error;
393
394         if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
395                 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
396         }
397         return (error);
398 }
399
400 static int
401 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
402 {
403         int error;
404
405         error = 0;
406
407         switch(cmd) {
408         /*
409          * For the transport layer CAMIOCOMMAND ioctl, we really only want
410          * to accept CCB types that don't quite make sense to send through a
411          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
412          * in the CAM spec.
413          */
414         case CAMIOCOMMAND: {
415                 union ccb *ccb;
416                 union ccb *inccb;
417                 struct cam_eb *bus;
418
419                 inccb = (union ccb *)addr;
420 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
421                 if (inccb->ccb_h.func_code == XPT_SCSI_IO)
422                         inccb->csio.bio = NULL;
423 #endif
424
425                 if (inccb->ccb_h.flags & CAM_UNLOCKED)
426                         return (EINVAL);
427
428                 bus = xpt_find_bus(inccb->ccb_h.path_id);
429                 if (bus == NULL)
430                         return (EINVAL);
431
432                 switch (inccb->ccb_h.func_code) {
433                 case XPT_SCAN_BUS:
434                 case XPT_RESET_BUS:
435                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
436                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
437                                 xpt_release_bus(bus);
438                                 return (EINVAL);
439                         }
440                         break;
441                 case XPT_SCAN_TGT:
442                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
443                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
444                                 xpt_release_bus(bus);
445                                 return (EINVAL);
446                         }
447                         break;
448                 default:
449                         break;
450                 }
451
452                 switch(inccb->ccb_h.func_code) {
453                 case XPT_SCAN_BUS:
454                 case XPT_RESET_BUS:
455                 case XPT_PATH_INQ:
456                 case XPT_ENG_INQ:
457                 case XPT_SCAN_LUN:
458                 case XPT_SCAN_TGT:
459
460                         ccb = xpt_alloc_ccb();
461
462                         /*
463                          * Create a path using the bus, target, and lun the
464                          * user passed in.
465                          */
466                         if (xpt_create_path(&ccb->ccb_h.path, NULL,
467                                             inccb->ccb_h.path_id,
468                                             inccb->ccb_h.target_id,
469                                             inccb->ccb_h.target_lun) !=
470                                             CAM_REQ_CMP){
471                                 error = EINVAL;
472                                 xpt_free_ccb(ccb);
473                                 break;
474                         }
475                         /* Ensure all of our fields are correct */
476                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
477                                       inccb->ccb_h.pinfo.priority);
478                         xpt_merge_ccb(ccb, inccb);
479                         xpt_path_lock(ccb->ccb_h.path);
480                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
481                         xpt_path_unlock(ccb->ccb_h.path);
482                         bcopy(ccb, inccb, sizeof(union ccb));
483                         xpt_free_path(ccb->ccb_h.path);
484                         xpt_free_ccb(ccb);
485                         break;
486
487                 case XPT_DEBUG: {
488                         union ccb ccb;
489
490                         /*
491                          * This is an immediate CCB, so it's okay to
492                          * allocate it on the stack.
493                          */
494                         memset(&ccb, 0, sizeof(ccb));
495
496                         /*
497                          * Create a path using the bus, target, and lun the
498                          * user passed in.
499                          */
500                         if (xpt_create_path(&ccb.ccb_h.path, NULL,
501                                             inccb->ccb_h.path_id,
502                                             inccb->ccb_h.target_id,
503                                             inccb->ccb_h.target_lun) !=
504                                             CAM_REQ_CMP){
505                                 error = EINVAL;
506                                 break;
507                         }
508                         /* Ensure all of our fields are correct */
509                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
510                                       inccb->ccb_h.pinfo.priority);
511                         xpt_merge_ccb(&ccb, inccb);
512                         xpt_action(&ccb);
513                         bcopy(&ccb, inccb, sizeof(union ccb));
514                         xpt_free_path(ccb.ccb_h.path);
515                         break;
516                 }
517                 case XPT_DEV_MATCH: {
518                         struct cam_periph_map_info mapinfo;
519                         struct cam_path *old_path;
520
521                         /*
522                          * We can't deal with physical addresses for this
523                          * type of transaction.
524                          */
525                         if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
526                             CAM_DATA_VADDR) {
527                                 error = EINVAL;
528                                 break;
529                         }
530
531                         /*
532                          * Save this in case the caller had it set to
533                          * something in particular.
534                          */
535                         old_path = inccb->ccb_h.path;
536
537                         /*
538                          * We really don't need a path for the matching
539                          * code.  The path is needed because of the
540                          * debugging statements in xpt_action().  They
541                          * assume that the CCB has a valid path.
542                          */
543                         inccb->ccb_h.path = xpt_periph->path;
544
545                         bzero(&mapinfo, sizeof(mapinfo));
546
547                         /*
548                          * Map the pattern and match buffers into kernel
549                          * virtual address space.
550                          */
551                         error = cam_periph_mapmem(inccb, &mapinfo, maxphys);
552
553                         if (error) {
554                                 inccb->ccb_h.path = old_path;
555                                 break;
556                         }
557
558                         /*
559                          * This is an immediate CCB, we can send it on directly.
560                          */
561                         xpt_action(inccb);
562
563                         /*
564                          * Map the buffers back into user space.
565                          */
566                         cam_periph_unmapmem(inccb, &mapinfo);
567
568                         inccb->ccb_h.path = old_path;
569
570                         error = 0;
571                         break;
572                 }
573                 default:
574                         error = ENOTSUP;
575                         break;
576                 }
577                 xpt_release_bus(bus);
578                 break;
579         }
580         /*
581          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
582          * with the periphal driver name and unit name filled in.  The other
583          * fields don't really matter as input.  The passthrough driver name
584          * ("pass"), and unit number are passed back in the ccb.  The current
585          * device generation number, and the index into the device peripheral
586          * driver list, and the status are also passed back.  Note that
587          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
588          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
589          * (or rather should be) impossible for the device peripheral driver
590          * list to change since we look at the whole thing in one pass, and
591          * we do it with lock protection.
592          *
593          */
594         case CAMGETPASSTHRU: {
595                 union ccb *ccb;
596                 struct cam_periph *periph;
597                 struct periph_driver **p_drv;
598                 char   *name;
599                 u_int unit;
600                 bool base_periph_found;
601
602                 ccb = (union ccb *)addr;
603                 unit = ccb->cgdl.unit_number;
604                 name = ccb->cgdl.periph_name;
605                 base_periph_found = false;
606 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
607                 if (ccb->ccb_h.func_code == XPT_SCSI_IO)
608                         ccb->csio.bio = NULL;
609 #endif
610
611                 /*
612                  * Sanity check -- make sure we don't get a null peripheral
613                  * driver name.
614                  */
615                 if (*ccb->cgdl.periph_name == '\0') {
616                         error = EINVAL;
617                         break;
618                 }
619
620                 /* Keep the list from changing while we traverse it */
621                 xpt_lock_buses();
622
623                 /* first find our driver in the list of drivers */
624                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
625                         if (strcmp((*p_drv)->driver_name, name) == 0)
626                                 break;
627
628                 if (*p_drv == NULL) {
629                         xpt_unlock_buses();
630                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
631                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
632                         *ccb->cgdl.periph_name = '\0';
633                         ccb->cgdl.unit_number = 0;
634                         error = ENOENT;
635                         break;
636                 }
637
638                 /*
639                  * Run through every peripheral instance of this driver
640                  * and check to see whether it matches the unit passed
641                  * in by the user.  If it does, get out of the loops and
642                  * find the passthrough driver associated with that
643                  * peripheral driver.
644                  */
645                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
646                      periph = TAILQ_NEXT(periph, unit_links)) {
647                         if (periph->unit_number == unit)
648                                 break;
649                 }
650                 /*
651                  * If we found the peripheral driver that the user passed
652                  * in, go through all of the peripheral drivers for that
653                  * particular device and look for a passthrough driver.
654                  */
655                 if (periph != NULL) {
656                         struct cam_ed *device;
657                         int i;
658
659                         base_periph_found = true;
660                         device = periph->path->device;
661                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
662                              periph != NULL;
663                              periph = SLIST_NEXT(periph, periph_links), i++) {
664                                 /*
665                                  * Check to see whether we have a
666                                  * passthrough device or not.
667                                  */
668                                 if (strcmp(periph->periph_name, "pass") == 0) {
669                                         /*
670                                          * Fill in the getdevlist fields.
671                                          */
672                                         strlcpy(ccb->cgdl.periph_name,
673                                                periph->periph_name,
674                                                sizeof(ccb->cgdl.periph_name));
675                                         ccb->cgdl.unit_number =
676                                                 periph->unit_number;
677                                         if (SLIST_NEXT(periph, periph_links))
678                                                 ccb->cgdl.status =
679                                                         CAM_GDEVLIST_MORE_DEVS;
680                                         else
681                                                 ccb->cgdl.status =
682                                                        CAM_GDEVLIST_LAST_DEVICE;
683                                         ccb->cgdl.generation =
684                                                 device->generation;
685                                         ccb->cgdl.index = i;
686                                         /*
687                                          * Fill in some CCB header fields
688                                          * that the user may want.
689                                          */
690                                         ccb->ccb_h.path_id =
691                                                 periph->path->bus->path_id;
692                                         ccb->ccb_h.target_id =
693                                                 periph->path->target->target_id;
694                                         ccb->ccb_h.target_lun =
695                                                 periph->path->device->lun_id;
696                                         ccb->ccb_h.status = CAM_REQ_CMP;
697                                         break;
698                                 }
699                         }
700                 }
701
702                 /*
703                  * If the periph is null here, one of two things has
704                  * happened.  The first possibility is that we couldn't
705                  * find the unit number of the particular peripheral driver
706                  * that the user is asking about.  e.g. the user asks for
707                  * the passthrough driver for "da11".  We find the list of
708                  * "da" peripherals all right, but there is no unit 11.
709                  * The other possibility is that we went through the list
710                  * of peripheral drivers attached to the device structure,
711                  * but didn't find one with the name "pass".  Either way,
712                  * we return ENOENT, since we couldn't find something.
713                  */
714                 if (periph == NULL) {
715                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
716                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
717                         *ccb->cgdl.periph_name = '\0';
718                         ccb->cgdl.unit_number = 0;
719                         error = ENOENT;
720                         /*
721                          * It is unfortunate that this is even necessary,
722                          * but there are many, many clueless users out there.
723                          * If this is true, the user is looking for the
724                          * passthrough driver, but doesn't have one in his
725                          * kernel.
726                          */
727                         if (base_periph_found) {
728                                 printf("xptioctl: pass driver is not in the "
729                                        "kernel\n");
730                                 printf("xptioctl: put \"device pass\" in "
731                                        "your kernel config file\n");
732                         }
733                 }
734                 xpt_unlock_buses();
735                 break;
736                 }
737         default:
738                 error = ENOTTY;
739                 break;
740         }
741
742         return(error);
743 }
744
745 static int
746 cam_module_event_handler(module_t mod, int what, void *arg)
747 {
748         int error;
749
750         switch (what) {
751         case MOD_LOAD:
752                 if ((error = xpt_init(NULL)) != 0)
753                         return (error);
754                 break;
755         case MOD_UNLOAD:
756                 return EBUSY;
757         default:
758                 return EOPNOTSUPP;
759         }
760
761         return 0;
762 }
763
764 static struct xpt_proto *
765 xpt_proto_find(cam_proto proto)
766 {
767         struct xpt_proto **pp;
768
769         SET_FOREACH(pp, cam_xpt_proto_set) {
770                 if ((*pp)->proto == proto)
771                         return *pp;
772         }
773
774         return NULL;
775 }
776
777 static void
778 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
779 {
780
781         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
782                 xpt_free_path(done_ccb->ccb_h.path);
783                 xpt_free_ccb(done_ccb);
784         } else {
785                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
786                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
787         }
788         xpt_release_boot();
789 }
790
791 /* thread to handle bus rescans */
792 static void
793 xpt_scanner_thread(void *dummy)
794 {
795         union ccb       *ccb;
796         struct mtx      *mtx;
797         struct cam_ed   *device;
798
799         xpt_lock_buses();
800         for (;;) {
801                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
802                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
803                                "-", 0);
804                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
805                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
806                         xpt_unlock_buses();
807
808                         /*
809                          * We need to lock the device's mutex which we use as
810                          * the path mutex. We can't do it directly because the
811                          * cam_path in the ccb may wind up going away because
812                          * the path lock may be dropped and the path retired in
813                          * the completion callback. We do this directly to keep
814                          * the reference counts in cam_path sane. We also have
815                          * to copy the device pointer because ccb_h.path may
816                          * be freed in the callback.
817                          */
818                         mtx = xpt_path_mtx(ccb->ccb_h.path);
819                         device = ccb->ccb_h.path->device;
820                         xpt_acquire_device(device);
821                         mtx_lock(mtx);
822                         xpt_action(ccb);
823                         mtx_unlock(mtx);
824                         xpt_release_device(device);
825
826                         xpt_lock_buses();
827                 }
828         }
829 }
830
831 void
832 xpt_rescan(union ccb *ccb)
833 {
834         struct ccb_hdr *hdr;
835
836         /* Prepare request */
837         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
838             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
839                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
840         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
841             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
842                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
843         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
844             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
845                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
846         else {
847                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
848                 xpt_free_path(ccb->ccb_h.path);
849                 xpt_free_ccb(ccb);
850                 return;
851         }
852         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
853             ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
854                 xpt_action_name(ccb->ccb_h.func_code)));
855
856         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
857         ccb->ccb_h.cbfcnp = xpt_rescan_done;
858         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
859         /* Don't make duplicate entries for the same paths. */
860         xpt_lock_buses();
861         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
862                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
863                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
864                                 wakeup(&xsoftc.ccb_scanq);
865                                 xpt_unlock_buses();
866                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
867                                 xpt_free_path(ccb->ccb_h.path);
868                                 xpt_free_ccb(ccb);
869                                 return;
870                         }
871                 }
872         }
873         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
874         xpt_hold_boot_locked();
875         wakeup(&xsoftc.ccb_scanq);
876         xpt_unlock_buses();
877 }
878
879 /* Functions accessed by the peripheral drivers */
880 static int
881 xpt_init(void *dummy)
882 {
883         struct cam_sim *xpt_sim;
884         struct cam_path *path;
885         struct cam_devq *devq;
886         cam_status status;
887         int error, i;
888
889         TAILQ_INIT(&xsoftc.xpt_busses);
890         TAILQ_INIT(&xsoftc.ccb_scanq);
891         STAILQ_INIT(&xsoftc.highpowerq);
892         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
893
894         mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
895         xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
896             taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
897
898 #ifdef CAM_BOOT_DELAY
899         /*
900          * Override this value at compile time to assist our users
901          * who don't use loader to boot a kernel.
902          */
903         xsoftc.boot_delay = CAM_BOOT_DELAY;
904 #endif
905
906         /*
907          * The xpt layer is, itself, the equivalent of a SIM.
908          * Allow 16 ccbs in the ccb pool for it.  This should
909          * give decent parallelism when we probe buses and
910          * perform other XPT functions.
911          */
912         devq = cam_simq_alloc(16);
913         xpt_sim = cam_sim_alloc(xptaction,
914                                 xptpoll,
915                                 "xpt",
916                                 /*softc*/NULL,
917                                 /*unit*/0,
918                                 /*mtx*/NULL,
919                                 /*max_dev_transactions*/0,
920                                 /*max_tagged_dev_transactions*/0,
921                                 devq);
922         if (xpt_sim == NULL)
923                 return (ENOMEM);
924
925         if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
926                 printf("xpt_init: xpt_bus_register failed with errno %d,"
927                        " failing attach\n", error);
928                 return (EINVAL);
929         }
930
931         /*
932          * Looking at the XPT from the SIM layer, the XPT is
933          * the equivalent of a peripheral driver.  Allocate
934          * a peripheral driver entry for us.
935          */
936         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
937                                       CAM_TARGET_WILDCARD,
938                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
939                 printf("xpt_init: xpt_create_path failed with status %#x,"
940                        " failing attach\n", status);
941                 return (EINVAL);
942         }
943         xpt_path_lock(path);
944         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
945                          path, NULL, 0, xpt_sim);
946         xpt_path_unlock(path);
947         xpt_free_path(path);
948
949         if (cam_num_doneqs < 1)
950                 cam_num_doneqs = 1 + mp_ncpus / 6;
951         else if (cam_num_doneqs > MAXCPU)
952                 cam_num_doneqs = MAXCPU;
953         for (i = 0; i < cam_num_doneqs; i++) {
954                 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
955                     MTX_DEF);
956                 STAILQ_INIT(&cam_doneqs[i].cam_doneq);
957                 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
958                     &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
959                 if (error != 0) {
960                         cam_num_doneqs = i;
961                         break;
962                 }
963         }
964         if (cam_num_doneqs < 1) {
965                 printf("xpt_init: Cannot init completion queues "
966                        "- failing attach\n");
967                 return (ENOMEM);
968         }
969
970         mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
971         STAILQ_INIT(&cam_async.cam_doneq);
972         if (kproc_kthread_add(xpt_async_td, &cam_async,
973                 &cam_proc, NULL, 0, 0, "cam", "async") != 0) {
974                 printf("xpt_init: Cannot init async thread "
975                        "- failing attach\n");
976                 return (ENOMEM);
977         }
978
979         /*
980          * Register a callback for when interrupts are enabled.
981          */
982         config_intrhook_oneshot(xpt_config, NULL);
983
984         return (0);
985 }
986
987 static cam_status
988 xptregister(struct cam_periph *periph, void *arg)
989 {
990         struct cam_sim *xpt_sim;
991
992         if (periph == NULL) {
993                 printf("xptregister: periph was NULL!!\n");
994                 return(CAM_REQ_CMP_ERR);
995         }
996
997         xpt_sim = (struct cam_sim *)arg;
998         xpt_sim->softc = periph;
999         xpt_periph = periph;
1000         periph->softc = NULL;
1001
1002         return(CAM_REQ_CMP);
1003 }
1004
1005 int32_t
1006 xpt_add_periph(struct cam_periph *periph)
1007 {
1008         struct cam_ed *device;
1009         int32_t  status;
1010
1011         TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
1012         device = periph->path->device;
1013         status = CAM_REQ_CMP;
1014         if (device != NULL) {
1015                 mtx_lock(&device->target->bus->eb_mtx);
1016                 device->generation++;
1017                 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1018                 mtx_unlock(&device->target->bus->eb_mtx);
1019                 atomic_add_32(&xsoftc.xpt_generation, 1);
1020         }
1021
1022         return (status);
1023 }
1024
1025 void
1026 xpt_remove_periph(struct cam_periph *periph)
1027 {
1028         struct cam_ed *device;
1029
1030         device = periph->path->device;
1031         if (device != NULL) {
1032                 mtx_lock(&device->target->bus->eb_mtx);
1033                 device->generation++;
1034                 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1035                 mtx_unlock(&device->target->bus->eb_mtx);
1036                 atomic_add_32(&xsoftc.xpt_generation, 1);
1037         }
1038 }
1039
1040 void
1041 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1042 {
1043         char buf[128];
1044         struct sbuf sb;
1045
1046         (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1047         sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1048         xpt_announce_periph_sbuf(periph, &sb, announce_string);
1049         (void)sbuf_finish(&sb);
1050 }
1051
1052 void
1053 xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
1054     char *announce_string)
1055 {
1056         struct  cam_path *path = periph->path;
1057         struct  xpt_proto *proto;
1058
1059         cam_periph_assert(periph, MA_OWNED);
1060         periph->flags |= CAM_PERIPH_ANNOUNCED;
1061
1062         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1063             periph->periph_name, periph->unit_number,
1064             path->bus->sim->sim_name,
1065             path->bus->sim->unit_number,
1066             path->bus->sim->bus_id,
1067             path->bus->path_id,
1068             path->target->target_id,
1069             (uintmax_t)path->device->lun_id);
1070         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1071         proto = xpt_proto_find(path->device->protocol);
1072         if (proto)
1073                 proto->ops->announce_sbuf(path->device, sb);
1074         else
1075                 sbuf_printf(sb, "Unknown protocol device %d\n",
1076                     path->device->protocol);
1077         if (path->device->serial_num_len > 0) {
1078                 /* Don't wrap the screen  - print only the first 60 chars */
1079                 sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
1080                     periph->periph_name, periph->unit_number,
1081                     path->device->serial_num);
1082         }
1083         /* Announce transport details. */
1084         path->bus->xport->ops->announce_sbuf(periph, sb);
1085         /* Announce command queueing. */
1086         if (path->device->inq_flags & SID_CmdQue
1087          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1088                 sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
1089                     periph->periph_name, periph->unit_number);
1090         }
1091         /* Announce caller's details if they've passed in. */
1092         if (announce_string != NULL)
1093                 sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
1094                     periph->unit_number, announce_string);
1095 }
1096
1097 void
1098 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1099 {
1100         if (quirks != 0) {
1101                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
1102                     periph->unit_number, quirks, bit_string);
1103         }
1104 }
1105
1106 void
1107 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
1108                          int quirks, char *bit_string)
1109 {
1110         if (quirks != 0) {
1111                 sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
1112                     periph->unit_number, quirks, bit_string);
1113         }
1114 }
1115
1116 void
1117 xpt_denounce_periph(struct cam_periph *periph)
1118 {
1119         char buf[128];
1120         struct sbuf sb;
1121
1122         (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1123         sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1124         xpt_denounce_periph_sbuf(periph, &sb);
1125         (void)sbuf_finish(&sb);
1126 }
1127
1128 void
1129 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
1130 {
1131         struct cam_path *path = periph->path;
1132         struct xpt_proto *proto;
1133
1134         cam_periph_assert(periph, MA_OWNED);
1135
1136         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1137             periph->periph_name, periph->unit_number,
1138             path->bus->sim->sim_name,
1139             path->bus->sim->unit_number,
1140             path->bus->sim->bus_id,
1141             path->bus->path_id,
1142             path->target->target_id,
1143             (uintmax_t)path->device->lun_id);
1144         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1145         proto = xpt_proto_find(path->device->protocol);
1146         if (proto)
1147                 proto->ops->denounce_sbuf(path->device, sb);
1148         else
1149                 sbuf_printf(sb, "Unknown protocol device %d",
1150                     path->device->protocol);
1151         if (path->device->serial_num_len > 0)
1152                 sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
1153         sbuf_cat(sb, " detached\n");
1154 }
1155
1156 int
1157 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1158 {
1159         int ret = -1, l, o;
1160         struct ccb_dev_advinfo cdai;
1161         struct scsi_vpd_device_id *did;
1162         struct scsi_vpd_id_descriptor *idd;
1163
1164         xpt_path_assert(path, MA_OWNED);
1165
1166         memset(&cdai, 0, sizeof(cdai));
1167         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1168         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1169         cdai.flags = CDAI_FLAG_NONE;
1170         cdai.bufsiz = len;
1171         cdai.buf = buf;
1172
1173         if (!strcmp(attr, "GEOM::ident"))
1174                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1175         else if (!strcmp(attr, "GEOM::physpath"))
1176                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
1177         else if (strcmp(attr, "GEOM::lunid") == 0 ||
1178                  strcmp(attr, "GEOM::lunname") == 0) {
1179                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1180                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1181                 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
1182                 if (cdai.buf == NULL) {
1183                         ret = ENOMEM;
1184                         goto out;
1185                 }
1186         } else
1187                 goto out;
1188
1189         xpt_action((union ccb *)&cdai); /* can only be synchronous */
1190         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1191                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1192         if (cdai.provsiz == 0)
1193                 goto out;
1194         switch(cdai.buftype) {
1195         case CDAI_TYPE_SCSI_DEVID:
1196                 did = (struct scsi_vpd_device_id *)cdai.buf;
1197                 if (strcmp(attr, "GEOM::lunid") == 0) {
1198                         idd = scsi_get_devid(did, cdai.provsiz,
1199                             scsi_devid_is_lun_naa);
1200                         if (idd == NULL)
1201                                 idd = scsi_get_devid(did, cdai.provsiz,
1202                                     scsi_devid_is_lun_eui64);
1203                         if (idd == NULL)
1204                                 idd = scsi_get_devid(did, cdai.provsiz,
1205                                     scsi_devid_is_lun_uuid);
1206                         if (idd == NULL)
1207                                 idd = scsi_get_devid(did, cdai.provsiz,
1208                                     scsi_devid_is_lun_md5);
1209                 } else
1210                         idd = NULL;
1211
1212                 if (idd == NULL)
1213                         idd = scsi_get_devid(did, cdai.provsiz,
1214                             scsi_devid_is_lun_t10);
1215                 if (idd == NULL)
1216                         idd = scsi_get_devid(did, cdai.provsiz,
1217                             scsi_devid_is_lun_name);
1218                 if (idd == NULL)
1219                         break;
1220
1221                 ret = 0;
1222                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1223                     SVPD_ID_CODESET_ASCII) {
1224                         if (idd->length < len) {
1225                                 for (l = 0; l < idd->length; l++)
1226                                         buf[l] = idd->identifier[l] ?
1227                                             idd->identifier[l] : ' ';
1228                                 buf[l] = 0;
1229                         } else
1230                                 ret = EFAULT;
1231                         break;
1232                 }
1233                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1234                     SVPD_ID_CODESET_UTF8) {
1235                         l = strnlen(idd->identifier, idd->length);
1236                         if (l < len) {
1237                                 bcopy(idd->identifier, buf, l);
1238                                 buf[l] = 0;
1239                         } else
1240                                 ret = EFAULT;
1241                         break;
1242                 }
1243                 if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
1244                     SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
1245                         if ((idd->length - 2) * 2 + 4 >= len) {
1246                                 ret = EFAULT;
1247                                 break;
1248                         }
1249                         for (l = 2, o = 0; l < idd->length; l++) {
1250                                 if (l == 6 || l == 8 || l == 10 || l == 12)
1251                                     o += sprintf(buf + o, "-");
1252                                 o += sprintf(buf + o, "%02x",
1253                                     idd->identifier[l]);
1254                         }
1255                         break;
1256                 }
1257                 if (idd->length * 2 < len) {
1258                         for (l = 0; l < idd->length; l++)
1259                                 sprintf(buf + l * 2, "%02x",
1260                                     idd->identifier[l]);
1261                 } else
1262                                 ret = EFAULT;
1263                 break;
1264         default:
1265                 if (cdai.provsiz < len) {
1266                         cdai.buf[cdai.provsiz] = 0;
1267                         ret = 0;
1268                 } else
1269                         ret = EFAULT;
1270                 break;
1271         }
1272
1273 out:
1274         if ((char *)cdai.buf != buf)
1275                 free(cdai.buf, M_CAMXPT);
1276         return ret;
1277 }
1278
1279 static dev_match_ret
1280 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1281             struct cam_eb *bus)
1282 {
1283         dev_match_ret retval;
1284         u_int i;
1285
1286         retval = DM_RET_NONE;
1287
1288         /*
1289          * If we aren't given something to match against, that's an error.
1290          */
1291         if (bus == NULL)
1292                 return(DM_RET_ERROR);
1293
1294         /*
1295          * If there are no match entries, then this bus matches no
1296          * matter what.
1297          */
1298         if ((patterns == NULL) || (num_patterns == 0))
1299                 return(DM_RET_DESCEND | DM_RET_COPY);
1300
1301         for (i = 0; i < num_patterns; i++) {
1302                 struct bus_match_pattern *cur_pattern;
1303                 struct device_match_pattern *dp = &patterns[i].pattern.device_pattern;
1304                 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1305
1306                 /*
1307                  * If the pattern in question isn't for a bus node, we
1308                  * aren't interested.  However, we do indicate to the
1309                  * calling routine that we should continue descending the
1310                  * tree, since the user wants to match against lower-level
1311                  * EDT elements.
1312                  */
1313                 if (patterns[i].type == DEV_MATCH_DEVICE &&
1314                     (dp->flags & DEV_MATCH_PATH) != 0 &&
1315                     dp->path_id != bus->path_id)
1316                         continue;
1317                 if (patterns[i].type == DEV_MATCH_PERIPH &&
1318                     (pp->flags & PERIPH_MATCH_PATH) != 0 &&
1319                     pp->path_id != bus->path_id)
1320                         continue;
1321                 if (patterns[i].type != DEV_MATCH_BUS) {
1322                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1323                                 retval |= DM_RET_DESCEND;
1324                         continue;
1325                 }
1326
1327                 cur_pattern = &patterns[i].pattern.bus_pattern;
1328
1329                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1330                  && (cur_pattern->path_id != bus->path_id))
1331                         continue;
1332
1333                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1334                  && (cur_pattern->bus_id != bus->sim->bus_id))
1335                         continue;
1336
1337                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1338                  && (cur_pattern->unit_number != bus->sim->unit_number))
1339                         continue;
1340
1341                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1342                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1343                              DEV_IDLEN) != 0))
1344                         continue;
1345
1346                 /*
1347                  * If we get to this point, the user definitely wants
1348                  * information on this bus.  So tell the caller to copy the
1349                  * data out.
1350                  */
1351                 retval |= DM_RET_COPY;
1352
1353                 /*
1354                  * If the return action has been set to descend, then we
1355                  * know that we've already seen a non-bus matching
1356                  * expression, therefore we need to further descend the tree.
1357                  * This won't change by continuing around the loop, so we
1358                  * go ahead and return.  If we haven't seen a non-bus
1359                  * matching expression, we keep going around the loop until
1360                  * we exhaust the matching expressions.  We'll set the stop
1361                  * flag once we fall out of the loop.
1362                  */
1363                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1364                         return(retval);
1365         }
1366
1367         /*
1368          * If the return action hasn't been set to descend yet, that means
1369          * we haven't seen anything other than bus matching patterns.  So
1370          * tell the caller to stop descending the tree -- the user doesn't
1371          * want to match against lower level tree elements.
1372          */
1373         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1374                 retval |= DM_RET_STOP;
1375
1376         return(retval);
1377 }
1378
1379 static dev_match_ret
1380 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1381                struct cam_ed *device)
1382 {
1383         dev_match_ret retval;
1384         u_int i;
1385
1386         retval = DM_RET_NONE;
1387
1388         /*
1389          * If we aren't given something to match against, that's an error.
1390          */
1391         if (device == NULL)
1392                 return(DM_RET_ERROR);
1393
1394         /*
1395          * If there are no match entries, then this device matches no
1396          * matter what.
1397          */
1398         if ((patterns == NULL) || (num_patterns == 0))
1399                 return(DM_RET_DESCEND | DM_RET_COPY);
1400
1401         for (i = 0; i < num_patterns; i++) {
1402                 struct device_match_pattern *cur_pattern;
1403                 struct scsi_vpd_device_id *device_id_page;
1404                 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1405
1406                 /*
1407                  * If the pattern in question isn't for a device node, we
1408                  * aren't interested.
1409                  */
1410                 if (patterns[i].type == DEV_MATCH_PERIPH &&
1411                     (pp->flags & PERIPH_MATCH_TARGET) != 0 &&
1412                     pp->target_id != device->target->target_id)
1413                         continue;
1414                 if (patterns[i].type == DEV_MATCH_PERIPH &&
1415                     (pp->flags & PERIPH_MATCH_LUN) != 0 &&
1416                     pp->target_lun != device->lun_id)
1417                         continue;
1418                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1419                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1420                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1421                                 retval |= DM_RET_DESCEND;
1422                         continue;
1423                 }
1424
1425                 cur_pattern = &patterns[i].pattern.device_pattern;
1426
1427                 /* Error out if mutually exclusive options are specified. */
1428                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1429                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1430                         return(DM_RET_ERROR);
1431
1432                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1433                  && (cur_pattern->path_id != device->target->bus->path_id))
1434                         continue;
1435
1436                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1437                  && (cur_pattern->target_id != device->target->target_id))
1438                         continue;
1439
1440                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1441                  && (cur_pattern->target_lun != device->lun_id))
1442                         continue;
1443
1444                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1445                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1446                                     (caddr_t)&cur_pattern->data.inq_pat,
1447                                     1, sizeof(cur_pattern->data.inq_pat),
1448                                     scsi_static_inquiry_match) == NULL))
1449                         continue;
1450
1451                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1452                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1453                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1454                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1455                                       device->device_id_len
1456                                     - SVPD_DEVICE_ID_HDR_LEN,
1457                                       cur_pattern->data.devid_pat.id,
1458                                       cur_pattern->data.devid_pat.id_len) != 0))
1459                         continue;
1460
1461                 /*
1462                  * If we get to this point, the user definitely wants
1463                  * information on this device.  So tell the caller to copy
1464                  * the data out.
1465                  */
1466                 retval |= DM_RET_COPY;
1467
1468                 /*
1469                  * If the return action has been set to descend, then we
1470                  * know that we've already seen a peripheral matching
1471                  * expression, therefore we need to further descend the tree.
1472                  * This won't change by continuing around the loop, so we
1473                  * go ahead and return.  If we haven't seen a peripheral
1474                  * matching expression, we keep going around the loop until
1475                  * we exhaust the matching expressions.  We'll set the stop
1476                  * flag once we fall out of the loop.
1477                  */
1478                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1479                         return(retval);
1480         }
1481
1482         /*
1483          * If the return action hasn't been set to descend yet, that means
1484          * we haven't seen any peripheral matching patterns.  So tell the
1485          * caller to stop descending the tree -- the user doesn't want to
1486          * match against lower level tree elements.
1487          */
1488         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1489                 retval |= DM_RET_STOP;
1490
1491         return(retval);
1492 }
1493
1494 /*
1495  * Match a single peripheral against any number of match patterns.
1496  */
1497 static dev_match_ret
1498 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1499                struct cam_periph *periph)
1500 {
1501         dev_match_ret retval;
1502         u_int i;
1503
1504         /*
1505          * If we aren't given something to match against, that's an error.
1506          */
1507         if (periph == NULL)
1508                 return(DM_RET_ERROR);
1509
1510         /*
1511          * If there are no match entries, then this peripheral matches no
1512          * matter what.
1513          */
1514         if ((patterns == NULL) || (num_patterns == 0))
1515                 return(DM_RET_STOP | DM_RET_COPY);
1516
1517         /*
1518          * There aren't any nodes below a peripheral node, so there's no
1519          * reason to descend the tree any further.
1520          */
1521         retval = DM_RET_STOP;
1522
1523         for (i = 0; i < num_patterns; i++) {
1524                 struct periph_match_pattern *cur_pattern;
1525
1526                 /*
1527                  * If the pattern in question isn't for a peripheral, we
1528                  * aren't interested.
1529                  */
1530                 if (patterns[i].type != DEV_MATCH_PERIPH)
1531                         continue;
1532
1533                 cur_pattern = &patterns[i].pattern.periph_pattern;
1534
1535                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1536                  && (cur_pattern->path_id != periph->path->bus->path_id))
1537                         continue;
1538
1539                 /*
1540                  * For the target and lun id's, we have to make sure the
1541                  * target and lun pointers aren't NULL.  The xpt peripheral
1542                  * has a wildcard target and device.
1543                  */
1544                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1545                  && ((periph->path->target == NULL)
1546                  ||(cur_pattern->target_id != periph->path->target->target_id)))
1547                         continue;
1548
1549                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1550                  && ((periph->path->device == NULL)
1551                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1552                         continue;
1553
1554                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1555                  && (cur_pattern->unit_number != periph->unit_number))
1556                         continue;
1557
1558                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1559                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1560                              DEV_IDLEN) != 0))
1561                         continue;
1562
1563                 /*
1564                  * If we get to this point, the user definitely wants
1565                  * information on this peripheral.  So tell the caller to
1566                  * copy the data out.
1567                  */
1568                 retval |= DM_RET_COPY;
1569
1570                 /*
1571                  * The return action has already been set to stop, since
1572                  * peripherals don't have any nodes below them in the EDT.
1573                  */
1574                 return(retval);
1575         }
1576
1577         /*
1578          * If we get to this point, the peripheral that was passed in
1579          * doesn't match any of the patterns.
1580          */
1581         return(retval);
1582 }
1583
1584 static int
1585 xptedtbusfunc(struct cam_eb *bus, void *arg)
1586 {
1587         struct ccb_dev_match *cdm;
1588         struct cam_et *target;
1589         dev_match_ret retval;
1590
1591         cdm = (struct ccb_dev_match *)arg;
1592
1593         /*
1594          * If our position is for something deeper in the tree, that means
1595          * that we've already seen this node.  So, we keep going down.
1596          */
1597         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1598          && (cdm->pos.cookie.bus == bus)
1599          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1600          && (cdm->pos.cookie.target != NULL))
1601                 retval = DM_RET_DESCEND;
1602         else
1603                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1604
1605         /*
1606          * If we got an error, bail out of the search.
1607          */
1608         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1609                 cdm->status = CAM_DEV_MATCH_ERROR;
1610                 return(0);
1611         }
1612
1613         /*
1614          * If the copy flag is set, copy this bus out.
1615          */
1616         if (retval & DM_RET_COPY) {
1617                 int spaceleft, j;
1618
1619                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1620                         sizeof(struct dev_match_result));
1621
1622                 /*
1623                  * If we don't have enough space to put in another
1624                  * match result, save our position and tell the
1625                  * user there are more devices to check.
1626                  */
1627                 if (spaceleft < sizeof(struct dev_match_result)) {
1628                         bzero(&cdm->pos, sizeof(cdm->pos));
1629                         cdm->pos.position_type =
1630                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1631
1632                         cdm->pos.cookie.bus = bus;
1633                         cdm->pos.generations[CAM_BUS_GENERATION]=
1634                                 xsoftc.bus_generation;
1635                         cdm->status = CAM_DEV_MATCH_MORE;
1636                         return(0);
1637                 }
1638                 j = cdm->num_matches;
1639                 cdm->num_matches++;
1640                 cdm->matches[j].type = DEV_MATCH_BUS;
1641                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1642                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1643                 cdm->matches[j].result.bus_result.unit_number =
1644                         bus->sim->unit_number;
1645                 strlcpy(cdm->matches[j].result.bus_result.dev_name,
1646                         bus->sim->sim_name,
1647                         sizeof(cdm->matches[j].result.bus_result.dev_name));
1648         }
1649
1650         /*
1651          * If the user is only interested in buses, there's no
1652          * reason to descend to the next level in the tree.
1653          */
1654         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1655                 return(1);
1656
1657         /*
1658          * If there is a target generation recorded, check it to
1659          * make sure the target list hasn't changed.
1660          */
1661         mtx_lock(&bus->eb_mtx);
1662         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1663          && (cdm->pos.cookie.bus == bus)
1664          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1665          && (cdm->pos.cookie.target != NULL)) {
1666                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1667                     bus->generation)) {
1668                         mtx_unlock(&bus->eb_mtx);
1669                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1670                         return (0);
1671                 }
1672                 target = (struct cam_et *)cdm->pos.cookie.target;
1673                 target->refcount++;
1674         } else
1675                 target = NULL;
1676         mtx_unlock(&bus->eb_mtx);
1677
1678         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1679 }
1680
1681 static int
1682 xptedttargetfunc(struct cam_et *target, void *arg)
1683 {
1684         struct ccb_dev_match *cdm;
1685         struct cam_eb *bus;
1686         struct cam_ed *device;
1687
1688         cdm = (struct ccb_dev_match *)arg;
1689         bus = target->bus;
1690
1691         /*
1692          * If there is a device list generation recorded, check it to
1693          * make sure the device list hasn't changed.
1694          */
1695         mtx_lock(&bus->eb_mtx);
1696         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1697          && (cdm->pos.cookie.bus == bus)
1698          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1699          && (cdm->pos.cookie.target == target)
1700          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1701          && (cdm->pos.cookie.device != NULL)) {
1702                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1703                     target->generation) {
1704                         mtx_unlock(&bus->eb_mtx);
1705                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1706                         return(0);
1707                 }
1708                 device = (struct cam_ed *)cdm->pos.cookie.device;
1709                 device->refcount++;
1710         } else
1711                 device = NULL;
1712         mtx_unlock(&bus->eb_mtx);
1713
1714         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1715 }
1716
1717 static int
1718 xptedtdevicefunc(struct cam_ed *device, void *arg)
1719 {
1720         struct cam_eb *bus;
1721         struct cam_periph *periph;
1722         struct ccb_dev_match *cdm;
1723         dev_match_ret retval;
1724
1725         cdm = (struct ccb_dev_match *)arg;
1726         bus = device->target->bus;
1727
1728         /*
1729          * If our position is for something deeper in the tree, that means
1730          * that we've already seen this node.  So, we keep going down.
1731          */
1732         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1733          && (cdm->pos.cookie.device == device)
1734          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1735          && (cdm->pos.cookie.periph != NULL))
1736                 retval = DM_RET_DESCEND;
1737         else
1738                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1739                                         device);
1740
1741         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1742                 cdm->status = CAM_DEV_MATCH_ERROR;
1743                 return(0);
1744         }
1745
1746         /*
1747          * If the copy flag is set, copy this device out.
1748          */
1749         if (retval & DM_RET_COPY) {
1750                 int spaceleft, j;
1751
1752                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1753                         sizeof(struct dev_match_result));
1754
1755                 /*
1756                  * If we don't have enough space to put in another
1757                  * match result, save our position and tell the
1758                  * user there are more devices to check.
1759                  */
1760                 if (spaceleft < sizeof(struct dev_match_result)) {
1761                         bzero(&cdm->pos, sizeof(cdm->pos));
1762                         cdm->pos.position_type =
1763                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1764                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1765
1766                         cdm->pos.cookie.bus = device->target->bus;
1767                         cdm->pos.generations[CAM_BUS_GENERATION]=
1768                                 xsoftc.bus_generation;
1769                         cdm->pos.cookie.target = device->target;
1770                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1771                                 device->target->bus->generation;
1772                         cdm->pos.cookie.device = device;
1773                         cdm->pos.generations[CAM_DEV_GENERATION] =
1774                                 device->target->generation;
1775                         cdm->status = CAM_DEV_MATCH_MORE;
1776                         return(0);
1777                 }
1778                 j = cdm->num_matches;
1779                 cdm->num_matches++;
1780                 cdm->matches[j].type = DEV_MATCH_DEVICE;
1781                 cdm->matches[j].result.device_result.path_id =
1782                         device->target->bus->path_id;
1783                 cdm->matches[j].result.device_result.target_id =
1784                         device->target->target_id;
1785                 cdm->matches[j].result.device_result.target_lun =
1786                         device->lun_id;
1787                 cdm->matches[j].result.device_result.protocol =
1788                         device->protocol;
1789                 bcopy(&device->inq_data,
1790                       &cdm->matches[j].result.device_result.inq_data,
1791                       sizeof(struct scsi_inquiry_data));
1792                 bcopy(&device->ident_data,
1793                       &cdm->matches[j].result.device_result.ident_data,
1794                       sizeof(struct ata_params));
1795
1796                 /* Let the user know whether this device is unconfigured */
1797                 if (device->flags & CAM_DEV_UNCONFIGURED)
1798                         cdm->matches[j].result.device_result.flags =
1799                                 DEV_RESULT_UNCONFIGURED;
1800                 else
1801                         cdm->matches[j].result.device_result.flags =
1802                                 DEV_RESULT_NOFLAG;
1803         }
1804
1805         /*
1806          * If the user isn't interested in peripherals, don't descend
1807          * the tree any further.
1808          */
1809         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1810                 return(1);
1811
1812         /*
1813          * If there is a peripheral list generation recorded, make sure
1814          * it hasn't changed.
1815          */
1816         xpt_lock_buses();
1817         mtx_lock(&bus->eb_mtx);
1818         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1819          && (cdm->pos.cookie.bus == bus)
1820          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1821          && (cdm->pos.cookie.target == device->target)
1822          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1823          && (cdm->pos.cookie.device == device)
1824          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1825          && (cdm->pos.cookie.periph != NULL)) {
1826                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1827                     device->generation) {
1828                         mtx_unlock(&bus->eb_mtx);
1829                         xpt_unlock_buses();
1830                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1831                         return(0);
1832                 }
1833                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1834                 periph->refcount++;
1835         } else
1836                 periph = NULL;
1837         mtx_unlock(&bus->eb_mtx);
1838         xpt_unlock_buses();
1839
1840         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1841 }
1842
1843 static int
1844 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1845 {
1846         struct ccb_dev_match *cdm;
1847         dev_match_ret retval;
1848
1849         cdm = (struct ccb_dev_match *)arg;
1850
1851         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1852
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1854                 cdm->status = CAM_DEV_MATCH_ERROR;
1855                 return(0);
1856         }
1857
1858         /*
1859          * If the copy flag is set, copy this peripheral out.
1860          */
1861         if (retval & DM_RET_COPY) {
1862                 int spaceleft, j;
1863                 size_t l;
1864
1865                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1866                         sizeof(struct dev_match_result));
1867
1868                 /*
1869                  * If we don't have enough space to put in another
1870                  * match result, save our position and tell the
1871                  * user there are more devices to check.
1872                  */
1873                 if (spaceleft < sizeof(struct dev_match_result)) {
1874                         bzero(&cdm->pos, sizeof(cdm->pos));
1875                         cdm->pos.position_type =
1876                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1877                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1878                                 CAM_DEV_POS_PERIPH;
1879
1880                         cdm->pos.cookie.bus = periph->path->bus;
1881                         cdm->pos.generations[CAM_BUS_GENERATION]=
1882                                 xsoftc.bus_generation;
1883                         cdm->pos.cookie.target = periph->path->target;
1884                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1885                                 periph->path->bus->generation;
1886                         cdm->pos.cookie.device = periph->path->device;
1887                         cdm->pos.generations[CAM_DEV_GENERATION] =
1888                                 periph->path->target->generation;
1889                         cdm->pos.cookie.periph = periph;
1890                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
1891                                 periph->path->device->generation;
1892                         cdm->status = CAM_DEV_MATCH_MORE;
1893                         return(0);
1894                 }
1895
1896                 j = cdm->num_matches;
1897                 cdm->num_matches++;
1898                 cdm->matches[j].type = DEV_MATCH_PERIPH;
1899                 cdm->matches[j].result.periph_result.path_id =
1900                         periph->path->bus->path_id;
1901                 cdm->matches[j].result.periph_result.target_id =
1902                         periph->path->target->target_id;
1903                 cdm->matches[j].result.periph_result.target_lun =
1904                         periph->path->device->lun_id;
1905                 cdm->matches[j].result.periph_result.unit_number =
1906                         periph->unit_number;
1907                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
1908                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
1909                         periph->periph_name, l);
1910         }
1911
1912         return(1);
1913 }
1914
1915 static int
1916 xptedtmatch(struct ccb_dev_match *cdm)
1917 {
1918         struct cam_eb *bus;
1919         int ret;
1920
1921         cdm->num_matches = 0;
1922
1923         /*
1924          * Check the bus list generation.  If it has changed, the user
1925          * needs to reset everything and start over.
1926          */
1927         xpt_lock_buses();
1928         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1929          && (cdm->pos.cookie.bus != NULL)) {
1930                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1931                     xsoftc.bus_generation) {
1932                         xpt_unlock_buses();
1933                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1934                         return(0);
1935                 }
1936                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
1937                 bus->refcount++;
1938         } else
1939                 bus = NULL;
1940         xpt_unlock_buses();
1941
1942         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1943
1944         /*
1945          * If we get back 0, that means that we had to stop before fully
1946          * traversing the EDT.  It also means that one of the subroutines
1947          * has set the status field to the proper value.  If we get back 1,
1948          * we've fully traversed the EDT and copied out any matching entries.
1949          */
1950         if (ret == 1)
1951                 cdm->status = CAM_DEV_MATCH_LAST;
1952
1953         return(ret);
1954 }
1955
1956 static int
1957 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1958 {
1959         struct cam_periph *periph;
1960         struct ccb_dev_match *cdm;
1961
1962         cdm = (struct ccb_dev_match *)arg;
1963
1964         xpt_lock_buses();
1965         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1966          && (cdm->pos.cookie.pdrv == pdrv)
1967          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1968          && (cdm->pos.cookie.periph != NULL)) {
1969                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1970                     (*pdrv)->generation) {
1971                         xpt_unlock_buses();
1972                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1973                         return(0);
1974                 }
1975                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1976                 periph->refcount++;
1977         } else
1978                 periph = NULL;
1979         xpt_unlock_buses();
1980
1981         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1982 }
1983
1984 static int
1985 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1986 {
1987         struct ccb_dev_match *cdm;
1988         dev_match_ret retval;
1989
1990         cdm = (struct ccb_dev_match *)arg;
1991
1992         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1993
1994         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1995                 cdm->status = CAM_DEV_MATCH_ERROR;
1996                 return(0);
1997         }
1998
1999         /*
2000          * If the copy flag is set, copy this peripheral out.
2001          */
2002         if (retval & DM_RET_COPY) {
2003                 int spaceleft, j;
2004                 size_t l;
2005
2006                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2007                         sizeof(struct dev_match_result));
2008
2009                 /*
2010                  * If we don't have enough space to put in another
2011                  * match result, save our position and tell the
2012                  * user there are more devices to check.
2013                  */
2014                 if (spaceleft < sizeof(struct dev_match_result)) {
2015                         struct periph_driver **pdrv;
2016
2017                         pdrv = NULL;
2018                         bzero(&cdm->pos, sizeof(cdm->pos));
2019                         cdm->pos.position_type =
2020                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2021                                 CAM_DEV_POS_PERIPH;
2022
2023                         /*
2024                          * This may look a bit non-sensical, but it is
2025                          * actually quite logical.  There are very few
2026                          * peripheral drivers, and bloating every peripheral
2027                          * structure with a pointer back to its parent
2028                          * peripheral driver linker set entry would cost
2029                          * more in the long run than doing this quick lookup.
2030                          */
2031                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2032                                 if (strcmp((*pdrv)->driver_name,
2033                                     periph->periph_name) == 0)
2034                                         break;
2035                         }
2036
2037                         if (*pdrv == NULL) {
2038                                 cdm->status = CAM_DEV_MATCH_ERROR;
2039                                 return(0);
2040                         }
2041
2042                         cdm->pos.cookie.pdrv = pdrv;
2043                         /*
2044                          * The periph generation slot does double duty, as
2045                          * does the periph pointer slot.  They are used for
2046                          * both edt and pdrv lookups and positioning.
2047                          */
2048                         cdm->pos.cookie.periph = periph;
2049                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2050                                 (*pdrv)->generation;
2051                         cdm->status = CAM_DEV_MATCH_MORE;
2052                         return(0);
2053                 }
2054
2055                 j = cdm->num_matches;
2056                 cdm->num_matches++;
2057                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2058                 cdm->matches[j].result.periph_result.path_id =
2059                         periph->path->bus->path_id;
2060
2061                 /*
2062                  * The transport layer peripheral doesn't have a target or
2063                  * lun.
2064                  */
2065                 if (periph->path->target)
2066                         cdm->matches[j].result.periph_result.target_id =
2067                                 periph->path->target->target_id;
2068                 else
2069                         cdm->matches[j].result.periph_result.target_id =
2070                                 CAM_TARGET_WILDCARD;
2071
2072                 if (periph->path->device)
2073                         cdm->matches[j].result.periph_result.target_lun =
2074                                 periph->path->device->lun_id;
2075                 else
2076                         cdm->matches[j].result.periph_result.target_lun =
2077                                 CAM_LUN_WILDCARD;
2078
2079                 cdm->matches[j].result.periph_result.unit_number =
2080                         periph->unit_number;
2081                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2082                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2083                         periph->periph_name, l);
2084         }
2085
2086         return(1);
2087 }
2088
2089 static int
2090 xptperiphlistmatch(struct ccb_dev_match *cdm)
2091 {
2092         int ret;
2093
2094         cdm->num_matches = 0;
2095
2096         /*
2097          * At this point in the edt traversal function, we check the bus
2098          * list generation to make sure that no buses have been added or
2099          * removed since the user last sent a XPT_DEV_MATCH ccb through.
2100          * For the peripheral driver list traversal function, however, we
2101          * don't have to worry about new peripheral driver types coming or
2102          * going; they're in a linker set, and therefore can't change
2103          * without a recompile.
2104          */
2105
2106         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2107          && (cdm->pos.cookie.pdrv != NULL))
2108                 ret = xptpdrvtraverse(
2109                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2110                                 xptplistpdrvfunc, cdm);
2111         else
2112                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2113
2114         /*
2115          * If we get back 0, that means that we had to stop before fully
2116          * traversing the peripheral driver tree.  It also means that one of
2117          * the subroutines has set the status field to the proper value.  If
2118          * we get back 1, we've fully traversed the EDT and copied out any
2119          * matching entries.
2120          */
2121         if (ret == 1)
2122                 cdm->status = CAM_DEV_MATCH_LAST;
2123
2124         return(ret);
2125 }
2126
2127 static int
2128 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2129 {
2130         struct cam_eb *bus, *next_bus;
2131         int retval;
2132
2133         retval = 1;
2134         if (start_bus)
2135                 bus = start_bus;
2136         else {
2137                 xpt_lock_buses();
2138                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2139                 if (bus == NULL) {
2140                         xpt_unlock_buses();
2141                         return (retval);
2142                 }
2143                 bus->refcount++;
2144                 xpt_unlock_buses();
2145         }
2146         for (; bus != NULL; bus = next_bus) {
2147                 retval = tr_func(bus, arg);
2148                 if (retval == 0) {
2149                         xpt_release_bus(bus);
2150                         break;
2151                 }
2152                 xpt_lock_buses();
2153                 next_bus = TAILQ_NEXT(bus, links);
2154                 if (next_bus)
2155                         next_bus->refcount++;
2156                 xpt_unlock_buses();
2157                 xpt_release_bus(bus);
2158         }
2159         return(retval);
2160 }
2161
2162 static int
2163 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2164                   xpt_targetfunc_t *tr_func, void *arg)
2165 {
2166         struct cam_et *target, *next_target;
2167         int retval;
2168
2169         retval = 1;
2170         if (start_target)
2171                 target = start_target;
2172         else {
2173                 mtx_lock(&bus->eb_mtx);
2174                 target = TAILQ_FIRST(&bus->et_entries);
2175                 if (target == NULL) {
2176                         mtx_unlock(&bus->eb_mtx);
2177                         return (retval);
2178                 }
2179                 target->refcount++;
2180                 mtx_unlock(&bus->eb_mtx);
2181         }
2182         for (; target != NULL; target = next_target) {
2183                 retval = tr_func(target, arg);
2184                 if (retval == 0) {
2185                         xpt_release_target(target);
2186                         break;
2187                 }
2188                 mtx_lock(&bus->eb_mtx);
2189                 next_target = TAILQ_NEXT(target, links);
2190                 if (next_target)
2191                         next_target->refcount++;
2192                 mtx_unlock(&bus->eb_mtx);
2193                 xpt_release_target(target);
2194         }
2195         return(retval);
2196 }
2197
2198 static int
2199 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2200                   xpt_devicefunc_t *tr_func, void *arg)
2201 {
2202         struct cam_eb *bus;
2203         struct cam_ed *device, *next_device;
2204         int retval;
2205
2206         retval = 1;
2207         bus = target->bus;
2208         if (start_device)
2209                 device = start_device;
2210         else {
2211                 mtx_lock(&bus->eb_mtx);
2212                 device = TAILQ_FIRST(&target->ed_entries);
2213                 if (device == NULL) {
2214                         mtx_unlock(&bus->eb_mtx);
2215                         return (retval);
2216                 }
2217                 device->refcount++;
2218                 mtx_unlock(&bus->eb_mtx);
2219         }
2220         for (; device != NULL; device = next_device) {
2221                 mtx_lock(&device->device_mtx);
2222                 retval = tr_func(device, arg);
2223                 mtx_unlock(&device->device_mtx);
2224                 if (retval == 0) {
2225                         xpt_release_device(device);
2226                         break;
2227                 }
2228                 mtx_lock(&bus->eb_mtx);
2229                 next_device = TAILQ_NEXT(device, links);
2230                 if (next_device)
2231                         next_device->refcount++;
2232                 mtx_unlock(&bus->eb_mtx);
2233                 xpt_release_device(device);
2234         }
2235         return(retval);
2236 }
2237
2238 static int
2239 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2240                   xpt_periphfunc_t *tr_func, void *arg)
2241 {
2242         struct cam_eb *bus;
2243         struct cam_periph *periph, *next_periph;
2244         int retval;
2245
2246         retval = 1;
2247
2248         bus = device->target->bus;
2249         if (start_periph)
2250                 periph = start_periph;
2251         else {
2252                 xpt_lock_buses();
2253                 mtx_lock(&bus->eb_mtx);
2254                 periph = SLIST_FIRST(&device->periphs);
2255                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2256                         periph = SLIST_NEXT(periph, periph_links);
2257                 if (periph == NULL) {
2258                         mtx_unlock(&bus->eb_mtx);
2259                         xpt_unlock_buses();
2260                         return (retval);
2261                 }
2262                 periph->refcount++;
2263                 mtx_unlock(&bus->eb_mtx);
2264                 xpt_unlock_buses();
2265         }
2266         for (; periph != NULL; periph = next_periph) {
2267                 retval = tr_func(periph, arg);
2268                 if (retval == 0) {
2269                         cam_periph_release_locked(periph);
2270                         break;
2271                 }
2272                 xpt_lock_buses();
2273                 mtx_lock(&bus->eb_mtx);
2274                 next_periph = SLIST_NEXT(periph, periph_links);
2275                 while (next_periph != NULL &&
2276                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2277                         next_periph = SLIST_NEXT(next_periph, periph_links);
2278                 if (next_periph)
2279                         next_periph->refcount++;
2280                 mtx_unlock(&bus->eb_mtx);
2281                 xpt_unlock_buses();
2282                 cam_periph_release_locked(periph);
2283         }
2284         return(retval);
2285 }
2286
2287 static int
2288 xptpdrvtraverse(struct periph_driver **start_pdrv,
2289                 xpt_pdrvfunc_t *tr_func, void *arg)
2290 {
2291         struct periph_driver **pdrv;
2292         int retval;
2293
2294         retval = 1;
2295
2296         /*
2297          * We don't traverse the peripheral driver list like we do the
2298          * other lists, because it is a linker set, and therefore cannot be
2299          * changed during runtime.  If the peripheral driver list is ever
2300          * re-done to be something other than a linker set (i.e. it can
2301          * change while the system is running), the list traversal should
2302          * be modified to work like the other traversal functions.
2303          */
2304         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2305              *pdrv != NULL; pdrv++) {
2306                 retval = tr_func(pdrv, arg);
2307
2308                 if (retval == 0)
2309                         return(retval);
2310         }
2311
2312         return(retval);
2313 }
2314
2315 static int
2316 xptpdperiphtraverse(struct periph_driver **pdrv,
2317                     struct cam_periph *start_periph,
2318                     xpt_periphfunc_t *tr_func, void *arg)
2319 {
2320         struct cam_periph *periph, *next_periph;
2321         int retval;
2322
2323         retval = 1;
2324
2325         if (start_periph)
2326                 periph = start_periph;
2327         else {
2328                 xpt_lock_buses();
2329                 periph = TAILQ_FIRST(&(*pdrv)->units);
2330                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2331                         periph = TAILQ_NEXT(periph, unit_links);
2332                 if (periph == NULL) {
2333                         xpt_unlock_buses();
2334                         return (retval);
2335                 }
2336                 periph->refcount++;
2337                 xpt_unlock_buses();
2338         }
2339         for (; periph != NULL; periph = next_periph) {
2340                 cam_periph_lock(periph);
2341                 retval = tr_func(periph, arg);
2342                 cam_periph_unlock(periph);
2343                 if (retval == 0) {
2344                         cam_periph_release(periph);
2345                         break;
2346                 }
2347                 xpt_lock_buses();
2348                 next_periph = TAILQ_NEXT(periph, unit_links);
2349                 while (next_periph != NULL &&
2350                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2351                         next_periph = TAILQ_NEXT(next_periph, unit_links);
2352                 if (next_periph)
2353                         next_periph->refcount++;
2354                 xpt_unlock_buses();
2355                 cam_periph_release(periph);
2356         }
2357         return(retval);
2358 }
2359
2360 static int
2361 xptdefbusfunc(struct cam_eb *bus, void *arg)
2362 {
2363         struct xpt_traverse_config *tr_config;
2364
2365         tr_config = (struct xpt_traverse_config *)arg;
2366
2367         if (tr_config->depth == XPT_DEPTH_BUS) {
2368                 xpt_busfunc_t *tr_func;
2369
2370                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2371
2372                 return(tr_func(bus, tr_config->tr_arg));
2373         } else
2374                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2375 }
2376
2377 static int
2378 xptdeftargetfunc(struct cam_et *target, void *arg)
2379 {
2380         struct xpt_traverse_config *tr_config;
2381
2382         tr_config = (struct xpt_traverse_config *)arg;
2383
2384         if (tr_config->depth == XPT_DEPTH_TARGET) {
2385                 xpt_targetfunc_t *tr_func;
2386
2387                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2388
2389                 return(tr_func(target, tr_config->tr_arg));
2390         } else
2391                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2392 }
2393
2394 static int
2395 xptdefdevicefunc(struct cam_ed *device, void *arg)
2396 {
2397         struct xpt_traverse_config *tr_config;
2398
2399         tr_config = (struct xpt_traverse_config *)arg;
2400
2401         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2402                 xpt_devicefunc_t *tr_func;
2403
2404                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2405
2406                 return(tr_func(device, tr_config->tr_arg));
2407         } else
2408                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2409 }
2410
2411 static int
2412 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2413 {
2414         struct xpt_traverse_config *tr_config;
2415         xpt_periphfunc_t *tr_func;
2416
2417         tr_config = (struct xpt_traverse_config *)arg;
2418
2419         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2420
2421         /*
2422          * Unlike the other default functions, we don't check for depth
2423          * here.  The peripheral driver level is the last level in the EDT,
2424          * so if we're here, we should execute the function in question.
2425          */
2426         return(tr_func(periph, tr_config->tr_arg));
2427 }
2428
2429 /*
2430  * Execute the given function for every bus in the EDT.
2431  */
2432 static int
2433 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2434 {
2435         struct xpt_traverse_config tr_config;
2436
2437         tr_config.depth = XPT_DEPTH_BUS;
2438         tr_config.tr_func = tr_func;
2439         tr_config.tr_arg = arg;
2440
2441         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2442 }
2443
2444 /*
2445  * Execute the given function for every device in the EDT.
2446  */
2447 static int
2448 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2449 {
2450         struct xpt_traverse_config tr_config;
2451
2452         tr_config.depth = XPT_DEPTH_DEVICE;
2453         tr_config.tr_func = tr_func;
2454         tr_config.tr_arg = arg;
2455
2456         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2457 }
2458
2459 static int
2460 xptsetasyncfunc(struct cam_ed *device, void *arg)
2461 {
2462         struct cam_path path;
2463         struct ccb_getdev cgd;
2464         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2465
2466         /*
2467          * Don't report unconfigured devices (Wildcard devs,
2468          * devices only for target mode, device instances
2469          * that have been invalidated but are waiting for
2470          * their last reference count to be released).
2471          */
2472         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2473                 return (1);
2474
2475         memset(&cgd, 0, sizeof(cgd));
2476         xpt_compile_path(&path,
2477                          NULL,
2478                          device->target->bus->path_id,
2479                          device->target->target_id,
2480                          device->lun_id);
2481         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2482         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2483         xpt_action((union ccb *)&cgd);
2484         csa->callback(csa->callback_arg,
2485                             AC_FOUND_DEVICE,
2486                             &path, &cgd);
2487         xpt_release_path(&path);
2488
2489         return(1);
2490 }
2491
2492 static int
2493 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2494 {
2495         struct cam_path path;
2496         struct ccb_pathinq cpi;
2497         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2498
2499         xpt_compile_path(&path, /*periph*/NULL,
2500                          bus->path_id,
2501                          CAM_TARGET_WILDCARD,
2502                          CAM_LUN_WILDCARD);
2503         xpt_path_lock(&path);
2504         xpt_path_inq(&cpi, &path);
2505         csa->callback(csa->callback_arg,
2506                             AC_PATH_REGISTERED,
2507                             &path, &cpi);
2508         xpt_path_unlock(&path);
2509         xpt_release_path(&path);
2510
2511         return(1);
2512 }
2513
2514 void
2515 xpt_action(union ccb *start_ccb)
2516 {
2517
2518         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2519             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2520                 xpt_action_name(start_ccb->ccb_h.func_code)));
2521
2522         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2523         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2524 }
2525
2526 void
2527 xpt_action_default(union ccb *start_ccb)
2528 {
2529         struct cam_path *path;
2530         struct cam_sim *sim;
2531         struct mtx *mtx;
2532
2533         path = start_ccb->ccb_h.path;
2534         CAM_DEBUG(path, CAM_DEBUG_TRACE,
2535             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2536                 xpt_action_name(start_ccb->ccb_h.func_code)));
2537
2538         switch (start_ccb->ccb_h.func_code) {
2539         case XPT_SCSI_IO:
2540         {
2541                 struct cam_ed *device;
2542
2543                 /*
2544                  * For the sake of compatibility with SCSI-1
2545                  * devices that may not understand the identify
2546                  * message, we include lun information in the
2547                  * second byte of all commands.  SCSI-1 specifies
2548                  * that luns are a 3 bit value and reserves only 3
2549                  * bits for lun information in the CDB.  Later
2550                  * revisions of the SCSI spec allow for more than 8
2551                  * luns, but have deprecated lun information in the
2552                  * CDB.  So, if the lun won't fit, we must omit.
2553                  *
2554                  * Also be aware that during initial probing for devices,
2555                  * the inquiry information is unknown but initialized to 0.
2556                  * This means that this code will be exercised while probing
2557                  * devices with an ANSI revision greater than 2.
2558                  */
2559                 device = path->device;
2560                 if (device->protocol_version <= SCSI_REV_2
2561                  && start_ccb->ccb_h.target_lun < 8
2562                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2563                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2564                             start_ccb->ccb_h.target_lun << 5;
2565                 }
2566                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2567         }
2568         /* FALLTHROUGH */
2569         case XPT_TARGET_IO:
2570         case XPT_CONT_TARGET_IO:
2571                 start_ccb->csio.sense_resid = 0;
2572                 start_ccb->csio.resid = 0;
2573                 /* FALLTHROUGH */
2574         case XPT_ATA_IO:
2575                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2576                         start_ccb->ataio.resid = 0;
2577                 /* FALLTHROUGH */
2578         case XPT_NVME_IO:
2579         case XPT_NVME_ADMIN:
2580         case XPT_MMC_IO:
2581         case XPT_MMC_GET_TRAN_SETTINGS:
2582         case XPT_MMC_SET_TRAN_SETTINGS:
2583         case XPT_RESET_DEV:
2584         case XPT_ENG_EXEC:
2585         case XPT_SMP_IO:
2586         {
2587                 struct cam_devq *devq;
2588
2589                 devq = path->bus->sim->devq;
2590                 mtx_lock(&devq->send_mtx);
2591                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2592                 if (xpt_schedule_devq(devq, path->device) != 0)
2593                         xpt_run_devq(devq);
2594                 mtx_unlock(&devq->send_mtx);
2595                 break;
2596         }
2597         case XPT_CALC_GEOMETRY:
2598                 /* Filter out garbage */
2599                 if (start_ccb->ccg.block_size == 0
2600                  || start_ccb->ccg.volume_size == 0) {
2601                         start_ccb->ccg.cylinders = 0;
2602                         start_ccb->ccg.heads = 0;
2603                         start_ccb->ccg.secs_per_track = 0;
2604                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2605                         break;
2606                 }
2607                 goto call_sim;
2608         case XPT_ABORT:
2609         {
2610                 union ccb* abort_ccb;
2611
2612                 abort_ccb = start_ccb->cab.abort_ccb;
2613                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2614                         struct cam_ed *device;
2615                         struct cam_devq *devq;
2616
2617                         device = abort_ccb->ccb_h.path->device;
2618                         devq = device->sim->devq;
2619
2620                         mtx_lock(&devq->send_mtx);
2621                         if (abort_ccb->ccb_h.pinfo.index > 0) {
2622                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2623                                 abort_ccb->ccb_h.status =
2624                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2625                                 xpt_freeze_devq_device(device, 1);
2626                                 mtx_unlock(&devq->send_mtx);
2627                                 xpt_done(abort_ccb);
2628                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2629                                 break;
2630                         }
2631                         mtx_unlock(&devq->send_mtx);
2632
2633                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2634                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2635                                 /*
2636                                  * We've caught this ccb en route to
2637                                  * the SIM.  Flag it for abort and the
2638                                  * SIM will do so just before starting
2639                                  * real work on the CCB.
2640                                  */
2641                                 abort_ccb->ccb_h.status =
2642                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2643                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2644                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2645                                 break;
2646                         }
2647                 }
2648                 if (XPT_FC_IS_QUEUED(abort_ccb)
2649                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2650                         /*
2651                          * It's already completed but waiting
2652                          * for our SWI to get to it.
2653                          */
2654                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2655                         break;
2656                 }
2657                 /*
2658                  * If we weren't able to take care of the abort request
2659                  * in the XPT, pass the request down to the SIM for processing.
2660                  */
2661         }
2662         /* FALLTHROUGH */
2663         case XPT_ACCEPT_TARGET_IO:
2664         case XPT_EN_LUN:
2665         case XPT_IMMED_NOTIFY:
2666         case XPT_NOTIFY_ACK:
2667         case XPT_RESET_BUS:
2668         case XPT_IMMEDIATE_NOTIFY:
2669         case XPT_NOTIFY_ACKNOWLEDGE:
2670         case XPT_GET_SIM_KNOB_OLD:
2671         case XPT_GET_SIM_KNOB:
2672         case XPT_SET_SIM_KNOB:
2673         case XPT_GET_TRAN_SETTINGS:
2674         case XPT_SET_TRAN_SETTINGS:
2675         case XPT_PATH_INQ:
2676 call_sim:
2677                 sim = path->bus->sim;
2678                 mtx = sim->mtx;
2679                 if (mtx && !mtx_owned(mtx))
2680                         mtx_lock(mtx);
2681                 else
2682                         mtx = NULL;
2683
2684                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2685                     ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2686                 (*(sim->sim_action))(sim, start_ccb);
2687                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2688                     ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2689                 if (mtx)
2690                         mtx_unlock(mtx);
2691                 break;
2692         case XPT_PATH_STATS:
2693                 start_ccb->cpis.last_reset = path->bus->last_reset;
2694                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2695                 break;
2696         case XPT_GDEV_TYPE:
2697         {
2698                 struct cam_ed *dev;
2699
2700                 dev = path->device;
2701                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2702                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2703                 } else {
2704                         struct ccb_getdev *cgd;
2705
2706                         cgd = &start_ccb->cgd;
2707                         cgd->protocol = dev->protocol;
2708                         cgd->inq_data = dev->inq_data;
2709                         cgd->ident_data = dev->ident_data;
2710                         cgd->inq_flags = dev->inq_flags;
2711                         cgd->ccb_h.status = CAM_REQ_CMP;
2712                         cgd->serial_num_len = dev->serial_num_len;
2713                         if ((dev->serial_num_len > 0)
2714                          && (dev->serial_num != NULL))
2715                                 bcopy(dev->serial_num, cgd->serial_num,
2716                                       dev->serial_num_len);
2717                 }
2718                 break;
2719         }
2720         case XPT_GDEV_STATS:
2721         {
2722                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
2723                 struct cam_ed *dev = path->device;
2724                 struct cam_eb *bus = path->bus;
2725                 struct cam_et *tar = path->target;
2726                 struct cam_devq *devq = bus->sim->devq;
2727
2728                 mtx_lock(&devq->send_mtx);
2729                 cgds->dev_openings = dev->ccbq.dev_openings;
2730                 cgds->dev_active = dev->ccbq.dev_active;
2731                 cgds->allocated = dev->ccbq.allocated;
2732                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2733                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2734                 cgds->last_reset = tar->last_reset;
2735                 cgds->maxtags = dev->maxtags;
2736                 cgds->mintags = dev->mintags;
2737                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2738                         cgds->last_reset = bus->last_reset;
2739                 mtx_unlock(&devq->send_mtx);
2740                 cgds->ccb_h.status = CAM_REQ_CMP;
2741                 break;
2742         }
2743         case XPT_GDEVLIST:
2744         {
2745                 struct cam_periph       *nperiph;
2746                 struct periph_list      *periph_head;
2747                 struct ccb_getdevlist   *cgdl;
2748                 u_int                   i;
2749                 struct cam_ed           *device;
2750                 bool                    found;
2751
2752                 found = false;
2753
2754                 /*
2755                  * Don't want anyone mucking with our data.
2756                  */
2757                 device = path->device;
2758                 periph_head = &device->periphs;
2759                 cgdl = &start_ccb->cgdl;
2760
2761                 /*
2762                  * Check and see if the list has changed since the user
2763                  * last requested a list member.  If so, tell them that the
2764                  * list has changed, and therefore they need to start over
2765                  * from the beginning.
2766                  */
2767                 if ((cgdl->index != 0) &&
2768                     (cgdl->generation != device->generation)) {
2769                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2770                         break;
2771                 }
2772
2773                 /*
2774                  * Traverse the list of peripherals and attempt to find
2775                  * the requested peripheral.
2776                  */
2777                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
2778                      (nperiph != NULL) && (i <= cgdl->index);
2779                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2780                         if (i == cgdl->index) {
2781                                 strlcpy(cgdl->periph_name,
2782                                         nperiph->periph_name,
2783                                         sizeof(cgdl->periph_name));
2784                                 cgdl->unit_number = nperiph->unit_number;
2785                                 found = true;
2786                         }
2787                 }
2788                 if (!found) {
2789                         cgdl->status = CAM_GDEVLIST_ERROR;
2790                         break;
2791                 }
2792
2793                 if (nperiph == NULL)
2794                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2795                 else
2796                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2797
2798                 cgdl->index++;
2799                 cgdl->generation = device->generation;
2800
2801                 cgdl->ccb_h.status = CAM_REQ_CMP;
2802                 break;
2803         }
2804         case XPT_DEV_MATCH:
2805         {
2806                 dev_pos_type position_type;
2807                 struct ccb_dev_match *cdm;
2808
2809                 cdm = &start_ccb->cdm;
2810
2811                 /*
2812                  * There are two ways of getting at information in the EDT.
2813                  * The first way is via the primary EDT tree.  It starts
2814                  * with a list of buses, then a list of targets on a bus,
2815                  * then devices/luns on a target, and then peripherals on a
2816                  * device/lun.  The "other" way is by the peripheral driver
2817                  * lists.  The peripheral driver lists are organized by
2818                  * peripheral driver.  (obviously)  So it makes sense to
2819                  * use the peripheral driver list if the user is looking
2820                  * for something like "da1", or all "da" devices.  If the
2821                  * user is looking for something on a particular bus/target
2822                  * or lun, it's generally better to go through the EDT tree.
2823                  */
2824
2825                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2826                         position_type = cdm->pos.position_type;
2827                 else {
2828                         u_int i;
2829
2830                         position_type = CAM_DEV_POS_NONE;
2831
2832                         for (i = 0; i < cdm->num_patterns; i++) {
2833                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2834                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2835                                         position_type = CAM_DEV_POS_EDT;
2836                                         break;
2837                                 }
2838                         }
2839
2840                         if (cdm->num_patterns == 0)
2841                                 position_type = CAM_DEV_POS_EDT;
2842                         else if (position_type == CAM_DEV_POS_NONE)
2843                                 position_type = CAM_DEV_POS_PDRV;
2844                 }
2845
2846                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
2847                 case CAM_DEV_POS_EDT:
2848                         xptedtmatch(cdm);
2849                         break;
2850                 case CAM_DEV_POS_PDRV:
2851                         xptperiphlistmatch(cdm);
2852                         break;
2853                 default:
2854                         cdm->status = CAM_DEV_MATCH_ERROR;
2855                         break;
2856                 }
2857
2858                 if (cdm->status == CAM_DEV_MATCH_ERROR)
2859                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2860                 else
2861                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2862
2863                 break;
2864         }
2865         case XPT_SASYNC_CB:
2866         {
2867                 struct ccb_setasync *csa;
2868                 struct async_node *cur_entry;
2869                 struct async_list *async_head;
2870                 uint32_t added;
2871
2872                 csa = &start_ccb->csa;
2873                 added = csa->event_enable;
2874                 async_head = &path->device->asyncs;
2875
2876                 /*
2877                  * If there is already an entry for us, simply
2878                  * update it.
2879                  */
2880                 cur_entry = SLIST_FIRST(async_head);
2881                 while (cur_entry != NULL) {
2882                         if ((cur_entry->callback_arg == csa->callback_arg)
2883                          && (cur_entry->callback == csa->callback))
2884                                 break;
2885                         cur_entry = SLIST_NEXT(cur_entry, links);
2886                 }
2887
2888                 if (cur_entry != NULL) {
2889                         /*
2890                          * If the request has no flags set,
2891                          * remove the entry.
2892                          */
2893                         added &= ~cur_entry->event_enable;
2894                         if (csa->event_enable == 0) {
2895                                 SLIST_REMOVE(async_head, cur_entry,
2896                                              async_node, links);
2897                                 xpt_release_device(path->device);
2898                                 free(cur_entry, M_CAMXPT);
2899                         } else {
2900                                 cur_entry->event_enable = csa->event_enable;
2901                         }
2902                         csa->event_enable = added;
2903                 } else {
2904                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2905                                            M_NOWAIT);
2906                         if (cur_entry == NULL) {
2907                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2908                                 break;
2909                         }
2910                         cur_entry->event_enable = csa->event_enable;
2911                         cur_entry->event_lock = (path->bus->sim->mtx &&
2912                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
2913                         cur_entry->callback_arg = csa->callback_arg;
2914                         cur_entry->callback = csa->callback;
2915                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
2916                         xpt_acquire_device(path->device);
2917                 }
2918                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2919                 break;
2920         }
2921         case XPT_REL_SIMQ:
2922         {
2923                 struct ccb_relsim *crs;
2924                 struct cam_ed *dev;
2925
2926                 crs = &start_ccb->crs;
2927                 dev = path->device;
2928                 if (dev == NULL) {
2929                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
2930                         break;
2931                 }
2932
2933                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2934                         /* Don't ever go below one opening */
2935                         if (crs->openings > 0) {
2936                                 xpt_dev_ccbq_resize(path, crs->openings);
2937                                 if (bootverbose) {
2938                                         xpt_print(path,
2939                                             "number of openings is now %d\n",
2940                                             crs->openings);
2941                                 }
2942                         }
2943                 }
2944
2945                 mtx_lock(&dev->sim->devq->send_mtx);
2946                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2947                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2948                                 /*
2949                                  * Just extend the old timeout and decrement
2950                                  * the freeze count so that a single timeout
2951                                  * is sufficient for releasing the queue.
2952                                  */
2953                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2954                                 callout_stop(&dev->callout);
2955                         } else {
2956                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2957                         }
2958
2959                         callout_reset_sbt(&dev->callout,
2960                             SBT_1MS * crs->release_timeout, SBT_1MS,
2961                             xpt_release_devq_timeout, dev, 0);
2962
2963                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2964                 }
2965
2966                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2967                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2968                                 /*
2969                                  * Decrement the freeze count so that a single
2970                                  * completion is still sufficient to unfreeze
2971                                  * the queue.
2972                                  */
2973                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2974                         } else {
2975                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2976                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2977                         }
2978                 }
2979
2980                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2981                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2982                          || (dev->ccbq.dev_active == 0)) {
2983                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2984                         } else {
2985                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2986                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2987                         }
2988                 }
2989                 mtx_unlock(&dev->sim->devq->send_mtx);
2990
2991                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2992                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2993                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2994                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2995                 break;
2996         }
2997         case XPT_DEBUG: {
2998                 struct cam_path *oldpath;
2999
3000                 /* Check that all request bits are supported. */
3001                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3002                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3003                         break;
3004                 }
3005
3006                 cam_dflags = CAM_DEBUG_NONE;
3007                 if (cam_dpath != NULL) {
3008                         oldpath = cam_dpath;
3009                         cam_dpath = NULL;
3010                         xpt_free_path(oldpath);
3011                 }
3012                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3013                         if (xpt_create_path(&cam_dpath, NULL,
3014                                             start_ccb->ccb_h.path_id,
3015                                             start_ccb->ccb_h.target_id,
3016                                             start_ccb->ccb_h.target_lun) !=
3017                                             CAM_REQ_CMP) {
3018                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3019                         } else {
3020                                 cam_dflags = start_ccb->cdbg.flags;
3021                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3022                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3023                                     cam_dflags);
3024                         }
3025                 } else
3026                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3027                 break;
3028         }
3029         case XPT_NOOP:
3030                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3031                         xpt_freeze_devq(path, 1);
3032                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3033                 break;
3034         case XPT_REPROBE_LUN:
3035                 xpt_async(AC_INQ_CHANGED, path, NULL);
3036                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3037                 xpt_done(start_ccb);
3038                 break;
3039         case XPT_ASYNC:
3040                 /*
3041                  * Queue the async operation so it can be run from a sleepable
3042                  * context.
3043                  */
3044                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3045                 mtx_lock(&cam_async.cam_doneq_mtx);
3046                 STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe);
3047                 start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX;
3048                 mtx_unlock(&cam_async.cam_doneq_mtx);
3049                 wakeup(&cam_async.cam_doneq);
3050                 break;
3051         default:
3052         case XPT_SDEV_TYPE:
3053         case XPT_TERM_IO:
3054         case XPT_ENG_INQ:
3055                 /* XXX Implement */
3056                 xpt_print(start_ccb->ccb_h.path,
3057                     "%s: CCB type %#x %s not supported\n", __func__,
3058                     start_ccb->ccb_h.func_code,
3059                     xpt_action_name(start_ccb->ccb_h.func_code));
3060                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3061                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3062                         xpt_done(start_ccb);
3063                 }
3064                 break;
3065         }
3066         CAM_DEBUG(path, CAM_DEBUG_TRACE,
3067             ("xpt_action_default: func= %#x %s status %#x\n",
3068                 start_ccb->ccb_h.func_code,
3069                 xpt_action_name(start_ccb->ccb_h.func_code),
3070                 start_ccb->ccb_h.status));
3071 }
3072
3073 /*
3074  * Call the sim poll routine to allow the sim to complete
3075  * any inflight requests, then call camisr_runqueue to
3076  * complete any CCB that the polling completed.
3077  */
3078 void
3079 xpt_sim_poll(struct cam_sim *sim)
3080 {
3081         struct mtx *mtx;
3082
3083         KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3084         mtx = sim->mtx;
3085         if (mtx)
3086                 mtx_lock(mtx);
3087         (*(sim->sim_poll))(sim);
3088         if (mtx)
3089                 mtx_unlock(mtx);
3090         camisr_runqueue();
3091 }
3092
3093 uint32_t
3094 xpt_poll_setup(union ccb *start_ccb)
3095 {
3096         uint32_t timeout;
3097         struct    cam_sim *sim;
3098         struct    cam_devq *devq;
3099         struct    cam_ed *dev;
3100
3101         timeout = start_ccb->ccb_h.timeout * 10;
3102         sim = start_ccb->ccb_h.path->bus->sim;
3103         devq = sim->devq;
3104         dev = start_ccb->ccb_h.path->device;
3105
3106         KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3107
3108         /*
3109          * Steal an opening so that no other queued requests
3110          * can get it before us while we simulate interrupts.
3111          */
3112         mtx_lock(&devq->send_mtx);
3113         dev->ccbq.dev_openings--;
3114         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3115             (--timeout > 0)) {
3116                 mtx_unlock(&devq->send_mtx);
3117                 DELAY(100);
3118                 xpt_sim_poll(sim);
3119                 mtx_lock(&devq->send_mtx);
3120         }
3121         dev->ccbq.dev_openings++;
3122         mtx_unlock(&devq->send_mtx);
3123
3124         return (timeout);
3125 }
3126
3127 void
3128 xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
3129 {
3130
3131         KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim),
3132             ("%s: non-pollable sim", __func__));
3133         while (--timeout > 0) {
3134                 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim);
3135                 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3136                     != CAM_REQ_INPROG)
3137                         break;
3138                 DELAY(100);
3139         }
3140
3141         if (timeout == 0) {
3142                 /*
3143                  * XXX Is it worth adding a sim_timeout entry
3144                  * point so we can attempt recovery?  If
3145                  * this is only used for dumps, I don't think
3146                  * it is.
3147                  */
3148                 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3149         }
3150 }
3151
3152 /*
3153  * Schedule a peripheral driver to receive a ccb when its
3154  * target device has space for more transactions.
3155  */
3156 void
3157 xpt_schedule(struct cam_periph *periph, uint32_t new_priority)
3158 {
3159
3160         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3161         cam_periph_assert(periph, MA_OWNED);
3162         if (new_priority < periph->scheduled_priority) {
3163                 periph->scheduled_priority = new_priority;
3164                 xpt_run_allocq(periph, 0);
3165         }
3166 }
3167
3168 /*
3169  * Schedule a device to run on a given queue.
3170  * If the device was inserted as a new entry on the queue,
3171  * return 1 meaning the device queue should be run. If we
3172  * were already queued, implying someone else has already
3173  * started the queue, return 0 so the caller doesn't attempt
3174  * to run the queue.
3175  */
3176 static int
3177 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3178                  uint32_t new_priority)
3179 {
3180         int retval;
3181         uint32_t old_priority;
3182
3183         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3184
3185         old_priority = pinfo->priority;
3186
3187         /*
3188          * Are we already queued?
3189          */
3190         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3191                 /* Simply reorder based on new priority */
3192                 if (new_priority < old_priority) {
3193                         camq_change_priority(queue, pinfo->index,
3194                                              new_priority);
3195                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3196                                         ("changed priority to %d\n",
3197                                          new_priority));
3198                         retval = 1;
3199                 } else
3200                         retval = 0;
3201         } else {
3202                 /* New entry on the queue */
3203                 if (new_priority < old_priority)
3204                         pinfo->priority = new_priority;
3205
3206                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3207                                 ("Inserting onto queue\n"));
3208                 pinfo->generation = ++queue->generation;
3209                 camq_insert(queue, pinfo);
3210                 retval = 1;
3211         }
3212         return (retval);
3213 }
3214
3215 static void
3216 xpt_run_allocq_task(void *context, int pending)
3217 {
3218         struct cam_periph *periph = context;
3219
3220         cam_periph_lock(periph);
3221         periph->flags &= ~CAM_PERIPH_RUN_TASK;
3222         xpt_run_allocq(periph, 1);
3223         cam_periph_unlock(periph);
3224         cam_periph_release(periph);
3225 }
3226
3227 static void
3228 xpt_run_allocq(struct cam_periph *periph, int sleep)
3229 {
3230         struct cam_ed   *device;
3231         union ccb       *ccb;
3232         uint32_t         prio;
3233
3234         cam_periph_assert(periph, MA_OWNED);
3235         if (periph->periph_allocating)
3236                 return;
3237         cam_periph_doacquire(periph);
3238         periph->periph_allocating = 1;
3239         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3240         device = periph->path->device;
3241         ccb = NULL;
3242 restart:
3243         while ((prio = min(periph->scheduled_priority,
3244             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3245             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3246              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3247                 if (ccb == NULL &&
3248                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3249                         if (sleep) {
3250                                 ccb = xpt_get_ccb(periph);
3251                                 goto restart;
3252                         }
3253                         if (periph->flags & CAM_PERIPH_RUN_TASK)
3254                                 break;
3255                         cam_periph_doacquire(periph);
3256                         periph->flags |= CAM_PERIPH_RUN_TASK;
3257                         taskqueue_enqueue(xsoftc.xpt_taskq,
3258                             &periph->periph_run_task);
3259                         break;
3260                 }
3261                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3262                 if (prio == periph->immediate_priority) {
3263                         periph->immediate_priority = CAM_PRIORITY_NONE;
3264                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3265                                         ("waking cam_periph_getccb()\n"));
3266                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3267                                           periph_links.sle);
3268                         wakeup(&periph->ccb_list);
3269                 } else {
3270                         periph->scheduled_priority = CAM_PRIORITY_NONE;
3271                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3272                                         ("calling periph_start()\n"));
3273                         periph->periph_start(periph, ccb);
3274                 }
3275                 ccb = NULL;
3276         }
3277         if (ccb != NULL)
3278                 xpt_release_ccb(ccb);
3279         periph->periph_allocating = 0;
3280         cam_periph_release_locked(periph);
3281 }
3282
3283 static void
3284 xpt_run_devq(struct cam_devq *devq)
3285 {
3286         struct mtx *mtx;
3287
3288         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3289
3290         devq->send_queue.qfrozen_cnt++;
3291         while ((devq->send_queue.entries > 0)
3292             && (devq->send_openings > 0)
3293             && (devq->send_queue.qfrozen_cnt <= 1)) {
3294                 struct  cam_ed *device;
3295                 union ccb *work_ccb;
3296                 struct  cam_sim *sim;
3297                 struct xpt_proto *proto;
3298
3299                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
3300                                                            CAMQ_HEAD);
3301                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3302                                 ("running device %p\n", device));
3303
3304                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3305                 if (work_ccb == NULL) {
3306                         printf("device on run queue with no ccbs???\n");
3307                         continue;
3308                 }
3309
3310                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3311                         mtx_lock(&xsoftc.xpt_highpower_lock);
3312                         if (xsoftc.num_highpower <= 0) {
3313                                 /*
3314                                  * We got a high power command, but we
3315                                  * don't have any available slots.  Freeze
3316                                  * the device queue until we have a slot
3317                                  * available.
3318                                  */
3319                                 xpt_freeze_devq_device(device, 1);
3320                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3321                                                    highpowerq_entry);
3322
3323                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
3324                                 continue;
3325                         } else {
3326                                 /*
3327                                  * Consume a high power slot while
3328                                  * this ccb runs.
3329                                  */
3330                                 xsoftc.num_highpower--;
3331                         }
3332                         mtx_unlock(&xsoftc.xpt_highpower_lock);
3333                 }
3334                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3335                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3336                 devq->send_openings--;
3337                 devq->send_active++;
3338                 xpt_schedule_devq(devq, device);
3339                 mtx_unlock(&devq->send_mtx);
3340
3341                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3342                         /*
3343                          * The client wants to freeze the queue
3344                          * after this CCB is sent.
3345                          */
3346                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3347                 }
3348
3349                 /* In Target mode, the peripheral driver knows best... */
3350                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3351                         if ((device->inq_flags & SID_CmdQue) != 0
3352                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3353                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3354                         else
3355                                 /*
3356                                  * Clear this in case of a retried CCB that
3357                                  * failed due to a rejected tag.
3358                                  */
3359                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3360                 }
3361
3362                 KASSERT(device == work_ccb->ccb_h.path->device,
3363                     ("device (%p) / path->device (%p) mismatch",
3364                         device, work_ccb->ccb_h.path->device));
3365                 proto = xpt_proto_find(device->protocol);
3366                 if (proto && proto->ops->debug_out)
3367                         proto->ops->debug_out(work_ccb);
3368
3369                 /*
3370                  * Device queues can be shared among multiple SIM instances
3371                  * that reside on different buses.  Use the SIM from the
3372                  * queued device, rather than the one from the calling bus.
3373                  */
3374                 sim = device->sim;
3375                 mtx = sim->mtx;
3376                 if (mtx && !mtx_owned(mtx))
3377                         mtx_lock(mtx);
3378                 else
3379                         mtx = NULL;
3380                 work_ccb->ccb_h.qos.periph_data = cam_iosched_now();
3381                 (*(sim->sim_action))(sim, work_ccb);
3382                 if (mtx)
3383                         mtx_unlock(mtx);
3384                 mtx_lock(&devq->send_mtx);
3385         }
3386         devq->send_queue.qfrozen_cnt--;
3387 }
3388
3389 /*
3390  * This function merges stuff from the src ccb into the dst ccb, while keeping
3391  * important fields in the dst ccb constant.
3392  */
3393 void
3394 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb)
3395 {
3396
3397         /*
3398          * Pull fields that are valid for peripheral drivers to set
3399          * into the dst CCB along with the CCB "payload".
3400          */
3401         dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count;
3402         dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code;
3403         dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout;
3404         dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags;
3405         bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1],
3406               sizeof(union ccb) - sizeof(struct ccb_hdr));
3407 }
3408
3409 void
3410 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3411                     uint32_t priority, uint32_t flags)
3412 {
3413
3414         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3415         ccb_h->pinfo.priority = priority;
3416         ccb_h->path = path;
3417         ccb_h->path_id = path->bus->path_id;
3418         if (path->target)
3419                 ccb_h->target_id = path->target->target_id;
3420         else
3421                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3422         if (path->device) {
3423                 ccb_h->target_lun = path->device->lun_id;
3424                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3425         } else {
3426                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3427         }
3428         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3429         ccb_h->flags = flags;
3430         ccb_h->xflags = 0;
3431 }
3432
3433 void
3434 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority)
3435 {
3436         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3437 }
3438
3439 /* Path manipulation functions */
3440 cam_status
3441 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3442                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3443 {
3444         struct     cam_path *path;
3445         cam_status status;
3446
3447         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3448
3449         if (path == NULL) {
3450                 status = CAM_RESRC_UNAVAIL;
3451                 return(status);
3452         }
3453         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3454         if (status != CAM_REQ_CMP) {
3455                 free(path, M_CAMPATH);
3456                 path = NULL;
3457         }
3458         *new_path_ptr = path;
3459         return (status);
3460 }
3461
3462 cam_status
3463 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3464                          struct cam_periph *periph, path_id_t path_id,
3465                          target_id_t target_id, lun_id_t lun_id)
3466 {
3467
3468         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3469             lun_id));
3470 }
3471
3472 cam_status
3473 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3474                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3475 {
3476         struct       cam_eb *bus;
3477         struct       cam_et *target;
3478         struct       cam_ed *device;
3479         cam_status   status;
3480
3481         status = CAM_REQ_CMP;   /* Completed without error */
3482         target = NULL;          /* Wildcarded */
3483         device = NULL;          /* Wildcarded */
3484
3485         /*
3486          * We will potentially modify the EDT, so block interrupts
3487          * that may attempt to create cam paths.
3488          */
3489         bus = xpt_find_bus(path_id);
3490         if (bus == NULL) {
3491                 status = CAM_PATH_INVALID;
3492         } else {
3493                 xpt_lock_buses();
3494                 mtx_lock(&bus->eb_mtx);
3495                 target = xpt_find_target(bus, target_id);
3496                 if (target == NULL) {
3497                         /* Create one */
3498                         struct cam_et *new_target;
3499
3500                         new_target = xpt_alloc_target(bus, target_id);
3501                         if (new_target == NULL) {
3502                                 status = CAM_RESRC_UNAVAIL;
3503                         } else {
3504                                 target = new_target;
3505                         }
3506                 }
3507                 xpt_unlock_buses();
3508                 if (target != NULL) {
3509                         device = xpt_find_device(target, lun_id);
3510                         if (device == NULL) {
3511                                 /* Create one */
3512                                 struct cam_ed *new_device;
3513
3514                                 new_device =
3515                                     (*(bus->xport->ops->alloc_device))(bus,
3516                                                                        target,
3517                                                                        lun_id);
3518                                 if (new_device == NULL) {
3519                                         status = CAM_RESRC_UNAVAIL;
3520                                 } else {
3521                                         device = new_device;
3522                                 }
3523                         }
3524                 }
3525                 mtx_unlock(&bus->eb_mtx);
3526         }
3527
3528         /*
3529          * Only touch the user's data if we are successful.
3530          */
3531         if (status == CAM_REQ_CMP) {
3532                 new_path->periph = perph;
3533                 new_path->bus = bus;
3534                 new_path->target = target;
3535                 new_path->device = device;
3536                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3537         } else {
3538                 if (device != NULL)
3539                         xpt_release_device(device);
3540                 if (target != NULL)
3541                         xpt_release_target(target);
3542                 if (bus != NULL)
3543                         xpt_release_bus(bus);
3544         }
3545         return (status);
3546 }
3547
3548 int
3549 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3550 {
3551         struct     cam_path *new_path;
3552
3553         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3554         if (new_path == NULL)
3555                 return (ENOMEM);
3556         *new_path = *path;
3557         if (path->bus != NULL)
3558                 xpt_acquire_bus(path->bus);
3559         if (path->target != NULL)
3560                 xpt_acquire_target(path->target);
3561         if (path->device != NULL)
3562                 xpt_acquire_device(path->device);
3563         *new_path_ptr = new_path;
3564         return (0);
3565 }
3566
3567 void
3568 xpt_release_path(struct cam_path *path)
3569 {
3570         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3571         if (path->device != NULL) {
3572                 xpt_release_device(path->device);
3573                 path->device = NULL;
3574         }
3575         if (path->target != NULL) {
3576                 xpt_release_target(path->target);
3577                 path->target = NULL;
3578         }
3579         if (path->bus != NULL) {
3580                 xpt_release_bus(path->bus);
3581                 path->bus = NULL;
3582         }
3583 }
3584
3585 void
3586 xpt_free_path(struct cam_path *path)
3587 {
3588
3589         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3590         xpt_release_path(path);
3591         free(path, M_CAMPATH);
3592 }
3593
3594 void
3595 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3596     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3597 {
3598
3599         xpt_lock_buses();
3600         if (bus_ref) {
3601                 if (path->bus)
3602                         *bus_ref = path->bus->refcount;
3603                 else
3604                         *bus_ref = 0;
3605         }
3606         if (periph_ref) {
3607                 if (path->periph)
3608                         *periph_ref = path->periph->refcount;
3609                 else
3610                         *periph_ref = 0;
3611         }
3612         xpt_unlock_buses();
3613         if (target_ref) {
3614                 if (path->target)
3615                         *target_ref = path->target->refcount;
3616                 else
3617                         *target_ref = 0;
3618         }
3619         if (device_ref) {
3620                 if (path->device)
3621                         *device_ref = path->device->refcount;
3622                 else
3623                         *device_ref = 0;
3624         }
3625 }
3626
3627 /*
3628  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3629  * in path1, 2 for match with wildcards in path2.
3630  */
3631 int
3632 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3633 {
3634         int retval = 0;
3635
3636         if (path1->bus != path2->bus) {
3637                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3638                         retval = 1;
3639                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3640                         retval = 2;
3641                 else
3642                         return (-1);
3643         }
3644         if (path1->target != path2->target) {
3645                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3646                         if (retval == 0)
3647                                 retval = 1;
3648                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3649                         retval = 2;
3650                 else
3651                         return (-1);
3652         }
3653         if (path1->device != path2->device) {
3654                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3655                         if (retval == 0)
3656                                 retval = 1;
3657                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3658                         retval = 2;
3659                 else
3660                         return (-1);
3661         }
3662         return (retval);
3663 }
3664
3665 int
3666 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3667 {
3668         int retval = 0;
3669
3670         if (path->bus != dev->target->bus) {
3671                 if (path->bus->path_id == CAM_BUS_WILDCARD)
3672                         retval = 1;
3673                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3674                         retval = 2;
3675                 else
3676                         return (-1);
3677         }
3678         if (path->target != dev->target) {
3679                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
3680                         if (retval == 0)
3681                                 retval = 1;
3682                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3683                         retval = 2;
3684                 else
3685                         return (-1);
3686         }
3687         if (path->device != dev) {
3688                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
3689                         if (retval == 0)
3690                                 retval = 1;
3691                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
3692                         retval = 2;
3693                 else
3694                         return (-1);
3695         }
3696         return (retval);
3697 }
3698
3699 void
3700 xpt_print_path(struct cam_path *path)
3701 {
3702         struct sbuf sb;
3703         char buffer[XPT_PRINT_LEN];
3704
3705         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3706         xpt_path_sbuf(path, &sb);
3707         sbuf_finish(&sb);
3708         printf("%s", sbuf_data(&sb));
3709         sbuf_delete(&sb);
3710 }
3711
3712 static void
3713 xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb)
3714 {
3715         if (device == NULL)
3716                 sbuf_cat(sb, "(nopath): ");
3717         else {
3718                 sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ",
3719                     device->sim->sim_name,
3720                     device->sim->unit_number,
3721                     device->sim->bus_id,
3722                     device->target->target_id,
3723                     (uintmax_t)device->lun_id);
3724         }
3725 }
3726
3727 void
3728 xpt_print(struct cam_path *path, const char *fmt, ...)
3729 {
3730         va_list ap;
3731         struct sbuf sb;
3732         char buffer[XPT_PRINT_LEN];
3733
3734         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3735
3736         xpt_path_sbuf(path, &sb);
3737         va_start(ap, fmt);
3738         sbuf_vprintf(&sb, fmt, ap);
3739         va_end(ap);
3740
3741         sbuf_finish(&sb);
3742         printf("%s", sbuf_data(&sb));
3743         sbuf_delete(&sb);
3744 }
3745
3746 char *
3747 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3748 {
3749         struct sbuf sb;
3750
3751         sbuf_new(&sb, str, str_len, 0);
3752         xpt_path_sbuf(path, &sb);
3753         sbuf_finish(&sb);
3754         return (str);
3755 }
3756
3757 void
3758 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3759 {
3760
3761         if (path == NULL)
3762                 sbuf_cat(sb, "(nopath): ");
3763         else {
3764                 if (path->periph != NULL)
3765                         sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3766                                     path->periph->unit_number);
3767                 else
3768                         sbuf_cat(sb, "(noperiph:");
3769
3770                 if (path->bus != NULL)
3771                         sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3772                                     path->bus->sim->unit_number,
3773                                     path->bus->sim->bus_id);
3774                 else
3775                         sbuf_cat(sb, "nobus:");
3776
3777                 if (path->target != NULL)
3778                         sbuf_printf(sb, "%d:", path->target->target_id);
3779                 else
3780                         sbuf_cat(sb, "X:");
3781
3782                 if (path->device != NULL)
3783                         sbuf_printf(sb, "%jx): ",
3784                             (uintmax_t)path->device->lun_id);
3785                 else
3786                         sbuf_cat(sb, "X): ");
3787         }
3788 }
3789
3790 path_id_t
3791 xpt_path_path_id(struct cam_path *path)
3792 {
3793         return(path->bus->path_id);
3794 }
3795
3796 target_id_t
3797 xpt_path_target_id(struct cam_path *path)
3798 {
3799         if (path->target != NULL)
3800                 return (path->target->target_id);
3801         else
3802                 return (CAM_TARGET_WILDCARD);
3803 }
3804
3805 lun_id_t
3806 xpt_path_lun_id(struct cam_path *path)
3807 {
3808         if (path->device != NULL)
3809                 return (path->device->lun_id);
3810         else
3811                 return (CAM_LUN_WILDCARD);
3812 }
3813
3814 struct cam_sim *
3815 xpt_path_sim(struct cam_path *path)
3816 {
3817
3818         return (path->bus->sim);
3819 }
3820
3821 struct cam_periph*
3822 xpt_path_periph(struct cam_path *path)
3823 {
3824
3825         return (path->periph);
3826 }
3827
3828 /*
3829  * Release a CAM control block for the caller.  Remit the cost of the structure
3830  * to the device referenced by the path.  If the this device had no 'credits'
3831  * and peripheral drivers have registered async callbacks for this notification
3832  * call them now.
3833  */
3834 void
3835 xpt_release_ccb(union ccb *free_ccb)
3836 {
3837         struct   cam_ed *device;
3838         struct   cam_periph *periph;
3839
3840         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3841         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3842         device = free_ccb->ccb_h.path->device;
3843         periph = free_ccb->ccb_h.path->periph;
3844
3845         xpt_free_ccb(free_ccb);
3846         periph->periph_allocated--;
3847         cam_ccbq_release_opening(&device->ccbq);
3848         xpt_run_allocq(periph, 0);
3849 }
3850
3851 /* Functions accessed by SIM drivers */
3852
3853 static struct xpt_xport_ops xport_default_ops = {
3854         .alloc_device = xpt_alloc_device_default,
3855         .action = xpt_action_default,
3856         .async = xpt_dev_async_default,
3857 };
3858 static struct xpt_xport xport_default = {
3859         .xport = XPORT_UNKNOWN,
3860         .name = "unknown",
3861         .ops = &xport_default_ops,
3862 };
3863
3864 CAM_XPT_XPORT(xport_default);
3865
3866 /*
3867  * A sim structure, listing the SIM entry points and instance
3868  * identification info is passed to xpt_bus_register to hook the SIM
3869  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3870  * for this new bus and places it in the array of buses and assigns
3871  * it a path_id.  The path_id may be influenced by "hard wiring"
3872  * information specified by the user.  Once interrupt services are
3873  * available, the bus will be probed.
3874  */
3875 int
3876 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus)
3877 {
3878         struct cam_eb *new_bus;
3879         struct cam_eb *old_bus;
3880         struct ccb_pathinq cpi;
3881         struct cam_path *path;
3882         cam_status status;
3883
3884         sim->bus_id = bus;
3885         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3886                                           M_CAMXPT, M_NOWAIT|M_ZERO);
3887         if (new_bus == NULL) {
3888                 /* Couldn't satisfy request */
3889                 return (ENOMEM);
3890         }
3891
3892         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3893         TAILQ_INIT(&new_bus->et_entries);
3894         cam_sim_hold(sim);
3895         new_bus->sim = sim;
3896         timevalclear(&new_bus->last_reset);
3897         new_bus->flags = 0;
3898         new_bus->refcount = 1;  /* Held until a bus_deregister event */
3899         new_bus->generation = 0;
3900         new_bus->parent_dev = parent;
3901
3902         xpt_lock_buses();
3903         sim->path_id = new_bus->path_id =
3904             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3905         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3906         while (old_bus != NULL
3907             && old_bus->path_id < new_bus->path_id)
3908                 old_bus = TAILQ_NEXT(old_bus, links);
3909         if (old_bus != NULL)
3910                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3911         else
3912                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3913         xsoftc.bus_generation++;
3914         xpt_unlock_buses();
3915
3916         /*
3917          * Set a default transport so that a PATH_INQ can be issued to
3918          * the SIM.  This will then allow for probing and attaching of
3919          * a more appropriate transport.
3920          */
3921         new_bus->xport = &xport_default;
3922
3923         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3924                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3925         if (status != CAM_REQ_CMP) {
3926                 xpt_release_bus(new_bus);
3927                 return (ENOMEM);
3928         }
3929
3930         xpt_path_inq(&cpi, path);
3931
3932         /*
3933          * Use the results of PATH_INQ to pick a transport.  Note that
3934          * the xpt bus (which uses XPORT_UNSPECIFIED) always uses
3935          * xport_default instead of a transport from
3936          * cam_xpt_port_set.
3937          */
3938         if (cam_ccb_success((union ccb *)&cpi) &&
3939             cpi.transport != XPORT_UNSPECIFIED) {
3940                 struct xpt_xport **xpt;
3941
3942                 SET_FOREACH(xpt, cam_xpt_xport_set) {
3943                         if ((*xpt)->xport == cpi.transport) {
3944                                 new_bus->xport = *xpt;
3945                                 break;
3946                         }
3947                 }
3948                 if (new_bus->xport == &xport_default) {
3949                         xpt_print(path,
3950                             "No transport found for %d\n", cpi.transport);
3951                         xpt_release_bus(new_bus);
3952                         xpt_free_path(path);
3953                         return (EINVAL);
3954                 }
3955         }
3956
3957         /* Notify interested parties */
3958         if (sim->path_id != CAM_XPT_PATH_ID) {
3959                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
3960                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3961                         union   ccb *scan_ccb;
3962
3963                         /* Initiate bus rescan. */
3964                         scan_ccb = xpt_alloc_ccb_nowait();
3965                         if (scan_ccb != NULL) {
3966                                 scan_ccb->ccb_h.path = path;
3967                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3968                                 scan_ccb->crcn.flags = 0;
3969                                 xpt_rescan(scan_ccb);
3970                         } else {
3971                                 xpt_print(path,
3972                                           "Can't allocate CCB to scan bus\n");
3973                                 xpt_free_path(path);
3974                         }
3975                 } else
3976                         xpt_free_path(path);
3977         } else
3978                 xpt_free_path(path);
3979         return (CAM_SUCCESS);
3980 }
3981
3982 int
3983 xpt_bus_deregister(path_id_t pathid)
3984 {
3985         struct cam_path bus_path;
3986         cam_status status;
3987
3988         status = xpt_compile_path(&bus_path, NULL, pathid,
3989                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3990         if (status != CAM_REQ_CMP)
3991                 return (ENOMEM);
3992
3993         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3994         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3995
3996         /* Release the reference count held while registered. */
3997         xpt_release_bus(bus_path.bus);
3998         xpt_release_path(&bus_path);
3999
4000         return (CAM_SUCCESS);
4001 }
4002
4003 static path_id_t
4004 xptnextfreepathid(void)
4005 {
4006         struct cam_eb *bus;
4007         path_id_t pathid;
4008         const char *strval;
4009
4010         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4011         pathid = 0;
4012         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4013 retry:
4014         /* Find an unoccupied pathid */
4015         while (bus != NULL && bus->path_id <= pathid) {
4016                 if (bus->path_id == pathid)
4017                         pathid++;
4018                 bus = TAILQ_NEXT(bus, links);
4019         }
4020
4021         /*
4022          * Ensure that this pathid is not reserved for
4023          * a bus that may be registered in the future.
4024          */
4025         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4026                 ++pathid;
4027                 /* Start the search over */
4028                 goto retry;
4029         }
4030         return (pathid);
4031 }
4032
4033 static path_id_t
4034 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4035 {
4036         path_id_t pathid;
4037         int i, dunit, val;
4038         char buf[32];
4039         const char *dname;
4040
4041         pathid = CAM_XPT_PATH_ID;
4042         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4043         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4044                 return (pathid);
4045         i = 0;
4046         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4047                 if (strcmp(dname, "scbus")) {
4048                         /* Avoid a bit of foot shooting. */
4049                         continue;
4050                 }
4051                 if (dunit < 0)          /* unwired?! */
4052                         continue;
4053                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4054                         if (sim_bus == val) {
4055                                 pathid = dunit;
4056                                 break;
4057                         }
4058                 } else if (sim_bus == 0) {
4059                         /* Unspecified matches bus 0 */
4060                         pathid = dunit;
4061                         break;
4062                 } else {
4063                         printf("Ambiguous scbus configuration for %s%d "
4064                                "bus %d, cannot wire down.  The kernel "
4065                                "config entry for scbus%d should "
4066                                "specify a controller bus.\n"
4067                                "Scbus will be assigned dynamically.\n",
4068                                sim_name, sim_unit, sim_bus, dunit);
4069                         break;
4070                 }
4071         }
4072
4073         if (pathid == CAM_XPT_PATH_ID)
4074                 pathid = xptnextfreepathid();
4075         return (pathid);
4076 }
4077
4078 static const char *
4079 xpt_async_string(uint32_t async_code)
4080 {
4081
4082         switch (async_code) {
4083         case AC_BUS_RESET: return ("AC_BUS_RESET");
4084         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4085         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4086         case AC_SENT_BDR: return ("AC_SENT_BDR");
4087         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4088         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4089         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4090         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4091         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4092         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4093         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4094         case AC_CONTRACT: return ("AC_CONTRACT");
4095         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4096         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4097         }
4098         return ("AC_UNKNOWN");
4099 }
4100
4101 static int
4102 xpt_async_size(uint32_t async_code)
4103 {
4104
4105         switch (async_code) {
4106         case AC_BUS_RESET: return (0);
4107         case AC_UNSOL_RESEL: return (0);
4108         case AC_SCSI_AEN: return (0);
4109         case AC_SENT_BDR: return (0);
4110         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4111         case AC_PATH_DEREGISTERED: return (0);
4112         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4113         case AC_LOST_DEVICE: return (0);
4114         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4115         case AC_INQ_CHANGED: return (0);
4116         case AC_GETDEV_CHANGED: return (0);
4117         case AC_CONTRACT: return (sizeof(struct ac_contract));
4118         case AC_ADVINFO_CHANGED: return (-1);
4119         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4120         }
4121         return (0);
4122 }
4123
4124 static int
4125 xpt_async_process_dev(struct cam_ed *device, void *arg)
4126 {
4127         union ccb *ccb = arg;
4128         struct cam_path *path = ccb->ccb_h.path;
4129         void *async_arg = ccb->casync.async_arg_ptr;
4130         uint32_t async_code = ccb->casync.async_code;
4131         bool relock;
4132
4133         if (path->device != device
4134          && path->device->lun_id != CAM_LUN_WILDCARD
4135          && device->lun_id != CAM_LUN_WILDCARD)
4136                 return (1);
4137
4138         /*
4139          * The async callback could free the device.
4140          * If it is a broadcast async, it doesn't hold
4141          * device reference, so take our own reference.
4142          */
4143         xpt_acquire_device(device);
4144
4145         /*
4146          * If async for specific device is to be delivered to
4147          * the wildcard client, take the specific device lock.
4148          * XXX: We may need a way for client to specify it.
4149          */
4150         if ((device->lun_id == CAM_LUN_WILDCARD &&
4151              path->device->lun_id != CAM_LUN_WILDCARD) ||
4152             (device->target->target_id == CAM_TARGET_WILDCARD &&
4153              path->target->target_id != CAM_TARGET_WILDCARD) ||
4154             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4155              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4156                 mtx_unlock(&device->device_mtx);
4157                 xpt_path_lock(path);
4158                 relock = true;
4159         } else
4160                 relock = false;
4161
4162         (*(device->target->bus->xport->ops->async))(async_code,
4163             device->target->bus, device->target, device, async_arg);
4164         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4165
4166         if (relock) {
4167                 xpt_path_unlock(path);
4168                 mtx_lock(&device->device_mtx);
4169         }
4170         xpt_release_device(device);
4171         return (1);
4172 }
4173
4174 static int
4175 xpt_async_process_tgt(struct cam_et *target, void *arg)
4176 {
4177         union ccb *ccb = arg;
4178         struct cam_path *path = ccb->ccb_h.path;
4179
4180         if (path->target != target
4181          && path->target->target_id != CAM_TARGET_WILDCARD
4182          && target->target_id != CAM_TARGET_WILDCARD)
4183                 return (1);
4184
4185         if (ccb->casync.async_code == AC_SENT_BDR) {
4186                 /* Update our notion of when the last reset occurred */
4187                 microtime(&target->last_reset);
4188         }
4189
4190         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4191 }
4192
4193 static void
4194 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4195 {
4196         struct cam_eb *bus;
4197         struct cam_path *path;
4198         void *async_arg;
4199         uint32_t async_code;
4200
4201         path = ccb->ccb_h.path;
4202         async_code = ccb->casync.async_code;
4203         async_arg = ccb->casync.async_arg_ptr;
4204         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4205             ("xpt_async(%s)\n", xpt_async_string(async_code)));
4206         bus = path->bus;
4207
4208         if (async_code == AC_BUS_RESET) {
4209                 /* Update our notion of when the last reset occurred */
4210                 microtime(&bus->last_reset);
4211         }
4212
4213         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4214
4215         /*
4216          * If this wasn't a fully wildcarded async, tell all
4217          * clients that want all async events.
4218          */
4219         if (bus != xpt_periph->path->bus) {
4220                 xpt_path_lock(xpt_periph->path);
4221                 xpt_async_process_dev(xpt_periph->path->device, ccb);
4222                 xpt_path_unlock(xpt_periph->path);
4223         }
4224
4225         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4226                 xpt_release_devq(path, 1, TRUE);
4227         else
4228                 xpt_release_simq(path->bus->sim, TRUE);
4229         if (ccb->casync.async_arg_size > 0)
4230                 free(async_arg, M_CAMXPT);
4231         xpt_free_path(path);
4232         xpt_free_ccb(ccb);
4233 }
4234
4235 static void
4236 xpt_async_bcast(struct async_list *async_head,
4237                 uint32_t async_code,
4238                 struct cam_path *path, void *async_arg)
4239 {
4240         struct async_node *cur_entry;
4241         struct mtx *mtx;
4242
4243         cur_entry = SLIST_FIRST(async_head);
4244         while (cur_entry != NULL) {
4245                 struct async_node *next_entry;
4246                 /*
4247                  * Grab the next list entry before we call the current
4248                  * entry's callback.  This is because the callback function
4249                  * can delete its async callback entry.
4250                  */
4251                 next_entry = SLIST_NEXT(cur_entry, links);
4252                 if ((cur_entry->event_enable & async_code) != 0) {
4253                         mtx = cur_entry->event_lock ?
4254                             path->device->sim->mtx : NULL;
4255                         if (mtx)
4256                                 mtx_lock(mtx);
4257                         cur_entry->callback(cur_entry->callback_arg,
4258                                             async_code, path,
4259                                             async_arg);
4260                         if (mtx)
4261                                 mtx_unlock(mtx);
4262                 }
4263                 cur_entry = next_entry;
4264         }
4265 }
4266
4267 void
4268 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg)
4269 {
4270         union ccb *ccb;
4271         int size;
4272
4273         ccb = xpt_alloc_ccb_nowait();
4274         if (ccb == NULL) {
4275                 xpt_print(path, "Can't allocate CCB to send %s\n",
4276                     xpt_async_string(async_code));
4277                 return;
4278         }
4279
4280         if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) {
4281                 xpt_print(path, "Can't allocate path to send %s\n",
4282                     xpt_async_string(async_code));
4283                 xpt_free_ccb(ccb);
4284                 return;
4285         }
4286         ccb->ccb_h.path->periph = NULL;
4287         ccb->ccb_h.func_code = XPT_ASYNC;
4288         ccb->ccb_h.cbfcnp = xpt_async_process;
4289         ccb->ccb_h.flags |= CAM_UNLOCKED;
4290         ccb->casync.async_code = async_code;
4291         ccb->casync.async_arg_size = 0;
4292         size = xpt_async_size(async_code);
4293         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4294             ("xpt_async: func %#x %s aync_code %d %s\n",
4295                 ccb->ccb_h.func_code,
4296                 xpt_action_name(ccb->ccb_h.func_code),
4297                 async_code,
4298                 xpt_async_string(async_code)));
4299         if (size > 0 && async_arg != NULL) {
4300                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4301                 if (ccb->casync.async_arg_ptr == NULL) {
4302                         xpt_print(path, "Can't allocate argument to send %s\n",
4303                             xpt_async_string(async_code));
4304                         xpt_free_path(ccb->ccb_h.path);
4305                         xpt_free_ccb(ccb);
4306                         return;
4307                 }
4308                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4309                 ccb->casync.async_arg_size = size;
4310         } else if (size < 0) {
4311                 ccb->casync.async_arg_ptr = async_arg;
4312                 ccb->casync.async_arg_size = size;
4313         }
4314         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4315                 xpt_freeze_devq(path, 1);
4316         else
4317                 xpt_freeze_simq(path->bus->sim, 1);
4318         xpt_action(ccb);
4319 }
4320
4321 static void
4322 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus,
4323                       struct cam_et *target, struct cam_ed *device,
4324                       void *async_arg)
4325 {
4326
4327         /*
4328          * We only need to handle events for real devices.
4329          */
4330         if (target->target_id == CAM_TARGET_WILDCARD
4331          || device->lun_id == CAM_LUN_WILDCARD)
4332                 return;
4333
4334         printf("%s called\n", __func__);
4335 }
4336
4337 static uint32_t
4338 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4339 {
4340         struct cam_devq *devq;
4341         uint32_t freeze;
4342
4343         devq = dev->sim->devq;
4344         mtx_assert(&devq->send_mtx, MA_OWNED);
4345         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4346             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4347             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4348         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4349         /* Remove frozen device from sendq. */
4350         if (device_is_queued(dev))
4351                 camq_remove(&devq->send_queue, dev->devq_entry.index);
4352         return (freeze);
4353 }
4354
4355 uint32_t
4356 xpt_freeze_devq(struct cam_path *path, u_int count)
4357 {
4358         struct cam_ed   *dev = path->device;
4359         struct cam_devq *devq;
4360         uint32_t         freeze;
4361
4362         devq = dev->sim->devq;
4363         mtx_lock(&devq->send_mtx);
4364         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4365         freeze = xpt_freeze_devq_device(dev, count);
4366         mtx_unlock(&devq->send_mtx);
4367         return (freeze);
4368 }
4369
4370 uint32_t
4371 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4372 {
4373         struct cam_devq *devq;
4374         uint32_t         freeze;
4375
4376         devq = sim->devq;
4377         mtx_lock(&devq->send_mtx);
4378         freeze = (devq->send_queue.qfrozen_cnt += count);
4379         mtx_unlock(&devq->send_mtx);
4380         return (freeze);
4381 }
4382
4383 static void
4384 xpt_release_devq_timeout(void *arg)
4385 {
4386         struct cam_ed *dev;
4387         struct cam_devq *devq;
4388
4389         dev = (struct cam_ed *)arg;
4390         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4391         devq = dev->sim->devq;
4392         mtx_assert(&devq->send_mtx, MA_OWNED);
4393         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4394                 xpt_run_devq(devq);
4395 }
4396
4397 void
4398 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4399 {
4400         struct cam_ed *dev;
4401         struct cam_devq *devq;
4402
4403         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4404             count, run_queue));
4405         dev = path->device;
4406         devq = dev->sim->devq;
4407         mtx_lock(&devq->send_mtx);
4408         if (xpt_release_devq_device(dev, count, run_queue))
4409                 xpt_run_devq(dev->sim->devq);
4410         mtx_unlock(&devq->send_mtx);
4411 }
4412
4413 static int
4414 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4415 {
4416
4417         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4418         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4419             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4420             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4421         if (count > dev->ccbq.queue.qfrozen_cnt) {
4422 #ifdef INVARIANTS
4423                 printf("xpt_release_devq(): requested %u > present %u\n",
4424                     count, dev->ccbq.queue.qfrozen_cnt);
4425 #endif
4426                 count = dev->ccbq.queue.qfrozen_cnt;
4427         }
4428         dev->ccbq.queue.qfrozen_cnt -= count;
4429         if (dev->ccbq.queue.qfrozen_cnt == 0) {
4430                 /*
4431                  * No longer need to wait for a successful
4432                  * command completion.
4433                  */
4434                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4435                 /*
4436                  * Remove any timeouts that might be scheduled
4437                  * to release this queue.
4438                  */
4439                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4440                         callout_stop(&dev->callout);
4441                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4442                 }
4443                 /*
4444                  * Now that we are unfrozen schedule the
4445                  * device so any pending transactions are
4446                  * run.
4447                  */
4448                 xpt_schedule_devq(dev->sim->devq, dev);
4449         } else
4450                 run_queue = 0;
4451         return (run_queue);
4452 }
4453
4454 void
4455 xpt_release_simq(struct cam_sim *sim, int run_queue)
4456 {
4457         struct cam_devq *devq;
4458
4459         devq = sim->devq;
4460         mtx_lock(&devq->send_mtx);
4461         if (devq->send_queue.qfrozen_cnt <= 0) {
4462 #ifdef INVARIANTS
4463                 printf("xpt_release_simq: requested 1 > present %u\n",
4464                     devq->send_queue.qfrozen_cnt);
4465 #endif
4466         } else
4467                 devq->send_queue.qfrozen_cnt--;
4468         if (devq->send_queue.qfrozen_cnt == 0) {
4469                 if (run_queue) {
4470                         /*
4471                          * Now that we are unfrozen run the send queue.
4472                          */
4473                         xpt_run_devq(sim->devq);
4474                 }
4475         }
4476         mtx_unlock(&devq->send_mtx);
4477 }
4478
4479 void
4480 xpt_done(union ccb *done_ccb)
4481 {
4482         struct cam_doneq *queue;
4483         int     run, hash;
4484
4485 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4486         if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
4487             done_ccb->csio.bio != NULL)
4488                 biotrack(done_ccb->csio.bio, __func__);
4489 #endif
4490
4491         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4492             ("xpt_done: func= %#x %s status %#x\n",
4493                 done_ccb->ccb_h.func_code,
4494                 xpt_action_name(done_ccb->ccb_h.func_code),
4495                 done_ccb->ccb_h.status));
4496         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4497                 return;
4498
4499         /* Store the time the ccb was in the sim */
4500         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4501         done_ccb->ccb_h.status |= CAM_QOS_VALID;
4502         hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4503             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4504         queue = &cam_doneqs[hash];
4505         mtx_lock(&queue->cam_doneq_mtx);
4506         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4507         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4508         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4509         mtx_unlock(&queue->cam_doneq_mtx);
4510         if (run && !dumping)
4511                 wakeup(&queue->cam_doneq);
4512 }
4513
4514 void
4515 xpt_done_direct(union ccb *done_ccb)
4516 {
4517
4518         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4519             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4520         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4521                 return;
4522
4523         /* Store the time the ccb was in the sim */
4524         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4525         done_ccb->ccb_h.status |= CAM_QOS_VALID;
4526         xpt_done_process(&done_ccb->ccb_h);
4527 }
4528
4529 union ccb *
4530 xpt_alloc_ccb(void)
4531 {
4532         union ccb *new_ccb;
4533
4534         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4535         return (new_ccb);
4536 }
4537
4538 union ccb *
4539 xpt_alloc_ccb_nowait(void)
4540 {
4541         union ccb *new_ccb;
4542
4543         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4544         return (new_ccb);
4545 }
4546
4547 void
4548 xpt_free_ccb(union ccb *free_ccb)
4549 {
4550         struct cam_periph *periph;
4551
4552         if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) {
4553                 /*
4554                  * Looks like a CCB allocated from a periph UMA zone.
4555                  */
4556                 periph = free_ccb->ccb_h.path->periph;
4557                 uma_zfree(periph->ccb_zone, free_ccb);
4558         } else {
4559                 free(free_ccb, M_CAMCCB);
4560         }
4561 }
4562
4563 /* Private XPT functions */
4564
4565 /*
4566  * Get a CAM control block for the caller. Charge the structure to the device
4567  * referenced by the path.  If we don't have sufficient resources to allocate
4568  * more ccbs, we return NULL.
4569  */
4570 static union ccb *
4571 xpt_get_ccb_nowait(struct cam_periph *periph)
4572 {
4573         union ccb *new_ccb;
4574         int alloc_flags;
4575
4576         if (periph->ccb_zone != NULL) {
4577                 alloc_flags = CAM_CCB_FROM_UMA;
4578                 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT);
4579         } else {
4580                 alloc_flags = 0;
4581                 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4582         }
4583         if (new_ccb == NULL)
4584                 return (NULL);
4585         new_ccb->ccb_h.alloc_flags = alloc_flags;
4586         periph->periph_allocated++;
4587         cam_ccbq_take_opening(&periph->path->device->ccbq);
4588         return (new_ccb);
4589 }
4590
4591 static union ccb *
4592 xpt_get_ccb(struct cam_periph *periph)
4593 {
4594         union ccb *new_ccb;
4595         int alloc_flags;
4596
4597         cam_periph_unlock(periph);
4598         if (periph->ccb_zone != NULL) {
4599                 alloc_flags = CAM_CCB_FROM_UMA;
4600                 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK);
4601         } else {
4602                 alloc_flags = 0;
4603                 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4604         }
4605         new_ccb->ccb_h.alloc_flags = alloc_flags;
4606         cam_periph_lock(periph);
4607         periph->periph_allocated++;
4608         cam_ccbq_take_opening(&periph->path->device->ccbq);
4609         return (new_ccb);
4610 }
4611
4612 union ccb *
4613 cam_periph_getccb(struct cam_periph *periph, uint32_t priority)
4614 {
4615         struct ccb_hdr *ccb_h;
4616
4617         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4618         cam_periph_assert(periph, MA_OWNED);
4619         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4620             ccb_h->pinfo.priority != priority) {
4621                 if (priority < periph->immediate_priority) {
4622                         periph->immediate_priority = priority;
4623                         xpt_run_allocq(periph, 0);
4624                 } else
4625                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4626                             "cgticb", 0);
4627         }
4628         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4629         return ((union ccb *)ccb_h);
4630 }
4631
4632 static void
4633 xpt_acquire_bus(struct cam_eb *bus)
4634 {
4635
4636         xpt_lock_buses();
4637         bus->refcount++;
4638         xpt_unlock_buses();
4639 }
4640
4641 static void
4642 xpt_release_bus(struct cam_eb *bus)
4643 {
4644
4645         xpt_lock_buses();
4646         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4647         if (--bus->refcount > 0) {
4648                 xpt_unlock_buses();
4649                 return;
4650         }
4651         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4652         xsoftc.bus_generation++;
4653         xpt_unlock_buses();
4654         KASSERT(TAILQ_EMPTY(&bus->et_entries),
4655             ("destroying bus, but target list is not empty"));
4656         cam_sim_release(bus->sim);
4657         mtx_destroy(&bus->eb_mtx);
4658         free(bus, M_CAMXPT);
4659 }
4660
4661 static struct cam_et *
4662 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4663 {
4664         struct cam_et *cur_target, *target;
4665
4666         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4667         mtx_assert(&bus->eb_mtx, MA_OWNED);
4668         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4669                                          M_NOWAIT|M_ZERO);
4670         if (target == NULL)
4671                 return (NULL);
4672
4673         TAILQ_INIT(&target->ed_entries);
4674         target->bus = bus;
4675         target->target_id = target_id;
4676         target->refcount = 1;
4677         target->generation = 0;
4678         target->luns = NULL;
4679         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4680         timevalclear(&target->last_reset);
4681         /*
4682          * Hold a reference to our parent bus so it
4683          * will not go away before we do.
4684          */
4685         bus->refcount++;
4686
4687         /* Insertion sort into our bus's target list */
4688         cur_target = TAILQ_FIRST(&bus->et_entries);
4689         while (cur_target != NULL && cur_target->target_id < target_id)
4690                 cur_target = TAILQ_NEXT(cur_target, links);
4691         if (cur_target != NULL) {
4692                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4693         } else {
4694                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4695         }
4696         bus->generation++;
4697         return (target);
4698 }
4699
4700 static void
4701 xpt_acquire_target(struct cam_et *target)
4702 {
4703         struct cam_eb *bus = target->bus;
4704
4705         mtx_lock(&bus->eb_mtx);
4706         target->refcount++;
4707         mtx_unlock(&bus->eb_mtx);
4708 }
4709
4710 static void
4711 xpt_release_target(struct cam_et *target)
4712 {
4713         struct cam_eb *bus = target->bus;
4714
4715         mtx_lock(&bus->eb_mtx);
4716         if (--target->refcount > 0) {
4717                 mtx_unlock(&bus->eb_mtx);
4718                 return;
4719         }
4720         TAILQ_REMOVE(&bus->et_entries, target, links);
4721         bus->generation++;
4722         mtx_unlock(&bus->eb_mtx);
4723         KASSERT(TAILQ_EMPTY(&target->ed_entries),
4724             ("destroying target, but device list is not empty"));
4725         xpt_release_bus(bus);
4726         mtx_destroy(&target->luns_mtx);
4727         if (target->luns)
4728                 free(target->luns, M_CAMXPT);
4729         free(target, M_CAMXPT);
4730 }
4731
4732 static struct cam_ed *
4733 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4734                          lun_id_t lun_id)
4735 {
4736         struct cam_ed *device;
4737
4738         device = xpt_alloc_device(bus, target, lun_id);
4739         if (device == NULL)
4740                 return (NULL);
4741
4742         device->mintags = 1;
4743         device->maxtags = 1;
4744         return (device);
4745 }
4746
4747 static void
4748 xpt_destroy_device(void *context, int pending)
4749 {
4750         struct cam_ed   *device = context;
4751
4752         mtx_lock(&device->device_mtx);
4753         mtx_destroy(&device->device_mtx);
4754         free(device, M_CAMDEV);
4755 }
4756
4757 struct cam_ed *
4758 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4759 {
4760         struct cam_ed   *cur_device, *device;
4761         struct cam_devq *devq;
4762         cam_status status;
4763
4764         mtx_assert(&bus->eb_mtx, MA_OWNED);
4765         /* Make space for us in the device queue on our bus */
4766         devq = bus->sim->devq;
4767         mtx_lock(&devq->send_mtx);
4768         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4769         mtx_unlock(&devq->send_mtx);
4770         if (status != CAM_REQ_CMP)
4771                 return (NULL);
4772
4773         device = (struct cam_ed *)malloc(sizeof(*device),
4774                                          M_CAMDEV, M_NOWAIT|M_ZERO);
4775         if (device == NULL)
4776                 return (NULL);
4777
4778         cam_init_pinfo(&device->devq_entry);
4779         device->target = target;
4780         device->lun_id = lun_id;
4781         device->sim = bus->sim;
4782         if (cam_ccbq_init(&device->ccbq,
4783                           bus->sim->max_dev_openings) != 0) {
4784                 free(device, M_CAMDEV);
4785                 return (NULL);
4786         }
4787         SLIST_INIT(&device->asyncs);
4788         SLIST_INIT(&device->periphs);
4789         device->generation = 0;
4790         device->flags = CAM_DEV_UNCONFIGURED;
4791         device->tag_delay_count = 0;
4792         device->tag_saved_openings = 0;
4793         device->refcount = 1;
4794         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4795         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4796         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4797         /*
4798          * Hold a reference to our parent bus so it
4799          * will not go away before we do.
4800          */
4801         target->refcount++;
4802
4803         cur_device = TAILQ_FIRST(&target->ed_entries);
4804         while (cur_device != NULL && cur_device->lun_id < lun_id)
4805                 cur_device = TAILQ_NEXT(cur_device, links);
4806         if (cur_device != NULL)
4807                 TAILQ_INSERT_BEFORE(cur_device, device, links);
4808         else
4809                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4810         target->generation++;
4811         return (device);
4812 }
4813
4814 void
4815 xpt_acquire_device(struct cam_ed *device)
4816 {
4817         struct cam_eb *bus = device->target->bus;
4818
4819         mtx_lock(&bus->eb_mtx);
4820         device->refcount++;
4821         mtx_unlock(&bus->eb_mtx);
4822 }
4823
4824 void
4825 xpt_release_device(struct cam_ed *device)
4826 {
4827         struct cam_eb *bus = device->target->bus;
4828         struct cam_devq *devq;
4829
4830         mtx_lock(&bus->eb_mtx);
4831         if (--device->refcount > 0) {
4832                 mtx_unlock(&bus->eb_mtx);
4833                 return;
4834         }
4835
4836         TAILQ_REMOVE(&device->target->ed_entries, device,links);
4837         device->target->generation++;
4838         mtx_unlock(&bus->eb_mtx);
4839
4840         /* Release our slot in the devq */
4841         devq = bus->sim->devq;
4842         mtx_lock(&devq->send_mtx);
4843         cam_devq_resize(devq, devq->send_queue.array_size - 1);
4844
4845         KASSERT(SLIST_EMPTY(&device->periphs),
4846             ("destroying device, but periphs list is not empty"));
4847         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4848             ("destroying device while still queued for ccbs"));
4849
4850         /* The send_mtx must be held when accessing the callout */
4851         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4852                 callout_stop(&device->callout);
4853
4854         mtx_unlock(&devq->send_mtx);
4855
4856         xpt_release_target(device->target);
4857
4858         cam_ccbq_fini(&device->ccbq);
4859         /*
4860          * Free allocated memory.  free(9) does nothing if the
4861          * supplied pointer is NULL, so it is safe to call without
4862          * checking.
4863          */
4864         free(device->supported_vpds, M_CAMXPT);
4865         free(device->device_id, M_CAMXPT);
4866         free(device->ext_inq, M_CAMXPT);
4867         free(device->physpath, M_CAMXPT);
4868         free(device->rcap_buf, M_CAMXPT);
4869         free(device->serial_num, M_CAMXPT);
4870         free(device->nvme_data, M_CAMXPT);
4871         free(device->nvme_cdata, M_CAMXPT);
4872         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4873 }
4874
4875 uint32_t
4876 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4877 {
4878         int     result;
4879         struct  cam_ed *dev;
4880
4881         dev = path->device;
4882         mtx_lock(&dev->sim->devq->send_mtx);
4883         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4884         mtx_unlock(&dev->sim->devq->send_mtx);
4885         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4886          || (dev->inq_flags & SID_CmdQue) != 0)
4887                 dev->tag_saved_openings = newopenings;
4888         return (result);
4889 }
4890
4891 static struct cam_eb *
4892 xpt_find_bus(path_id_t path_id)
4893 {
4894         struct cam_eb *bus;
4895
4896         xpt_lock_buses();
4897         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4898              bus != NULL;
4899              bus = TAILQ_NEXT(bus, links)) {
4900                 if (bus->path_id == path_id) {
4901                         bus->refcount++;
4902                         break;
4903                 }
4904         }
4905         xpt_unlock_buses();
4906         return (bus);
4907 }
4908
4909 static struct cam_et *
4910 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4911 {
4912         struct cam_et *target;
4913
4914         mtx_assert(&bus->eb_mtx, MA_OWNED);
4915         for (target = TAILQ_FIRST(&bus->et_entries);
4916              target != NULL;
4917              target = TAILQ_NEXT(target, links)) {
4918                 if (target->target_id == target_id) {
4919                         target->refcount++;
4920                         break;
4921                 }
4922         }
4923         return (target);
4924 }
4925
4926 static struct cam_ed *
4927 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4928 {
4929         struct cam_ed *device;
4930
4931         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4932         for (device = TAILQ_FIRST(&target->ed_entries);
4933              device != NULL;
4934              device = TAILQ_NEXT(device, links)) {
4935                 if (device->lun_id == lun_id) {
4936                         device->refcount++;
4937                         break;
4938                 }
4939         }
4940         return (device);
4941 }
4942
4943 void
4944 xpt_start_tags(struct cam_path *path)
4945 {
4946         struct ccb_relsim crs;
4947         struct cam_ed *device;
4948         struct cam_sim *sim;
4949         int    newopenings;
4950
4951         device = path->device;
4952         sim = path->bus->sim;
4953         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4954         xpt_freeze_devq(path, /*count*/1);
4955         device->inq_flags |= SID_CmdQue;
4956         if (device->tag_saved_openings != 0)
4957                 newopenings = device->tag_saved_openings;
4958         else
4959                 newopenings = min(device->maxtags,
4960                                   sim->max_tagged_dev_openings);
4961         xpt_dev_ccbq_resize(path, newopenings);
4962         xpt_async(AC_GETDEV_CHANGED, path, NULL);
4963         memset(&crs, 0, sizeof(crs));
4964         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4965         crs.ccb_h.func_code = XPT_REL_SIMQ;
4966         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4967         crs.openings
4968             = crs.release_timeout
4969             = crs.qfrozen_cnt
4970             = 0;
4971         xpt_action((union ccb *)&crs);
4972 }
4973
4974 void
4975 xpt_stop_tags(struct cam_path *path)
4976 {
4977         struct ccb_relsim crs;
4978         struct cam_ed *device;
4979         struct cam_sim *sim;
4980
4981         device = path->device;
4982         sim = path->bus->sim;
4983         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4984         device->tag_delay_count = 0;
4985         xpt_freeze_devq(path, /*count*/1);
4986         device->inq_flags &= ~SID_CmdQue;
4987         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4988         xpt_async(AC_GETDEV_CHANGED, path, NULL);
4989         memset(&crs, 0, sizeof(crs));
4990         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4991         crs.ccb_h.func_code = XPT_REL_SIMQ;
4992         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4993         crs.openings
4994             = crs.release_timeout
4995             = crs.qfrozen_cnt
4996             = 0;
4997         xpt_action((union ccb *)&crs);
4998 }
4999
5000 /*
5001  * Assume all possible buses are detected by this time, so allow boot
5002  * as soon as they all are scanned.
5003  */
5004 static void
5005 xpt_boot_delay(void *arg)
5006 {
5007
5008         xpt_release_boot();
5009 }
5010
5011 /*
5012  * Now that all config hooks have completed, start boot_delay timer,
5013  * waiting for possibly still undetected buses (USB) to appear.
5014  */
5015 static void
5016 xpt_ch_done(void *arg)
5017 {
5018
5019         callout_init(&xsoftc.boot_callout, 1);
5020         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay,
5021             SBT_1MS, xpt_boot_delay, NULL, 0);
5022 }
5023 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5024
5025 /*
5026  * Now that interrupts are enabled, go find our devices
5027  */
5028 static void
5029 xpt_config(void *arg)
5030 {
5031         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5032                 printf("xpt_config: failed to create taskqueue thread.\n");
5033
5034         /* Setup debugging path */
5035         if (cam_dflags != CAM_DEBUG_NONE) {
5036                 if (xpt_create_path(&cam_dpath, NULL,
5037                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5038                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5039                         printf("xpt_config: xpt_create_path() failed for debug"
5040                                " target %d:%d:%d, debugging disabled\n",
5041                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5042                         cam_dflags = CAM_DEBUG_NONE;
5043                 }
5044         } else
5045                 cam_dpath = NULL;
5046
5047         periphdriver_init(1);
5048         xpt_hold_boot();
5049
5050         /* Fire up rescan thread. */
5051         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5052             "cam", "scanner")) {
5053                 printf("xpt_config: failed to create rescan thread.\n");
5054         }
5055 }
5056
5057 void
5058 xpt_hold_boot_locked(void)
5059 {
5060
5061         if (xsoftc.buses_to_config++ == 0)
5062                 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5063 }
5064
5065 void
5066 xpt_hold_boot(void)
5067 {
5068
5069         xpt_lock_buses();
5070         xpt_hold_boot_locked();
5071         xpt_unlock_buses();
5072 }
5073
5074 void
5075 xpt_release_boot(void)
5076 {
5077
5078         xpt_lock_buses();
5079         if (--xsoftc.buses_to_config == 0) {
5080                 if (xsoftc.buses_config_done == 0) {
5081                         xsoftc.buses_config_done = 1;
5082                         xsoftc.buses_to_config++;
5083                         TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5084                             NULL);
5085                         taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5086                 } else
5087                         root_mount_rel(&xsoftc.xpt_rootmount);
5088         }
5089         xpt_unlock_buses();
5090 }
5091
5092 /*
5093  * If the given device only has one peripheral attached to it, and if that
5094  * peripheral is the passthrough driver, announce it.  This insures that the
5095  * user sees some sort of announcement for every peripheral in their system.
5096  */
5097 static int
5098 xptpassannouncefunc(struct cam_ed *device, void *arg)
5099 {
5100         struct cam_periph *periph;
5101         int i;
5102
5103         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5104              periph = SLIST_NEXT(periph, periph_links), i++);
5105
5106         periph = SLIST_FIRST(&device->periphs);
5107         if ((i == 1)
5108          && (strncmp(periph->periph_name, "pass", 4) == 0))
5109                 xpt_announce_periph(periph, NULL);
5110
5111         return(1);
5112 }
5113
5114 static void
5115 xpt_finishconfig_task(void *context, int pending)
5116 {
5117
5118         periphdriver_init(2);
5119         /*
5120          * Check for devices with no "standard" peripheral driver
5121          * attached.  For any devices like that, announce the
5122          * passthrough driver so the user will see something.
5123          */
5124         if (!bootverbose)
5125                 xpt_for_all_devices(xptpassannouncefunc, NULL);
5126
5127         xpt_release_boot();
5128 }
5129
5130 cam_status
5131 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5132                    struct cam_path *path)
5133 {
5134         struct ccb_setasync csa;
5135         cam_status status;
5136         bool xptpath = false;
5137
5138         if (path == NULL) {
5139                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5140                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5141                 if (status != CAM_REQ_CMP)
5142                         return (status);
5143                 xpt_path_lock(path);
5144                 xptpath = true;
5145         }
5146
5147         memset(&csa, 0, sizeof(csa));
5148         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5149         csa.ccb_h.func_code = XPT_SASYNC_CB;
5150         csa.event_enable = event;
5151         csa.callback = cbfunc;
5152         csa.callback_arg = cbarg;
5153         xpt_action((union ccb *)&csa);
5154         status = csa.ccb_h.status;
5155
5156         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5157             ("xpt_register_async: func %p\n", cbfunc));
5158
5159         if (xptpath) {
5160                 xpt_path_unlock(path);
5161                 xpt_free_path(path);
5162         }
5163
5164         if ((status == CAM_REQ_CMP) &&
5165             (csa.event_enable & AC_FOUND_DEVICE)) {
5166                 /*
5167                  * Get this peripheral up to date with all
5168                  * the currently existing devices.
5169                  */
5170                 xpt_for_all_devices(xptsetasyncfunc, &csa);
5171         }
5172         if ((status == CAM_REQ_CMP) &&
5173             (csa.event_enable & AC_PATH_REGISTERED)) {
5174                 /*
5175                  * Get this peripheral up to date with all
5176                  * the currently existing buses.
5177                  */
5178                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5179         }
5180
5181         return (status);
5182 }
5183
5184 static void
5185 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5186 {
5187         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5188
5189         switch (work_ccb->ccb_h.func_code) {
5190         /* Common cases first */
5191         case XPT_PATH_INQ:              /* Path routing inquiry */
5192         {
5193                 struct ccb_pathinq *cpi;
5194
5195                 cpi = &work_ccb->cpi;
5196                 cpi->version_num = 1; /* XXX??? */
5197                 cpi->hba_inquiry = 0;
5198                 cpi->target_sprt = 0;
5199                 cpi->hba_misc = 0;
5200                 cpi->hba_eng_cnt = 0;
5201                 cpi->max_target = 0;
5202                 cpi->max_lun = 0;
5203                 cpi->initiator_id = 0;
5204                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5205                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5206                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5207                 cpi->unit_number = sim->unit_number;
5208                 cpi->bus_id = sim->bus_id;
5209                 cpi->base_transfer_speed = 0;
5210                 cpi->protocol = PROTO_UNSPECIFIED;
5211                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5212                 cpi->transport = XPORT_UNSPECIFIED;
5213                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5214                 cpi->ccb_h.status = CAM_REQ_CMP;
5215                 break;
5216         }
5217         default:
5218                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5219                 break;
5220         }
5221         xpt_done(work_ccb);
5222 }
5223
5224 /*
5225  * The xpt as a "controller" has no interrupt sources, so polling
5226  * is a no-op.
5227  */
5228 static void
5229 xptpoll(struct cam_sim *sim)
5230 {
5231 }
5232
5233 void
5234 xpt_lock_buses(void)
5235 {
5236         mtx_lock(&xsoftc.xpt_topo_lock);
5237 }
5238
5239 void
5240 xpt_unlock_buses(void)
5241 {
5242         mtx_unlock(&xsoftc.xpt_topo_lock);
5243 }
5244
5245 struct mtx *
5246 xpt_path_mtx(struct cam_path *path)
5247 {
5248
5249         return (&path->device->device_mtx);
5250 }
5251
5252 static void
5253 xpt_done_process(struct ccb_hdr *ccb_h)
5254 {
5255         struct cam_sim *sim = NULL;
5256         struct cam_devq *devq = NULL;
5257         struct mtx *mtx = NULL;
5258
5259 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5260         struct ccb_scsiio *csio;
5261
5262         if (ccb_h->func_code == XPT_SCSI_IO) {
5263                 csio = &((union ccb *)ccb_h)->csio;
5264                 if (csio->bio != NULL)
5265                         biotrack(csio->bio, __func__);
5266         }
5267 #endif
5268
5269         if (ccb_h->flags & CAM_HIGH_POWER) {
5270                 struct highpowerlist    *hphead;
5271                 struct cam_ed           *device;
5272
5273                 mtx_lock(&xsoftc.xpt_highpower_lock);
5274                 hphead = &xsoftc.highpowerq;
5275
5276                 device = STAILQ_FIRST(hphead);
5277
5278                 /*
5279                  * Increment the count since this command is done.
5280                  */
5281                 xsoftc.num_highpower++;
5282
5283                 /*
5284                  * Any high powered commands queued up?
5285                  */
5286                 if (device != NULL) {
5287                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5288                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5289
5290                         mtx_lock(&device->sim->devq->send_mtx);
5291                         xpt_release_devq_device(device,
5292                                          /*count*/1, /*runqueue*/TRUE);
5293                         mtx_unlock(&device->sim->devq->send_mtx);
5294                 } else
5295                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5296         }
5297
5298         /*
5299          * Insulate against a race where the periph is destroyed but CCBs are
5300          * still not all processed. This shouldn't happen, but allows us better
5301          * bug diagnostic when it does.
5302          */
5303         if (ccb_h->path->bus)
5304                 sim = ccb_h->path->bus->sim;
5305
5306         if (ccb_h->status & CAM_RELEASE_SIMQ) {
5307                 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request"));
5308                 xpt_release_simq(sim, /*run_queue*/FALSE);
5309                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
5310         }
5311
5312         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5313          && (ccb_h->status & CAM_DEV_QFRZN)) {
5314                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5315                 ccb_h->status &= ~CAM_DEV_QFRZN;
5316         }
5317
5318         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5319                 struct cam_ed *dev = ccb_h->path->device;
5320
5321                 if (sim)
5322                         devq = sim->devq;
5323                 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.",
5324                         ccb_h, xpt_action_name(ccb_h->func_code)));
5325
5326                 mtx_lock(&devq->send_mtx);
5327                 devq->send_active--;
5328                 devq->send_openings++;
5329                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5330
5331                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5332                   && (dev->ccbq.dev_active == 0))) {
5333                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5334                         xpt_release_devq_device(dev, /*count*/1,
5335                                          /*run_queue*/FALSE);
5336                 }
5337
5338                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5339                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5340                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5341                         xpt_release_devq_device(dev, /*count*/1,
5342                                          /*run_queue*/FALSE);
5343                 }
5344
5345                 if (!device_is_queued(dev))
5346                         (void)xpt_schedule_devq(devq, dev);
5347                 xpt_run_devq(devq);
5348                 mtx_unlock(&devq->send_mtx);
5349
5350                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5351                         mtx = xpt_path_mtx(ccb_h->path);
5352                         mtx_lock(mtx);
5353
5354                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5355                          && (--dev->tag_delay_count == 0))
5356                                 xpt_start_tags(ccb_h->path);
5357                 }
5358         }
5359
5360         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5361                 if (mtx == NULL) {
5362                         mtx = xpt_path_mtx(ccb_h->path);
5363                         mtx_lock(mtx);
5364                 }
5365         } else {
5366                 if (mtx != NULL) {
5367                         mtx_unlock(mtx);
5368                         mtx = NULL;
5369                 }
5370         }
5371
5372         /* Call the peripheral driver's callback */
5373         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5374         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5375         if (mtx != NULL)
5376                 mtx_unlock(mtx);
5377 }
5378
5379 /*
5380  * Parameterize instead and use xpt_done_td?
5381  */
5382 static void
5383 xpt_async_td(void *arg)
5384 {
5385         struct cam_doneq *queue = arg;
5386         struct ccb_hdr *ccb_h;
5387         STAILQ_HEAD(, ccb_hdr)  doneq;
5388
5389         STAILQ_INIT(&doneq);
5390         mtx_lock(&queue->cam_doneq_mtx);
5391         while (1) {
5392                 while (STAILQ_EMPTY(&queue->cam_doneq))
5393                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5394                             PRIBIO, "-", 0);
5395                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5396                 mtx_unlock(&queue->cam_doneq_mtx);
5397
5398                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5399                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5400                         xpt_done_process(ccb_h);
5401                 }
5402
5403                 mtx_lock(&queue->cam_doneq_mtx);
5404         }
5405 }
5406
5407 void
5408 xpt_done_td(void *arg)
5409 {
5410         struct cam_doneq *queue = arg;
5411         struct ccb_hdr *ccb_h;
5412         STAILQ_HEAD(, ccb_hdr)  doneq;
5413
5414         STAILQ_INIT(&doneq);
5415         mtx_lock(&queue->cam_doneq_mtx);
5416         while (1) {
5417                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5418                         queue->cam_doneq_sleep = 1;
5419                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5420                             PRIBIO, "-", 0);
5421                         queue->cam_doneq_sleep = 0;
5422                 }
5423                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5424                 mtx_unlock(&queue->cam_doneq_mtx);
5425
5426                 THREAD_NO_SLEEPING();
5427                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5428                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5429                         xpt_done_process(ccb_h);
5430                 }
5431                 THREAD_SLEEPING_OK();
5432
5433                 mtx_lock(&queue->cam_doneq_mtx);
5434         }
5435 }
5436
5437 static void
5438 camisr_runqueue(void)
5439 {
5440         struct  ccb_hdr *ccb_h;
5441         struct cam_doneq *queue;
5442         int i;
5443
5444         /* Process global queues. */
5445         for (i = 0; i < cam_num_doneqs; i++) {
5446                 queue = &cam_doneqs[i];
5447                 mtx_lock(&queue->cam_doneq_mtx);
5448                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5449                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5450                         mtx_unlock(&queue->cam_doneq_mtx);
5451                         xpt_done_process(ccb_h);
5452                         mtx_lock(&queue->cam_doneq_mtx);
5453                 }
5454                 mtx_unlock(&queue->cam_doneq_mtx);
5455         }
5456 }
5457
5458 /**
5459  * @brief Return the device_t associated with the path
5460  *
5461  * When a SIM is created, it registers a bus with a NEWBUS device_t. This is
5462  * stored in the internal cam_eb bus structure. There is no guarnatee any given
5463  * path will have a @c device_t associated with it (it's legal to call @c
5464  * xpt_bus_register with a @c NULL @c device_t.
5465  *
5466  * @param path          Path to return the device_t for.
5467  */
5468 device_t
5469 xpt_path_sim_device(const struct cam_path *path)
5470 {
5471         return (path->bus->parent_dev);
5472 }
5473
5474 struct kv 
5475 {
5476         uint32_t v;
5477         const char *name;
5478 };
5479
5480 static struct kv map[] = {
5481         { XPT_NOOP, "XPT_NOOP" },
5482         { XPT_SCSI_IO, "XPT_SCSI_IO" },
5483         { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5484         { XPT_GDEVLIST, "XPT_GDEVLIST" },
5485         { XPT_PATH_INQ, "XPT_PATH_INQ" },
5486         { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5487         { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5488         { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5489         { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5490         { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5491         { XPT_DEBUG, "XPT_DEBUG" },
5492         { XPT_PATH_STATS, "XPT_PATH_STATS" },
5493         { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5494         { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5495         { XPT_ASYNC, "XPT_ASYNC" },
5496         { XPT_ABORT, "XPT_ABORT" },
5497         { XPT_RESET_BUS, "XPT_RESET_BUS" },
5498         { XPT_RESET_DEV, "XPT_RESET_DEV" },
5499         { XPT_TERM_IO, "XPT_TERM_IO" },
5500         { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5501         { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5502         { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5503         { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5504         { XPT_ATA_IO, "XPT_ATA_IO" },
5505         { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5506         { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5507         { XPT_NVME_IO, "XPT_NVME_IO" },
5508         { XPT_MMC_IO, "XPT_MMC_IO" },
5509         { XPT_SMP_IO, "XPT_SMP_IO" },
5510         { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5511         { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5512         { XPT_ENG_INQ, "XPT_ENG_INQ" },
5513         { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5514         { XPT_EN_LUN, "XPT_EN_LUN" },
5515         { XPT_TARGET_IO, "XPT_TARGET_IO" },
5516         { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5517         { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5518         { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5519         { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5520         { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5521         { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5522         { 0, 0 }
5523 };
5524
5525 const char *
5526 xpt_action_name(uint32_t action)
5527 {
5528         static char buffer[32]; /* Only for unknown messages -- racy */
5529         struct kv *walker = map;
5530
5531         while (walker->name != NULL) {
5532                 if (walker->v == action)
5533                         return (walker->name);
5534                 walker++;
5535         }
5536
5537         snprintf(buffer, sizeof(buffer), "%#x", action);
5538         return (buffer);
5539 }
5540
5541 void
5542 xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...)
5543 {
5544         struct sbuf sbuf;
5545         char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5546         struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5547         va_list ap;
5548
5549         sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5550         xpt_path_sbuf(path, sb);
5551         va_start(ap, fmt);
5552         sbuf_vprintf(sb, fmt, ap);
5553         va_end(ap);
5554         sbuf_finish(sb);
5555         sbuf_delete(sb);
5556         if (cam_debug_delay != 0)
5557                 DELAY(cam_debug_delay);
5558 }
5559
5560 void
5561 xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...)
5562 {
5563         struct sbuf sbuf;
5564         char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5565         struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5566         va_list ap;
5567
5568         sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5569         xpt_device_sbuf(dev, sb);
5570         va_start(ap, fmt);
5571         sbuf_vprintf(sb, fmt, ap);
5572         va_end(ap);
5573         sbuf_finish(sb);
5574         sbuf_delete(sb);
5575         if (cam_debug_delay != 0)
5576                 DELAY(cam_debug_delay);
5577 }
5578
5579 void
5580 xpt_cam_debug(const char *fmt, ...)
5581 {
5582         struct sbuf sbuf;
5583         char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5584         struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5585         va_list ap;
5586
5587         sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5588         sbuf_cat(sb, "cam_debug: ");
5589         va_start(ap, fmt);
5590         sbuf_vprintf(sb, fmt, ap);
5591         va_end(ap);
5592         sbuf_finish(sb);
5593         sbuf_delete(sb);
5594         if (cam_debug_delay != 0)
5595                 DELAY(cam_debug_delay);
5596 }