/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_printf.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_iosched.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/stdarg.h>     /* for xpt_print below */

/* Wild guess based on not wanting to grow the stack too much */
#define XPT_PRINT_MAXLEN        512
#ifdef PRINTF_BUFR_SIZE
#define XPT_PRINT_LEN   PRINTF_BUFR_SIZE
#else
#define XPT_PRINT_LEN   128
#endif
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

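/*
 * Illustrative note (added commentary, not from the original source): the
 * #ifndef guard above makes the limit a compile-time default.  Assuming the
 * option is wired into the kernel config machinery (opt_cam.h), a custom
 * kernel could raise it with something like:
 *
 *      options CAM_MAX_HIGHPOWER=8
 *
 * A larger value permits more simultaneous high-power operations (e.g.
 * start unit) before further ones are parked on the highpowerq below.
 */
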
/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

struct xpt_softc {
        uint32_t                xpt_generation;

        /* number of high powered commands that can go through right now */
        struct mtx              xpt_highpower_lock;
        STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
        int                     num_highpower;

        /* queue for handling async rescan requests. */
        TAILQ_HEAD(, ccb_hdr) ccb_scanq;
        int buses_to_config;
        int buses_config_done;

        /*
         * Registered buses
         *
         * N.B., "busses" is an archaic spelling of "buses".  In new code
         * "buses" is preferred.
         */
        TAILQ_HEAD(,cam_eb)     xpt_busses;
        u_int                   bus_generation;

        int                     boot_delay;
        struct callout          boot_callout;
        struct task             boot_task;
        struct root_hold_token  xpt_rootmount;

        struct mtx              xpt_topo_lock;
        struct taskqueue        *xpt_taskq;
};

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

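/*
 * Illustrative note (added commentary, not from the original source): a
 * dev_match_ret packs a flag nibble and an action nibble, which callers
 * separate with the two masks, e.g.:
 *
 *      dev_match_ret ret = DM_RET_DESCEND | DM_RET_COPY;
 *
 *      if ((ret & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *              ;       // keep walking lower EDT levels
 *      if ((ret & DM_RET_FLAG_MASK) == DM_RET_COPY)
 *              ;       // copy this node out to the match buffer
 *
 * The match functions later in this file (xptbusmatch() and friends) are
 * the producers of these values.
 */
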
typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
            &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
        struct mtx_padalign     cam_doneq_mtx;
        STAILQ_HEAD(, ccb_hdr)  cam_doneq;
        int                     cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static u_int __read_mostly cam_num_doneqs;
static struct proc *cam_proc;
static struct cam_doneq cam_async;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
        CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
        &cam_dflags, 0, "Enabled debug flags");
uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
        &cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static int      xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void             xpt_async_bcast(struct async_list *async_head,
                                        uint32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
static void      xpt_run_allocq_task(void *context, int pending);
static void      xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void      xpt_acquire_bus(struct cam_eb *bus);
static void      xpt_release_bus(struct cam_eb *bus);
static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
                    int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_acquire_target(struct cam_et *target);
static void      xpt_release_target(struct cam_et *target);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_config(void *arg);
static void      xpt_hold_boot_locked(void);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                 uint32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr_runqueue(void);
static void      xpt_done_process(struct ccb_hdr *ccb_h);
static void      xpt_done_td(void *);
static void      xpt_async_td(void *);
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void             xpt_finishconfig_task(void *context, int pending);
static void             xpt_dev_async_default(uint32_t async_code,
                                              struct cam_eb *bus,
                                              struct cam_et *target,
                                              struct cam_ed *device,
                                              void *async_arg);
static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
                                                 struct cam_et *target,
                                                 lun_id_t lun_id);
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
        int     retval;

        mtx_assert(&devq->send_mtx, MA_OWNED);
        if ((dev->ccbq.queue.entries > 0) &&
            (dev->ccbq.dev_openings > 0) &&
            (dev->ccbq.queue.qfrozen_cnt == 0)) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&devq->send_queue,
                                     &dev->devq_entry,
                                     CAMQ_GET_PRIO(&dev->ccbq.queue));
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
        return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("%s: can't do nonblocking access\n", devtoname(dev));
                return(ENODEV);
        }

        return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

        return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
                error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
        }
        return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        error = 0;

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;
                struct cam_eb *bus;

                inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (inccb->ccb_h.func_code == XPT_SCSI_IO)
                        inccb->csio.bio = NULL;
#endif

                if (inccb->ccb_h.flags & CAM_UNLOCKED)
                        return (EINVAL);

                bus = xpt_find_bus(inccb->ccb_h.path_id);
                if (bus == NULL)
                        return (EINVAL);

                switch (inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                case XPT_SCAN_TGT:
                        if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                default:
                        break;
                }

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:
                case XPT_SCAN_TGT:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        xpt_path_lock(ccb->ccb_h.path);
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        xpt_path_unlock(ccb->ccb_h.path);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */
                        memset(&ccb, 0, sizeof(ccb));

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;
                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
                            CAM_DATA_VADDR) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo, maxphys);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                xpt_release_bus(bus);
                break;
        }
        /*
         * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the ccb.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.  Note
         * that since we do everything in one pass, unlike the XPT_GDEVLIST
         * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it with lock protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                bool base_periph_found;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                base_periph_found = false;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (ccb->ccb_h.func_code == XPT_SCSI_IO)
                        ccb->csio.bio = NULL;
#endif

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                xpt_lock_buses();

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        xpt_unlock_buses();
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {
                        if (periph->unit_number == unit)
                                break;
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = true;
                        device = periph->path->device;
                        for (i = 0, periph = SLIST_FIRST(&device->periphs);
                             periph != NULL;
                             periph = SLIST_NEXT(periph, periph_links), i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strlcpy(ccb->cgdl.periph_name,
                                               periph->periph_name,
                                               sizeof(ccb->cgdl.periph_name));
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (SLIST_NEXT(periph, periph_links))
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass\" in "
                                       "your kernel config file\n");
                        }
                }
                xpt_unlock_buses();
                break;
        }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}

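/*
 * Illustrative sketch (added commentary, not part of this file): userland
 * reaches CAMGETPASSTHRU roughly the way libcam's cam_open_device() does --
 * open the xpt device, name the peripheral and unit of interest, and read
 * the matching "pass" unit back out of the CCB.  Error handling is
 * abbreviated here.
 *
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <cam/cam.h>
 *      #include <cam/cam_ccb.h>
 *
 *      union ccb ccb;
 *      int fd = open("/dev/xpt0", O_RDWR);
 *
 *      memset(&ccb, 0, sizeof(ccb));
 *      ccb.ccb_h.func_code = XPT_GDEVLIST;
 *      strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *      ccb.cgdl.unit_number = 0;
 *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
 *          ccb.ccb_h.status == CAM_REQ_CMP)
 *              printf("da0 is reachable via %s%d\n",
 *                  ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */
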
static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                if ((error = xpt_init(NULL)) != 0)
                        return (error);
                break;
        case MOD_UNLOAD:
                return EBUSY;
        default:
                return EOPNOTSUPP;
        }

        return 0;
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
        struct xpt_proto **pp;

        SET_FOREACH(pp, cam_xpt_proto_set) {
                if ((*pp)->proto == proto)
                        return *pp;
        }

        return NULL;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

        if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
                xpt_free_path(done_ccb->ccb_h.path);
                xpt_free_ccb(done_ccb);
        } else {
                done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
                (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
        }
        xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
        union ccb       *ccb;
        struct mtx      *mtx;
        struct cam_ed   *device;

        xpt_lock_buses();
        for (;;) {
                if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
                        msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
                               "-", 0);
                if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
                        TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
                        xpt_unlock_buses();

                        /*
                         * We need to lock the device's mutex, which we use as
                         * the path mutex.  We can't take it through the path
                         * lock interface because the cam_path in the ccb may
                         * wind up going away: the path lock may be dropped
                         * and the path retired in the completion callback.
                         * Taking the mutex pointer directly keeps the
                         * reference counts in cam_path sane.  We also copy
                         * the device pointer and hold a device reference,
                         * because ccb_h.path may be freed in the callback.
                         */
                        mtx = xpt_path_mtx(ccb->ccb_h.path);
                        device = ccb->ccb_h.path->device;
                        xpt_acquire_device(device);
                        mtx_lock(mtx);
                        xpt_action(ccb);
                        mtx_unlock(mtx);
                        xpt_release_device(device);

                        xpt_lock_buses();
                }
        }
}

void
xpt_rescan(union ccb *ccb)
{
        struct ccb_hdr *hdr;

        /* Prepare request */
        if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_BUS;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_TGT;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_LUN;
        else {
                xpt_print(ccb->ccb_h.path, "illegal scan path\n");
                xpt_free_path(ccb->ccb_h.path);
                xpt_free_ccb(ccb);
                return;
        }
        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
            ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
                xpt_action_name(ccb->ccb_h.func_code)));

        ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
        ccb->ccb_h.cbfcnp = xpt_rescan_done;
        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
        /* Don't make duplicate entries for the same paths. */
        xpt_lock_buses();
        if (ccb->ccb_h.ppriv_ptr1 == NULL) {
                TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
                        if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
                                wakeup(&xsoftc.ccb_scanq);
                                xpt_unlock_buses();
                                xpt_print(ccb->ccb_h.path, "rescan already queued\n");
                                xpt_free_path(ccb->ccb_h.path);
                                xpt_free_ccb(ccb);
                                return;
                        }
                }
        }
        TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
        xpt_hold_boot_locked();
        wakeup(&xsoftc.ccb_scanq);
        xpt_unlock_buses();
}

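/*
 * Illustrative sketch (added commentary, not part of this file): a SIM or
 * peripheral driver that notices a topology change hands this routine a
 * freshly allocated CCB with a newly created path; xpt_rescan() then owns
 * (and eventually frees) both.  The wildcards request a full bus scan, and
 * bus_path_id is a placeholder for the SIM's registered path id.
 *
 *      union ccb *ccb;
 *
 *      if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *              return;                         // retry later
 *      if (xpt_create_path(&ccb->ccb_h.path, NULL, bus_path_id,
 *          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *              xpt_free_ccb(ccb);
 *              return;
 *      }
 *      xpt_rescan(ccb);        // queued to xpt_scanner_thread() as XPT_SCAN_BUS
 */
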
/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;
        int error, i;

        TAILQ_INIT(&xsoftc.xpt_busses);
        TAILQ_INIT(&xsoftc.ccb_scanq);
        STAILQ_INIT(&xsoftc.highpowerq);
        xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

        mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
        xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
        /*
         * Override this value at compile time to assist our users
         * who don't use loader to boot a kernel.
         */
        xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif

        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe buses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*mtx*/NULL,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        if (xpt_sim == NULL)
                return (ENOMEM);

        if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
                printf("xpt_init: xpt_bus_register failed with errno %d,"
                       " failing attach\n", error);
                return (EINVAL);
        }

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }
        xpt_path_lock(path);
        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, xpt_sim);
        xpt_path_unlock(path);
        xpt_free_path(path);

        if (cam_num_doneqs < 1)
                cam_num_doneqs = 1 + mp_ncpus / 6;
        else if (cam_num_doneqs > MAXCPU)
                cam_num_doneqs = MAXCPU;
        for (i = 0; i < cam_num_doneqs; i++) {
                mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
                    MTX_DEF);
                STAILQ_INIT(&cam_doneqs[i].cam_doneq);
                error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
                    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
                if (error != 0) {
                        cam_num_doneqs = i;
                        break;
                }
        }
        if (cam_num_doneqs < 1) {
                printf("xpt_init: Cannot init completion queues "
                       "- failing attach\n");
                return (ENOMEM);
        }

        mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
        STAILQ_INIT(&cam_async.cam_doneq);
        if (kproc_kthread_add(xpt_async_td, &cam_async,
                &cam_proc, NULL, 0, 0, "cam", "async") != 0) {
                printf("xpt_init: Cannot init async thread "
                       "- failing attach\n");
                return (ENOMEM);
        }

        /*
         * Register a callback for when interrupts are enabled.
         */
        config_intrhook_oneshot(xpt_config, NULL);

        return (0);
}

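/*
 * Illustrative note (added commentary, not from the original source): the
 * CTLFLAG_RDTUN sysctls declared earlier make both knobs loader tunables,
 * so they are set from /boot/loader.conf rather than at runtime, e.g.:
 *
 *      kern.cam.boot_delay=10000       # example: extra bus settle time (ms)
 *      kern.cam.num_doneqs=4           # example: completion queues/threads
 *
 * When num_doneqs is left unset (< 1), xpt_init() above auto-sizes it as
 * 1 + mp_ncpus / 6 and clamps it to MAXCPU.
 */
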
static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
        struct cam_sim *xpt_sim;

        if (periph == NULL) {
                printf("xptregister: periph was NULL!!\n");
                return(CAM_REQ_CMP_ERR);
        }

        xpt_sim = (struct cam_sim *)arg;
        xpt_sim->softc = periph;
        xpt_periph = periph;
        periph->softc = NULL;

        return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
        struct cam_ed *device;
        int32_t  status;

        TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
        device = periph->path->device;
        status = CAM_REQ_CMP;
        if (device != NULL) {
                mtx_lock(&device->target->bus->eb_mtx);
                device->generation++;
                SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
                mtx_unlock(&device->target->bus->eb_mtx);
                atomic_add_32(&xsoftc.xpt_generation, 1);
        }

        return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
        struct cam_ed *device;

        device = periph->path->device;
        if (device != NULL) {
                mtx_lock(&device->target->bus->eb_mtx);
                device->generation++;
                SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
                mtx_unlock(&device->target->bus->eb_mtx);
                atomic_add_32(&xsoftc.xpt_generation, 1);
        }
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
        char buf[128];
        struct sbuf sb;

        (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
        sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
        xpt_announce_periph_sbuf(periph, &sb, announce_string);
        (void)sbuf_finish(&sb);
}

void
xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
    char *announce_string)
{
        struct  cam_path *path = periph->path;
        struct  xpt_proto *proto;

        cam_periph_assert(periph, MA_OWNED);
        periph->flags |= CAM_PERIPH_ANNOUNCED;

        sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
            periph->periph_name, periph->unit_number,
            path->bus->sim->sim_name,
            path->bus->sim->unit_number,
            path->bus->sim->bus_id,
            path->bus->path_id,
            path->target->target_id,
            (uintmax_t)path->device->lun_id);
        sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
        proto = xpt_proto_find(path->device->protocol);
        if (proto)
                proto->ops->announce_sbuf(path->device, sb);
        else
                sbuf_printf(sb, "Unknown protocol device %d\n",
                    path->device->protocol);
        if (path->device->serial_num_len > 0) {
                /* Don't wrap the screen - print only the first 60 chars */
                sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
                    periph->periph_name, periph->unit_number,
                    path->device->serial_num);
        }
        /* Announce transport details. */
        path->bus->xport->ops->announce_sbuf(periph, sb);
        /* Announce command queueing. */
        if (path->device->inq_flags & SID_CmdQue
         || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
                    periph->periph_name, periph->unit_number);
        }
        /* Announce caller's details if they've passed in. */
        if (announce_string != NULL)
                sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
                    periph->unit_number, announce_string);
}

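/*
 * Illustrative note (added commentary, not from the original source): the
 * first sbuf_printf() above produces the familiar attach lines seen in
 * dmesg; with made-up hardware they look like:
 *
 *      da0 at mpt0 bus 0 scbus0 target 1 lun 0
 *      da0: Serial Number ABC123
 *      da0: Command Queueing enabled
 */
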
void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
        if (quirks != 0) {
                printf("%s%d: quirks=0x%b\n", periph->periph_name,
                    periph->unit_number, quirks, bit_string);
        }
}

void
xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
                         int quirks, char *bit_string)
{
        if (quirks != 0) {
                sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
                    periph->unit_number, quirks, bit_string);
        }
}

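/*
 * Illustrative note (added commentary, not from the original source): "%b"
 * is the kernel printf(9) bit-field format.  The bit_string argument starts
 * with the output base, followed by one <bit-number><name> entry per flag,
 * so a (hypothetical) caller might pass:
 *
 *      xpt_announce_quirks(periph, quirks,
 *          "\020" "\001NO_SYNC" "\002NO_WIDE" "\003NO_TAGS");
 *
 * which renders a quirks value of 0x5 as "quirks=0x5<NO_SYNC,NO_TAGS>".
 */
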
void
xpt_denounce_periph(struct cam_periph *periph)
{
        char buf[128];
        struct sbuf sb;

        (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
        sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
        xpt_denounce_periph_sbuf(periph, &sb);
        (void)sbuf_finish(&sb);
}

void
xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
        struct cam_path *path = periph->path;
        struct xpt_proto *proto;

        cam_periph_assert(periph, MA_OWNED);

        sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
            periph->periph_name, periph->unit_number,
            path->bus->sim->sim_name,
            path->bus->sim->unit_number,
            path->bus->sim->bus_id,
            path->bus->path_id,
            path->target->target_id,
            (uintmax_t)path->device->lun_id);
        sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
        proto = xpt_proto_find(path->device->protocol);
        if (proto)
                proto->ops->denounce_sbuf(path->device, sb);
        else
                sbuf_printf(sb, "Unknown protocol device %d",
                    path->device->protocol);
        if (path->device->serial_num_len > 0)
                sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
        sbuf_printf(sb, " detached\n");
}

int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
        int ret = -1, l, o;
        struct ccb_dev_advinfo cdai;
        struct scsi_vpd_device_id *did;
        struct scsi_vpd_id_descriptor *idd;

        xpt_path_assert(path, MA_OWNED);

        memset(&cdai, 0, sizeof(cdai));
        xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
        cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
        cdai.flags = CDAI_FLAG_NONE;
        cdai.bufsiz = len;
        cdai.buf = buf;

        if (!strcmp(attr, "GEOM::ident"))
                cdai.buftype = CDAI_TYPE_SERIAL_NUM;
        else if (!strcmp(attr, "GEOM::physpath"))
                cdai.buftype = CDAI_TYPE_PHYS_PATH;
        else if (strcmp(attr, "GEOM::lunid") == 0 ||
                 strcmp(attr, "GEOM::lunname") == 0) {
                cdai.buftype = CDAI_TYPE_SCSI_DEVID;
                cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
                cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
                if (cdai.buf == NULL) {
                        ret = ENOMEM;
                        goto out;
                }
        } else
                goto out;

        xpt_action((union ccb *)&cdai); /* can only be synchronous */
        if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
                cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
        if (cdai.provsiz == 0)
                goto out;
        switch(cdai.buftype) {
        case CDAI_TYPE_SCSI_DEVID:
                did = (struct scsi_vpd_device_id *)cdai.buf;
                if (strcmp(attr, "GEOM::lunid") == 0) {
                        idd = scsi_get_devid(did, cdai.provsiz,
                            scsi_devid_is_lun_naa);
                        if (idd == NULL)
                                idd = scsi_get_devid(did, cdai.provsiz,
                                    scsi_devid_is_lun_eui64);
                        if (idd == NULL)
                                idd = scsi_get_devid(did, cdai.provsiz,
                                    scsi_devid_is_lun_uuid);
                        if (idd == NULL)
                                idd = scsi_get_devid(did, cdai.provsiz,
                                    scsi_devid_is_lun_md5);
                } else
                        idd = NULL;

                if (idd == NULL)
                        idd = scsi_get_devid(did, cdai.provsiz,
                            scsi_devid_is_lun_t10);
                if (idd == NULL)
                        idd = scsi_get_devid(did, cdai.provsiz,
                            scsi_devid_is_lun_name);
                if (idd == NULL)
                        break;

                ret = 0;
                if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
                    SVPD_ID_CODESET_ASCII) {
                        if (idd->length < len) {
                                for (l = 0; l < idd->length; l++)
                                        buf[l] = idd->identifier[l] ?
                                            idd->identifier[l] : ' ';
                                buf[l] = 0;
                        } else
                                ret = EFAULT;
                        break;
                }
                if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
                    SVPD_ID_CODESET_UTF8) {
                        l = strnlen(idd->identifier, idd->length);
                        if (l < len) {
                                bcopy(idd->identifier, buf, l);
                                buf[l] = 0;
                        } else
                                ret = EFAULT;
                        break;
                }
                if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
                    SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
                        if ((idd->length - 2) * 2 + 4 >= len) {
                                ret = EFAULT;
                                break;
                        }
                        for (l = 2, o = 0; l < idd->length; l++) {
                                if (l == 6 || l == 8 || l == 10 || l == 12)
                                        o += sprintf(buf + o, "-");
                                o += sprintf(buf + o, "%02x",
                                    idd->identifier[l]);
                        }
                        break;
                }
                if (idd->length * 2 < len) {
                        for (l = 0; l < idd->length; l++)
                                sprintf(buf + l * 2, "%02x",
                                    idd->identifier[l]);
                } else
                        ret = EFAULT;
                break;
        default:
                if (cdai.provsiz < len) {
                        cdai.buf[cdai.provsiz] = 0;
                        ret = 0;
                } else
                        ret = EFAULT;
                break;
        }

out:
        if ((char *)cdai.buf != buf)
                free(cdai.buf, M_CAMXPT);
        return ret;
}

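/*
 * Illustrative sketch (added commentary, not part of this file): GEOM-facing
 * consumers call this with the path lock held, for example:
 *
 *      char ident[100];
 *
 *      cam_periph_lock(periph);
 *      if (xpt_getattr(ident, sizeof(ident), "GEOM::lunid",
 *          periph->path) == 0)
 *              printf("LUN id: %s\n", ident);
 *      cam_periph_unlock(periph);
 *
 * A nonzero return means the attribute is unsupported or unavailable, or
 * the supplied buffer was too small (EFAULT), per the switch above.
 */
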
1279 static dev_match_ret
1280 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1281             struct cam_eb *bus)
1282 {
1283         dev_match_ret retval;
1284         u_int i;
1285
1286         retval = DM_RET_NONE;
1287
1288         /*
1289          * If we aren't given something to match against, that's an error.
1290          */
1291         if (bus == NULL)
1292                 return(DM_RET_ERROR);
1293
1294         /*
1295          * If there are no match entries, then this bus matches no
1296          * matter what.
1297          */
1298         if ((patterns == NULL) || (num_patterns == 0))
1299                 return(DM_RET_DESCEND | DM_RET_COPY);
1300
1301         for (i = 0; i < num_patterns; i++) {
1302                 struct bus_match_pattern *cur_pattern;
1303                 struct device_match_pattern *dp = &patterns[i].pattern.device_pattern;
1304                 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1305
1306                 /*
1307                  * If the pattern in question isn't for a bus node, we
1308                  * aren't interested.  However, we do indicate to the
1309                  * calling routine that we should continue descending the
1310                  * tree, since the user wants to match against lower-level
1311                  * EDT elements.
1312                  */
1313                 if (patterns[i].type == DEV_MATCH_DEVICE &&
1314                     (dp->flags & DEV_MATCH_PATH) != 0 &&
1315                     dp->path_id != bus->path_id)
1316                         continue;
1317                 if (patterns[i].type == DEV_MATCH_PERIPH &&
1318                     (pp->flags & PERIPH_MATCH_PATH) != 0 &&
1319                     pp->path_id != bus->path_id)
1320                         continue;
1321                 if (patterns[i].type != DEV_MATCH_BUS) {
1322                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1323                                 retval |= DM_RET_DESCEND;
1324                         continue;
1325                 }
1326
1327                 cur_pattern = &patterns[i].pattern.bus_pattern;
1328
1329                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1330                  && (cur_pattern->path_id != bus->path_id))
1331                         continue;
1332
1333                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1334                  && (cur_pattern->bus_id != bus->sim->bus_id))
1335                         continue;
1336
1337                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1338                  && (cur_pattern->unit_number != bus->sim->unit_number))
1339                         continue;
1340
1341                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1342                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1343                              DEV_IDLEN) != 0))
1344                         continue;
1345
1346                 /*
1347                  * If we get to this point, the user definitely wants
1348                  * information on this bus.  So tell the caller to copy the
1349                  * data out.
1350                  */
1351                 retval |= DM_RET_COPY;
1352
1353                 /*
1354                  * If the return action has been set to descend, then we
1355                  * know that we've already seen a non-bus matching
1356                  * expression, therefore we need to further descend the tree.
1357                  * This won't change by continuing around the loop, so we
1358                  * go ahead and return.  If we haven't seen a non-bus
1359                  * matching expression, we keep going around the loop until
1360                  * we exhaust the matching expressions.  We'll set the stop
1361                  * flag once we fall out of the loop.
1362                  */
1363                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1364                         return(retval);
1365         }
1366
1367         /*
1368          * If the return action hasn't been set to descend yet, that means
1369          * we haven't seen anything other than bus matching patterns.  So
1370          * tell the caller to stop descending the tree -- the user doesn't
1371          * want to match against lower level tree elements.
1372          */
1373         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1374                 retval |= DM_RET_STOP;
1375
1376         return(retval);
1377 }
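
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * pattern that matches every bus driven by a SIM named "ahcich" would be
 * built roughly as
 *
 *      struct dev_match_pattern p;
 *
 *      p.type = DEV_MATCH_BUS;
 *      p.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *      strlcpy(p.pattern.bus_pattern.dev_name, "ahcich", DEV_IDLEN);
 *
 * and submitted via an XPT_DEV_MATCH CCB; xptbusmatch() then returns
 * DM_RET_COPY | DM_RET_STOP for each bus whose sim_name matches.
 */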
1378
1379 static dev_match_ret
1380 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1381                struct cam_ed *device)
1382 {
1383         dev_match_ret retval;
1384         u_int i;
1385
1386         retval = DM_RET_NONE;
1387
1388         /*
1389          * If we aren't given something to match against, that's an error.
1390          */
1391         if (device == NULL)
1392                 return(DM_RET_ERROR);
1393
1394         /*
1395          * If there are no match entries, then this device matches no
1396          * matter what.
1397          */
1398         if ((patterns == NULL) || (num_patterns == 0))
1399                 return(DM_RET_DESCEND | DM_RET_COPY);
1400
1401         for (i = 0; i < num_patterns; i++) {
1402                 struct device_match_pattern *cur_pattern;
1403                 struct scsi_vpd_device_id *device_id_page;
1404                 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1405
1406                 /*
1407                  * If the pattern in question isn't for a device node, we
1408                  * aren't interested.
1409                  */
1410                 if (patterns[i].type == DEV_MATCH_PERIPH &&
1411                     (pp->flags & PERIPH_MATCH_TARGET) != 0 &&
1412                     pp->target_id != device->target->target_id)
1413                         continue;
1414                 if (patterns[i].type == DEV_MATCH_PERIPH &&
1415                     (pp->flags & PERIPH_MATCH_LUN) != 0 &&
1416                     pp->target_lun != device->lun_id)
1417                         continue;
1418                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1419                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1420                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1421                                 retval |= DM_RET_DESCEND;
1422                         continue;
1423                 }
1424
1425                 cur_pattern = &patterns[i].pattern.device_pattern;
1426
1427                 /* Error out if mutually exclusive options are specified. */
1428                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1429                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1430                         return(DM_RET_ERROR);
1431
1432                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1433                  && (cur_pattern->path_id != device->target->bus->path_id))
1434                         continue;
1435
1436                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1437                  && (cur_pattern->target_id != device->target->target_id))
1438                         continue;
1439
1440                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1441                  && (cur_pattern->target_lun != device->lun_id))
1442                         continue;
1443
1444                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1445                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1446                                     (caddr_t)&cur_pattern->data.inq_pat,
1447                                     1, sizeof(cur_pattern->data.inq_pat),
1448                                     scsi_static_inquiry_match) == NULL))
1449                         continue;
1450
1451                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1452                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1453                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1454                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1455                                       device->device_id_len
1456                                     - SVPD_DEVICE_ID_HDR_LEN,
1457                                       cur_pattern->data.devid_pat.id,
1458                                       cur_pattern->data.devid_pat.id_len) != 0))
1459                         continue;
1460
1461                 /*
1462                  * If we get to this point, the user definitely wants
1463                  * information on this device.  So tell the caller to copy
1464                  * the data out.
1465                  */
1466                 retval |= DM_RET_COPY;
1467
1468                 /*
1469                  * If the return action has been set to descend, then we
1470                  * know that we've already seen a peripheral matching
1471                  * expression, therefore we need to further descend the tree.
1472                  * This won't change by continuing around the loop, so we
1473                  * go ahead and return.  If we haven't seen a peripheral
1474                  * matching expression, we keep going around the loop until
1475                  * we exhaust the matching expressions.  We'll set the stop
1476                  * flag once we fall out of the loop.
1477                  */
1478                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1479                         return(retval);
1480         }
1481
1482         /*
1483          * If the return action hasn't been set to descend yet, that means
1484          * we haven't seen any peripheral matching patterns.  So tell the
1485          * caller to stop descending the tree -- the user doesn't want to
1486          * match against lower level tree elements.
1487          */
1488         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1489                 retval |= DM_RET_STOP;
1490
1491         return(retval);
1492 }
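
/*
 * Illustrative sketch (hypothetical caller): matching a device by its
 * SCSI VPD device ID would fill in
 *
 *      p.type = DEV_MATCH_DEVICE;
 *      p.pattern.device_pattern.flags = DEV_MATCH_DEVID;
 *      p.pattern.device_pattern.data.devid_pat.id_len = idlen;
 *      memcpy(p.pattern.device_pattern.data.devid_pat.id, id, idlen);
 *
 * where id/idlen are the caller's descriptor bytes.  As enforced above,
 * combining DEV_MATCH_DEVID with DEV_MATCH_INQUIRY yields DM_RET_ERROR.
 */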
1493
1494 /*
1495  * Match a single peripheral against any number of match patterns.
1496  */
1497 static dev_match_ret
1498 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1499                struct cam_periph *periph)
1500 {
1501         dev_match_ret retval;
1502         u_int i;
1503
1504         /*
1505          * If we aren't given something to match against, that's an error.
1506          */
1507         if (periph == NULL)
1508                 return(DM_RET_ERROR);
1509
1510         /*
1511          * If there are no match entries, then this peripheral matches no
1512          * matter what.
1513          */
1514         if ((patterns == NULL) || (num_patterns == 0))
1515                 return(DM_RET_STOP | DM_RET_COPY);
1516
1517         /*
1518          * There aren't any nodes below a peripheral node, so there's no
1519          * reason to descend the tree any further.
1520          */
1521         retval = DM_RET_STOP;
1522
1523         for (i = 0; i < num_patterns; i++) {
1524                 struct periph_match_pattern *cur_pattern;
1525
1526                 /*
1527                  * If the pattern in question isn't for a peripheral, we
1528                  * aren't interested.
1529                  */
1530                 if (patterns[i].type != DEV_MATCH_PERIPH)
1531                         continue;
1532
1533                 cur_pattern = &patterns[i].pattern.periph_pattern;
1534
1535                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1536                  && (cur_pattern->path_id != periph->path->bus->path_id))
1537                         continue;
1538
1539                 /*
1540                  * For the target and LUN IDs, we have to make sure the
1541                  * target and device pointers aren't NULL.  The xpt
1542                  * peripheral has a wildcard target and device.
1543                  */
1544                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1545                  && ((periph->path->target == NULL)
1546                  ||(cur_pattern->target_id != periph->path->target->target_id)))
1547                         continue;
1548
1549                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1550                  && ((periph->path->device == NULL)
1551                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1552                         continue;
1553
1554                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1555                  && (cur_pattern->unit_number != periph->unit_number))
1556                         continue;
1557
1558                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1559                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1560                              DEV_IDLEN) != 0))
1561                         continue;
1562
1563                 /*
1564                  * If we get to this point, the user definitely wants
1565                  * information on this peripheral.  So tell the caller to
1566                  * copy the data out.
1567                  */
1568                 retval |= DM_RET_COPY;
1569
1570                 /*
1571                  * The return action has already been set to stop, since
1572                  * peripherals don't have any nodes below them in the EDT.
1573                  */
1574                 return(retval);
1575         }
1576
1577         /*
1578          * If we get to this point, the peripheral that was passed in
1579          * doesn't match any of the patterns.
1580          */
1581         return(retval);
1582 }
1583
1584 static int
1585 xptedtbusfunc(struct cam_eb *bus, void *arg)
1586 {
1587         struct ccb_dev_match *cdm;
1588         struct cam_et *target;
1589         dev_match_ret retval;
1590
1591         cdm = (struct ccb_dev_match *)arg;
1592
1593         /*
1594          * If our position is for something deeper in the tree, that means
1595          * that we've already seen this node.  So, we keep going down.
1596          */
1597         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1598          && (cdm->pos.cookie.bus == bus)
1599          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1600          && (cdm->pos.cookie.target != NULL))
1601                 retval = DM_RET_DESCEND;
1602         else
1603                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1604
1605         /*
1606          * If we got an error, bail out of the search.
1607          */
1608         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1609                 cdm->status = CAM_DEV_MATCH_ERROR;
1610                 return(0);
1611         }
1612
1613         /*
1614          * If the copy flag is set, copy this bus out.
1615          */
1616         if (retval & DM_RET_COPY) {
1617                 int spaceleft, j;
1618
1619                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1620                         sizeof(struct dev_match_result));
1621
1622                 /*
1623                  * If we don't have enough space to put in another
1624                  * match result, save our position and tell the
1625                  * user there are more devices to check.
1626                  */
1627                 if (spaceleft < sizeof(struct dev_match_result)) {
1628                         bzero(&cdm->pos, sizeof(cdm->pos));
1629                         cdm->pos.position_type =
1630                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1631
1632                         cdm->pos.cookie.bus = bus;
1633                         cdm->pos.generations[CAM_BUS_GENERATION] =
1634                                 xsoftc.bus_generation;
1635                         cdm->status = CAM_DEV_MATCH_MORE;
1636                         return(0);
1637                 }
1638                 j = cdm->num_matches;
1639                 cdm->num_matches++;
1640                 cdm->matches[j].type = DEV_MATCH_BUS;
1641                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1642                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1643                 cdm->matches[j].result.bus_result.unit_number =
1644                         bus->sim->unit_number;
1645                 strlcpy(cdm->matches[j].result.bus_result.dev_name,
1646                         bus->sim->sim_name,
1647                         sizeof(cdm->matches[j].result.bus_result.dev_name));
1648         }
1649
1650         /*
1651          * If the user is only interested in buses, there's no
1652          * reason to descend to the next level in the tree.
1653          */
1654         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1655                 return(1);
1656
1657         /*
1658          * If there is a target generation recorded, check it to
1659          * make sure the target list hasn't changed.
1660          */
1661         mtx_lock(&bus->eb_mtx);
1662         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1663          && (cdm->pos.cookie.bus == bus)
1664          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1665          && (cdm->pos.cookie.target != NULL)) {
1666                 if (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1667                     bus->generation) {
1668                         mtx_unlock(&bus->eb_mtx);
1669                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1670                         return (0);
1671                 }
1672                 target = (struct cam_et *)cdm->pos.cookie.target;
1673                 target->refcount++;
1674         } else
1675                 target = NULL;
1676         mtx_unlock(&bus->eb_mtx);
1677
1678         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1679 }
1680
1681 static int
1682 xptedttargetfunc(struct cam_et *target, void *arg)
1683 {
1684         struct ccb_dev_match *cdm;
1685         struct cam_eb *bus;
1686         struct cam_ed *device;
1687
1688         cdm = (struct ccb_dev_match *)arg;
1689         bus = target->bus;
1690
1691         /*
1692          * If there is a device list generation recorded, check it to
1693          * make sure the device list hasn't changed.
1694          */
1695         mtx_lock(&bus->eb_mtx);
1696         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1697          && (cdm->pos.cookie.bus == bus)
1698          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1699          && (cdm->pos.cookie.target == target)
1700          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1701          && (cdm->pos.cookie.device != NULL)) {
1702                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1703                     target->generation) {
1704                         mtx_unlock(&bus->eb_mtx);
1705                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1706                         return(0);
1707                 }
1708                 device = (struct cam_ed *)cdm->pos.cookie.device;
1709                 device->refcount++;
1710         } else
1711                 device = NULL;
1712         mtx_unlock(&bus->eb_mtx);
1713
1714         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1715 }
1716
1717 static int
1718 xptedtdevicefunc(struct cam_ed *device, void *arg)
1719 {
1720         struct cam_eb *bus;
1721         struct cam_periph *periph;
1722         struct ccb_dev_match *cdm;
1723         dev_match_ret retval;
1724
1725         cdm = (struct ccb_dev_match *)arg;
1726         bus = device->target->bus;
1727
1728         /*
1729          * If our position is for something deeper in the tree, that means
1730          * that we've already seen this node.  So, we keep going down.
1731          */
1732         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1733          && (cdm->pos.cookie.device == device)
1734          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1735          && (cdm->pos.cookie.periph != NULL))
1736                 retval = DM_RET_DESCEND;
1737         else
1738                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1739                                         device);
1740
1741         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1742                 cdm->status = CAM_DEV_MATCH_ERROR;
1743                 return(0);
1744         }
1745
1746         /*
1747          * If the copy flag is set, copy this device out.
1748          */
1749         if (retval & DM_RET_COPY) {
1750                 int spaceleft, j;
1751
1752                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1753                         sizeof(struct dev_match_result));
1754
1755                 /*
1756                  * If we don't have enough space to put in another
1757                  * match result, save our position and tell the
1758                  * user there are more devices to check.
1759                  */
1760                 if (spaceleft < sizeof(struct dev_match_result)) {
1761                         bzero(&cdm->pos, sizeof(cdm->pos));
1762                         cdm->pos.position_type =
1763                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1764                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1765
1766                         cdm->pos.cookie.bus = device->target->bus;
1767                         cdm->pos.generations[CAM_BUS_GENERATION] =
1768                                 xsoftc.bus_generation;
1769                         cdm->pos.cookie.target = device->target;
1770                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1771                                 device->target->bus->generation;
1772                         cdm->pos.cookie.device = device;
1773                         cdm->pos.generations[CAM_DEV_GENERATION] =
1774                                 device->target->generation;
1775                         cdm->status = CAM_DEV_MATCH_MORE;
1776                         return(0);
1777                 }
1778                 j = cdm->num_matches;
1779                 cdm->num_matches++;
1780                 cdm->matches[j].type = DEV_MATCH_DEVICE;
1781                 cdm->matches[j].result.device_result.path_id =
1782                         device->target->bus->path_id;
1783                 cdm->matches[j].result.device_result.target_id =
1784                         device->target->target_id;
1785                 cdm->matches[j].result.device_result.target_lun =
1786                         device->lun_id;
1787                 cdm->matches[j].result.device_result.protocol =
1788                         device->protocol;
1789                 bcopy(&device->inq_data,
1790                       &cdm->matches[j].result.device_result.inq_data,
1791                       sizeof(struct scsi_inquiry_data));
1792                 bcopy(&device->ident_data,
1793                       &cdm->matches[j].result.device_result.ident_data,
1794                       sizeof(struct ata_params));
1795
1796                 /* Let the user know whether this device is unconfigured */
1797                 if (device->flags & CAM_DEV_UNCONFIGURED)
1798                         cdm->matches[j].result.device_result.flags =
1799                                 DEV_RESULT_UNCONFIGURED;
1800                 else
1801                         cdm->matches[j].result.device_result.flags =
1802                                 DEV_RESULT_NOFLAG;
1803         }
1804
1805         /*
1806          * If the user isn't interested in peripherals, don't descend
1807          * the tree any further.
1808          */
1809         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1810                 return(1);
1811
1812         /*
1813          * If there is a peripheral list generation recorded, make sure
1814          * it hasn't changed.
1815          */
1816         xpt_lock_buses();
1817         mtx_lock(&bus->eb_mtx);
1818         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1819          && (cdm->pos.cookie.bus == bus)
1820          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1821          && (cdm->pos.cookie.target == device->target)
1822          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1823          && (cdm->pos.cookie.device == device)
1824          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1825          && (cdm->pos.cookie.periph != NULL)) {
1826                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1827                     device->generation) {
1828                         mtx_unlock(&bus->eb_mtx);
1829                         xpt_unlock_buses();
1830                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1831                         return(0);
1832                 }
1833                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1834                 periph->refcount++;
1835         } else
1836                 periph = NULL;
1837         mtx_unlock(&bus->eb_mtx);
1838         xpt_unlock_buses();
1839
1840         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1841 }
1842
1843 static int
1844 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1845 {
1846         struct ccb_dev_match *cdm;
1847         dev_match_ret retval;
1848
1849         cdm = (struct ccb_dev_match *)arg;
1850
1851         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1852
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1854                 cdm->status = CAM_DEV_MATCH_ERROR;
1855                 return(0);
1856         }
1857
1858         /*
1859          * If the copy flag is set, copy this peripheral out.
1860          */
1861         if (retval & DM_RET_COPY) {
1862                 int spaceleft, j;
1863                 size_t l;
1864
1865                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1866                         sizeof(struct dev_match_result));
1867
1868                 /*
1869                  * If we don't have enough space to put in another
1870                  * match result, save our position and tell the
1871                  * user there are more devices to check.
1872                  */
1873                 if (spaceleft < sizeof(struct dev_match_result)) {
1874                         bzero(&cdm->pos, sizeof(cdm->pos));
1875                         cdm->pos.position_type =
1876                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1877                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1878                                 CAM_DEV_POS_PERIPH;
1879
1880                         cdm->pos.cookie.bus = periph->path->bus;
1881                         cdm->pos.generations[CAM_BUS_GENERATION] =
1882                                 xsoftc.bus_generation;
1883                         cdm->pos.cookie.target = periph->path->target;
1884                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1885                                 periph->path->bus->generation;
1886                         cdm->pos.cookie.device = periph->path->device;
1887                         cdm->pos.generations[CAM_DEV_GENERATION] =
1888                                 periph->path->target->generation;
1889                         cdm->pos.cookie.periph = periph;
1890                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
1891                                 periph->path->device->generation;
1892                         cdm->status = CAM_DEV_MATCH_MORE;
1893                         return(0);
1894                 }
1895
1896                 j = cdm->num_matches;
1897                 cdm->num_matches++;
1898                 cdm->matches[j].type = DEV_MATCH_PERIPH;
1899                 cdm->matches[j].result.periph_result.path_id =
1900                         periph->path->bus->path_id;
1901                 cdm->matches[j].result.periph_result.target_id =
1902                         periph->path->target->target_id;
1903                 cdm->matches[j].result.periph_result.target_lun =
1904                         periph->path->device->lun_id;
1905                 cdm->matches[j].result.periph_result.unit_number =
1906                         periph->unit_number;
1907                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
1908                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
1909                         periph->periph_name, l);
1910         }
1911
1912         return(1);
1913 }
1914
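/*
 * Top-level EDT match: validate the saved bus list generation, take a
 * reference on the bus where the previous pass stopped (if any), and
 * resume the traversal from there.  A caller (illustrative sketch; the
 * consume() helper is hypothetical) drives this iteratively:
 *
 *      do {
 *              xpt_action((union ccb *)cdm);
 *              consume(cdm->matches, cdm->num_matches);
 *      } while (cdm->status == CAM_DEV_MATCH_MORE);
 *
 * keeping cdm->pos untouched between passes.  CAM_DEV_MATCH_LIST_CHANGED
 * means a generation went stale and the walk must restart from a zeroed
 * cdm->pos.
 */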
1915 static int
1916 xptedtmatch(struct ccb_dev_match *cdm)
1917 {
1918         struct cam_eb *bus;
1919         int ret;
1920
1921         cdm->num_matches = 0;
1922
1923         /*
1924          * Check the bus list generation.  If it has changed, the user
1925          * needs to reset everything and start over.
1926          */
1927         xpt_lock_buses();
1928         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1929          && (cdm->pos.cookie.bus != NULL)) {
1930                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1931                     xsoftc.bus_generation) {
1932                         xpt_unlock_buses();
1933                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1934                         return(0);
1935                 }
1936                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
1937                 bus->refcount++;
1938         } else
1939                 bus = NULL;
1940         xpt_unlock_buses();
1941
1942         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1943
1944         /*
1945          * If we get back 0, that means that we had to stop before fully
1946          * traversing the EDT.  It also means that one of the subroutines
1947          * has set the status field to the proper value.  If we get back 1,
1948          * we've fully traversed the EDT and copied out any matching entries.
1949          */
1950         if (ret == 1)
1951                 cdm->status = CAM_DEV_MATCH_LAST;
1952
1953         return(ret);
1954 }
1955
1956 static int
1957 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1958 {
1959         struct cam_periph *periph;
1960         struct ccb_dev_match *cdm;
1961
1962         cdm = (struct ccb_dev_match *)arg;
1963
1964         xpt_lock_buses();
1965         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1966          && (cdm->pos.cookie.pdrv == pdrv)
1967          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1968          && (cdm->pos.cookie.periph != NULL)) {
1969                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1970                     (*pdrv)->generation) {
1971                         xpt_unlock_buses();
1972                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1973                         return(0);
1974                 }
1975                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1976                 periph->refcount++;
1977         } else
1978                 periph = NULL;
1979         xpt_unlock_buses();
1980
1981         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1982 }
1983
1984 static int
1985 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1986 {
1987         struct ccb_dev_match *cdm;
1988         dev_match_ret retval;
1989
1990         cdm = (struct ccb_dev_match *)arg;
1991
1992         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1993
1994         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1995                 cdm->status = CAM_DEV_MATCH_ERROR;
1996                 return(0);
1997         }
1998
1999         /*
2000          * If the copy flag is set, copy this peripheral out.
2001          */
2002         if (retval & DM_RET_COPY) {
2003                 int spaceleft, j;
2004                 size_t l;
2005
2006                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2007                         sizeof(struct dev_match_result));
2008
2009                 /*
2010                  * If we don't have enough space to put in another
2011                  * match result, save our position and tell the
2012                  * user there are more devices to check.
2013                  */
2014                 if (spaceleft < sizeof(struct dev_match_result)) {
2015                         struct periph_driver **pdrv;
2016
2017                         pdrv = NULL;
2018                         bzero(&cdm->pos, sizeof(cdm->pos));
2019                         cdm->pos.position_type =
2020                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2021                                 CAM_DEV_POS_PERIPH;
2022
2023                         /*
2024                          * This may look a bit nonsensical, but it is
2025                          * actually quite logical.  There are very few
2026                          * peripheral drivers, and bloating every peripheral
2027                          * structure with a pointer back to its parent
2028                          * peripheral driver linker set entry would cost
2029                          * more in the long run than doing this quick lookup.
2030                          */
2031                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2032                                 if (strcmp((*pdrv)->driver_name,
2033                                     periph->periph_name) == 0)
2034                                         break;
2035                         }
2036
2037                         if (*pdrv == NULL) {
2038                                 cdm->status = CAM_DEV_MATCH_ERROR;
2039                                 return(0);
2040                         }
2041
2042                         cdm->pos.cookie.pdrv = pdrv;
2043                         /*
2044                          * The periph generation slot does double duty, as
2045                          * does the periph pointer slot.  They are used for
2046                          * both edt and pdrv lookups and positioning.
2047                          */
2048                         cdm->pos.cookie.periph = periph;
2049                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2050                                 (*pdrv)->generation;
2051                         cdm->status = CAM_DEV_MATCH_MORE;
2052                         return(0);
2053                 }
2054
2055                 j = cdm->num_matches;
2056                 cdm->num_matches++;
2057                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2058                 cdm->matches[j].result.periph_result.path_id =
2059                         periph->path->bus->path_id;
2060
2061                 /*
2062                  * The transport layer peripheral doesn't have a target or
2063                  * LUN.
2064                  */
2065                 if (periph->path->target)
2066                         cdm->matches[j].result.periph_result.target_id =
2067                                 periph->path->target->target_id;
2068                 else
2069                         cdm->matches[j].result.periph_result.target_id =
2070                                 CAM_TARGET_WILDCARD;
2071
2072                 if (periph->path->device)
2073                         cdm->matches[j].result.periph_result.target_lun =
2074                                 periph->path->device->lun_id;
2075                 else
2076                         cdm->matches[j].result.periph_result.target_lun =
2077                                 CAM_LUN_WILDCARD;
2078
2079                 cdm->matches[j].result.periph_result.unit_number =
2080                         periph->unit_number;
2081                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2082                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2083                         periph->periph_name, l);
2084         }
2085
2086         return(1);
2087 }
2088
2089 static int
2090 xptperiphlistmatch(struct ccb_dev_match *cdm)
2091 {
2092         int ret;
2093
2094         cdm->num_matches = 0;
2095
2096         /*
2097          * At this point in the EDT traversal function, we check the bus
2098          * list generation to make sure that no buses have been added or
2099          * removed since the user last sent an XPT_DEV_MATCH CCB through.
2100          * For the peripheral driver list traversal function, however, we
2101          * don't have to worry about new peripheral driver types coming or
2102          * going; they're in a linker set, and therefore can't change
2103          * without a recompile.
2104          */
2105
2106         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2107          && (cdm->pos.cookie.pdrv != NULL))
2108                 ret = xptpdrvtraverse(
2109                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2110                                 xptplistpdrvfunc, cdm);
2111         else
2112                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2113
2114         /*
2115          * If we get back 0, that means that we had to stop before fully
2116          * traversing the peripheral driver tree.  It also means that one of
2117          * the subroutines has set the status field to the proper value.  If
2118          * we get back 1, we've fully traversed the peripheral driver tree
2119          * and copied out any matching entries.
2120          */
2121         if (ret == 1)
2122                 cdm->status = CAM_DEV_MATCH_LAST;
2123
2124         return(ret);
2125 }
2126
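/*
 * Walk every bus in the EDT, invoking tr_func on each until it returns 0
 * or the list is exhausted.  Note the idiom, shared by the traversers
 * below: the next element is referenced before the current one is
 * released, so the walk keeps a stable foothold in the list even though
 * the callback runs without the topology lock held.
 */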
2127 static int
2128 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2129 {
2130         struct cam_eb *bus, *next_bus;
2131         int retval;
2132
2133         retval = 1;
2134         if (start_bus)
2135                 bus = start_bus;
2136         else {
2137                 xpt_lock_buses();
2138                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2139                 if (bus == NULL) {
2140                         xpt_unlock_buses();
2141                         return (retval);
2142                 }
2143                 bus->refcount++;
2144                 xpt_unlock_buses();
2145         }
2146         for (; bus != NULL; bus = next_bus) {
2147                 retval = tr_func(bus, arg);
2148                 if (retval == 0) {
2149                         xpt_release_bus(bus);
2150                         break;
2151                 }
2152                 xpt_lock_buses();
2153                 next_bus = TAILQ_NEXT(bus, links);
2154                 if (next_bus)
2155                         next_bus->refcount++;
2156                 xpt_unlock_buses();
2157                 xpt_release_bus(bus);
2158         }
2159         return(retval);
2160 }
2161
2162 static int
2163 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2164                   xpt_targetfunc_t *tr_func, void *arg)
2165 {
2166         struct cam_et *target, *next_target;
2167         int retval;
2168
2169         retval = 1;
2170         if (start_target)
2171                 target = start_target;
2172         else {
2173                 mtx_lock(&bus->eb_mtx);
2174                 target = TAILQ_FIRST(&bus->et_entries);
2175                 if (target == NULL) {
2176                         mtx_unlock(&bus->eb_mtx);
2177                         return (retval);
2178                 }
2179                 target->refcount++;
2180                 mtx_unlock(&bus->eb_mtx);
2181         }
2182         for (; target != NULL; target = next_target) {
2183                 retval = tr_func(target, arg);
2184                 if (retval == 0) {
2185                         xpt_release_target(target);
2186                         break;
2187                 }
2188                 mtx_lock(&bus->eb_mtx);
2189                 next_target = TAILQ_NEXT(target, links);
2190                 if (next_target)
2191                         next_target->refcount++;
2192                 mtx_unlock(&bus->eb_mtx);
2193                 xpt_release_target(target);
2194         }
2195         return(retval);
2196 }
2197
2198 static int
2199 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2200                   xpt_devicefunc_t *tr_func, void *arg)
2201 {
2202         struct cam_eb *bus;
2203         struct cam_ed *device, *next_device;
2204         int retval;
2205
2206         retval = 1;
2207         bus = target->bus;
2208         if (start_device)
2209                 device = start_device;
2210         else {
2211                 mtx_lock(&bus->eb_mtx);
2212                 device = TAILQ_FIRST(&target->ed_entries);
2213                 if (device == NULL) {
2214                         mtx_unlock(&bus->eb_mtx);
2215                         return (retval);
2216                 }
2217                 device->refcount++;
2218                 mtx_unlock(&bus->eb_mtx);
2219         }
2220         for (; device != NULL; device = next_device) {
2221                 mtx_lock(&device->device_mtx);
2222                 retval = tr_func(device, arg);
2223                 mtx_unlock(&device->device_mtx);
2224                 if (retval == 0) {
2225                         xpt_release_device(device);
2226                         break;
2227                 }
2228                 mtx_lock(&bus->eb_mtx);
2229                 next_device = TAILQ_NEXT(device, links);
2230                 if (next_device)
2231                         next_device->refcount++;
2232                 mtx_unlock(&bus->eb_mtx);
2233                 xpt_release_device(device);
2234         }
2235         return(retval);
2236 }
2237
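/*
 * Walk the peripherals attached to a device.  Instances flagged
 * CAM_PERIPH_FREE are already being torn down, so they are skipped
 * rather than referenced.
 */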
2238 static int
2239 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2240                   xpt_periphfunc_t *tr_func, void *arg)
2241 {
2242         struct cam_eb *bus;
2243         struct cam_periph *periph, *next_periph;
2244         int retval;
2245
2246         retval = 1;
2247
2248         bus = device->target->bus;
2249         if (start_periph)
2250                 periph = start_periph;
2251         else {
2252                 xpt_lock_buses();
2253                 mtx_lock(&bus->eb_mtx);
2254                 periph = SLIST_FIRST(&device->periphs);
2255                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2256                         periph = SLIST_NEXT(periph, periph_links);
2257                 if (periph == NULL) {
2258                         mtx_unlock(&bus->eb_mtx);
2259                         xpt_unlock_buses();
2260                         return (retval);
2261                 }
2262                 periph->refcount++;
2263                 mtx_unlock(&bus->eb_mtx);
2264                 xpt_unlock_buses();
2265         }
2266         for (; periph != NULL; periph = next_periph) {
2267                 retval = tr_func(periph, arg);
2268                 if (retval == 0) {
2269                         cam_periph_release_locked(periph);
2270                         break;
2271                 }
2272                 xpt_lock_buses();
2273                 mtx_lock(&bus->eb_mtx);
2274                 next_periph = SLIST_NEXT(periph, periph_links);
2275                 while (next_periph != NULL &&
2276                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2277                         next_periph = SLIST_NEXT(next_periph, periph_links);
2278                 if (next_periph)
2279                         next_periph->refcount++;
2280                 mtx_unlock(&bus->eb_mtx);
2281                 xpt_unlock_buses();
2282                 cam_periph_release_locked(periph);
2283         }
2284         return(retval);
2285 }
2286
2287 static int
2288 xptpdrvtraverse(struct periph_driver **start_pdrv,
2289                 xpt_pdrvfunc_t *tr_func, void *arg)
2290 {
2291         struct periph_driver **pdrv;
2292         int retval;
2293
2294         retval = 1;
2295
2296         /*
2297          * We don't traverse the peripheral driver list like we do the
2298          * other lists, because it is a linker set, and therefore cannot be
2299          * changed during runtime.  If the peripheral driver list is ever
2300          * redone to be something other than a linker set (i.e., it can
2301          * change while the system is running), the list traversal should
2302          * be modified to work like the other traversal functions.
2303          */
2304         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2305              *pdrv != NULL; pdrv++) {
2306                 retval = tr_func(pdrv, arg);
2307
2308                 if (retval == 0)
2309                         return(retval);
2310         }
2311
2312         return(retval);
2313 }
2314
2315 static int
2316 xptpdperiphtraverse(struct periph_driver **pdrv,
2317                     struct cam_periph *start_periph,
2318                     xpt_periphfunc_t *tr_func, void *arg)
2319 {
2320         struct cam_periph *periph, *next_periph;
2321         int retval;
2322
2323         retval = 1;
2324
2325         if (start_periph)
2326                 periph = start_periph;
2327         else {
2328                 xpt_lock_buses();
2329                 periph = TAILQ_FIRST(&(*pdrv)->units);
2330                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2331                         periph = TAILQ_NEXT(periph, unit_links);
2332                 if (periph == NULL) {
2333                         xpt_unlock_buses();
2334                         return (retval);
2335                 }
2336                 periph->refcount++;
2337                 xpt_unlock_buses();
2338         }
2339         for (; periph != NULL; periph = next_periph) {
2340                 cam_periph_lock(periph);
2341                 retval = tr_func(periph, arg);
2342                 cam_periph_unlock(periph);
2343                 if (retval == 0) {
2344                         cam_periph_release(periph);
2345                         break;
2346                 }
2347                 xpt_lock_buses();
2348                 next_periph = TAILQ_NEXT(periph, unit_links);
2349                 while (next_periph != NULL &&
2350                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2351                         next_periph = TAILQ_NEXT(next_periph, unit_links);
2352                 if (next_periph)
2353                         next_periph->refcount++;
2354                 xpt_unlock_buses();
2355                 cam_periph_release(periph);
2356         }
2357         return(retval);
2358 }
2359
2360 static int
2361 xptdefbusfunc(struct cam_eb *bus, void *arg)
2362 {
2363         struct xpt_traverse_config *tr_config;
2364
2365         tr_config = (struct xpt_traverse_config *)arg;
2366
2367         if (tr_config->depth == XPT_DEPTH_BUS) {
2368                 xpt_busfunc_t *tr_func;
2369
2370                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2371
2372                 return(tr_func(bus, tr_config->tr_arg));
2373         } else
2374                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2375 }
2376
2377 static int
2378 xptdeftargetfunc(struct cam_et *target, void *arg)
2379 {
2380         struct xpt_traverse_config *tr_config;
2381
2382         tr_config = (struct xpt_traverse_config *)arg;
2383
2384         if (tr_config->depth == XPT_DEPTH_TARGET) {
2385                 xpt_targetfunc_t *tr_func;
2386
2387                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2388
2389                 return(tr_func(target, tr_config->tr_arg));
2390         } else
2391                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2392 }
2393
2394 static int
2395 xptdefdevicefunc(struct cam_ed *device, void *arg)
2396 {
2397         struct xpt_traverse_config *tr_config;
2398
2399         tr_config = (struct xpt_traverse_config *)arg;
2400
2401         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2402                 xpt_devicefunc_t *tr_func;
2403
2404                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2405
2406                 return(tr_func(device, tr_config->tr_arg));
2407         } else
2408                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2409 }
2410
2411 static int
2412 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2413 {
2414         struct xpt_traverse_config *tr_config;
2415         xpt_periphfunc_t *tr_func;
2416
2417         tr_config = (struct xpt_traverse_config *)arg;
2418
2419         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2420
2421         /*
2422          * Unlike the other default functions, we don't check for depth
2423          * here.  The peripheral driver level is the last level in the EDT,
2424          * so if we're here, we should execute the function in question.
2425          */
2426         return(tr_func(periph, tr_config->tr_arg));
2427 }
2428
2429 /*
2430  * Execute the given function for every bus in the EDT.
2431  */
2432 static int
2433 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2434 {
2435         struct xpt_traverse_config tr_config;
2436
2437         tr_config.depth = XPT_DEPTH_BUS;
2438         tr_config.tr_func = tr_func;
2439         tr_config.tr_arg = arg;
2440
2441         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2442 }
2443
2444 /*
2445  * Execute the given function for every device in the EDT.
2446  */
2447 static int
2448 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2449 {
2450         struct xpt_traverse_config tr_config;
2451
2452         tr_config.depth = XPT_DEPTH_DEVICE;
2453         tr_config.tr_func = tr_func;
2454         tr_config.tr_arg = arg;
2455
2456         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2457 }
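
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * callback counting configured devices.  Returning 1 keeps the
 * traversal going; tr_func runs with the device mutex held.
 *
 *      static int
 *      count_dev(struct cam_ed *device, void *arg)
 *      {
 *              if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *                      (*(int *)arg)++;
 *              return (1);
 *      }
 *
 * invoked as: int n = 0; xpt_for_all_devices(count_dev, &n);
 */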
2458
2459 static int
2460 xptsetasyncfunc(struct cam_ed *device, void *arg)
2461 {
2462         struct cam_path path;
2463         struct ccb_getdev cgd;
2464         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2465
2466         /*
2467          * Don't report unconfigured devices (wildcard devs,
2468          * devices only for target mode, device instances
2469          * that have been invalidated but are waiting for
2470          * their last reference count to be released).
2471          */
2472         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2473                 return (1);
2474
2475         memset(&cgd, 0, sizeof(cgd));
2476         xpt_compile_path(&path,
2477                          NULL,
2478                          device->target->bus->path_id,
2479                          device->target->target_id,
2480                          device->lun_id);
2481         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2482         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2483         xpt_action((union ccb *)&cgd);
2484         csa->callback(csa->callback_arg,
2485                             AC_FOUND_DEVICE,
2486                             &path, &cgd);
2487         xpt_release_path(&path);
2488
2489         return(1);
2490 }
2491
2492 static int
2493 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2494 {
2495         struct cam_path path;
2496         struct ccb_pathinq cpi;
2497         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2498
2499         xpt_compile_path(&path, /*periph*/NULL,
2500                          bus->path_id,
2501                          CAM_TARGET_WILDCARD,
2502                          CAM_LUN_WILDCARD);
2503         xpt_path_lock(&path);
2504         xpt_path_inq(&cpi, &path);
2505         csa->callback(csa->callback_arg,
2506                             AC_PATH_REGISTERED,
2507                             &path, &cpi);
2508         xpt_path_unlock(&path);
2509         xpt_release_path(&path);
2510
2511         return(1);
2512 }
2513
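/*
 * Entry point for all CCB processing: mark the CCB in progress and
 * dispatch it through the owning transport's action method (typically
 * xpt_action_default() below, unless the transport overrides it).
 */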
2514 void
2515 xpt_action(union ccb *start_ccb)
2516 {
2517
2518         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2519             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2520                 xpt_action_name(start_ccb->ccb_h.func_code)));
2521
2522         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2523         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2524 }
2525
2526 void
2527 xpt_action_default(union ccb *start_ccb)
2528 {
2529         struct cam_path *path;
2530         struct cam_sim *sim;
2531         struct mtx *mtx;
2532
2533         path = start_ccb->ccb_h.path;
2534         CAM_DEBUG(path, CAM_DEBUG_TRACE,
2535             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2536                 xpt_action_name(start_ccb->ccb_h.func_code)));
2537
2538         switch (start_ccb->ccb_h.func_code) {
2539         case XPT_SCSI_IO:
2540         {
2541                 struct cam_ed *device;
2542
2543                 /*
2544                  * For the sake of compatibility with SCSI-1
2545                  * devices that may not understand the identify
2546                  * message, we include lun information in the
2547                  * second byte of all commands.  SCSI-1 specifies
2548                  * that luns are a 3 bit value and reserves only 3
2549                  * bits for lun information in the CDB.  Later
2550                  * revisions of the SCSI spec allow for more than 8
2551                  * luns, but have deprecated lun information in the
2552                  * CDB.  So, if the lun won't fit, we must omit it.
2553                  *
2554                  * Also be aware that during initial probing for devices,
2555                  * the inquiry information is unknown but initialized to 0.
2556                  * This means that this code will be exercised while probing
2557                  * devices with an ANSI revision greater than 2.
2558                  */
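                /*
                 * Example (illustrative): for LUN 3 this ORs (3 << 5) ==
                 * 0x60 into CDB byte 1, i.e. bits 7-5, the pre-SCSI-2
                 * LUN field.
                 */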
2559                 device = path->device;
2560                 if (device->protocol_version <= SCSI_REV_2
2561                  && start_ccb->ccb_h.target_lun < 8
2562                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2563                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2564                             start_ccb->ccb_h.target_lun << 5;
2565                 }
2566                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2567         }
2568         /* FALLTHROUGH */
2569         case XPT_TARGET_IO:
2570         case XPT_CONT_TARGET_IO:
2571                 start_ccb->csio.sense_resid = 0;
2572                 start_ccb->csio.resid = 0;
2573                 /* FALLTHROUGH */
2574         case XPT_ATA_IO:
2575                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2576                         start_ccb->ataio.resid = 0;
2577                 /* FALLTHROUGH */
2578         case XPT_NVME_IO:
2579         case XPT_NVME_ADMIN:
2580         case XPT_MMC_IO:
2581         case XPT_MMC_GET_TRAN_SETTINGS:
2582         case XPT_MMC_SET_TRAN_SETTINGS:
2583         case XPT_RESET_DEV:
2584         case XPT_ENG_EXEC:
2585         case XPT_SMP_IO:
2586         {
2587                 struct cam_devq *devq;
2588
2589                 devq = path->bus->sim->devq;
2590                 mtx_lock(&devq->send_mtx);
2591                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2592                 if (xpt_schedule_devq(devq, path->device) != 0)
2593                         xpt_run_devq(devq);
2594                 mtx_unlock(&devq->send_mtx);
2595                 break;
2596         }
2597         case XPT_CALC_GEOMETRY:
2598                 /* Filter out garbage */
2599                 if (start_ccb->ccg.block_size == 0
2600                  || start_ccb->ccg.volume_size == 0) {
2601                         start_ccb->ccg.cylinders = 0;
2602                         start_ccb->ccg.heads = 0;
2603                         start_ccb->ccg.secs_per_track = 0;
2604                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2605                         break;
2606                 }
2607                 goto call_sim;
2608         case XPT_ABORT:
2609         {
2610                 union ccb *abort_ccb;
2611
2612                 abort_ccb = start_ccb->cab.abort_ccb;
2613                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2614                         struct cam_ed *device;
2615                         struct cam_devq *devq;
2616
2617                         device = abort_ccb->ccb_h.path->device;
2618                         devq = device->sim->devq;
2619
2620                         mtx_lock(&devq->send_mtx);
2621                         if (abort_ccb->ccb_h.pinfo.index > 0) {
2622                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2623                                 abort_ccb->ccb_h.status =
2624                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2625                                 xpt_freeze_devq_device(device, 1);
2626                                 mtx_unlock(&devq->send_mtx);
2627                                 xpt_done(abort_ccb);
2628                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2629                                 break;
2630                         }
2631                         mtx_unlock(&devq->send_mtx);
2632
2633                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2634                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2635                                 /*
2636                                  * We've caught this ccb en route to
2637                                  * the SIM.  Flag it for abort and the
2638                                  * SIM will do so just before starting
2639                                  * real work on the CCB.
2640                                  */
2641                                 abort_ccb->ccb_h.status =
2642                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2643                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2644                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2645                                 break;
2646                         }
2647                 }
2648                 if (XPT_FC_IS_QUEUED(abort_ccb)
2649                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2650                         /*
2651                          * It's already completed but waiting
2652                          * for our SWI to get to it.
2653                          */
2654                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2655                         break;
2656                 }
2657                 /*
2658                  * If we weren't able to take care of the abort request
2659                  * in the XPT, pass the request down to the SIM for processing.
2660                  */
2661         }
2662         /* FALLTHROUGH */
2663         case XPT_ACCEPT_TARGET_IO:
2664         case XPT_EN_LUN:
2665         case XPT_IMMED_NOTIFY:
2666         case XPT_NOTIFY_ACK:
2667         case XPT_RESET_BUS:
2668         case XPT_IMMEDIATE_NOTIFY:
2669         case XPT_NOTIFY_ACKNOWLEDGE:
2670         case XPT_GET_SIM_KNOB_OLD:
2671         case XPT_GET_SIM_KNOB:
2672         case XPT_SET_SIM_KNOB:
2673         case XPT_GET_TRAN_SETTINGS:
2674         case XPT_SET_TRAN_SETTINGS:
2675         case XPT_PATH_INQ:
2676 call_sim:
2677                 sim = path->bus->sim;
2678                 mtx = sim->mtx;
2679                 if (mtx && !mtx_owned(mtx))
2680                         mtx_lock(mtx);
2681                 else
2682                         mtx = NULL;
2683
2684                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2685                     ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2686                 (*(sim->sim_action))(sim, start_ccb);
2687                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2688                     ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2689                 if (mtx)
2690                         mtx_unlock(mtx);
2691                 break;
2692         case XPT_PATH_STATS:
2693                 start_ccb->cpis.last_reset = path->bus->last_reset;
2694                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2695                 break;
2696         case XPT_GDEV_TYPE:
2697         {
2698                 struct cam_ed *dev;
2699
2700                 dev = path->device;
2701                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2702                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2703                 } else {
2704                         struct ccb_getdev *cgd;
2705
2706                         cgd = &start_ccb->cgd;
2707                         cgd->protocol = dev->protocol;
2708                         cgd->inq_data = dev->inq_data;
2709                         cgd->ident_data = dev->ident_data;
2710                         cgd->inq_flags = dev->inq_flags;
2711                         cgd->ccb_h.status = CAM_REQ_CMP;
2712                         cgd->serial_num_len = dev->serial_num_len;
2713                         if ((dev->serial_num_len > 0)
2714                          && (dev->serial_num != NULL))
2715                                 bcopy(dev->serial_num, cgd->serial_num,
2716                                       dev->serial_num_len);
2717                 }
2718                 break;
2719         }
2720         case XPT_GDEV_STATS:
2721         {
2722                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
2723                 struct cam_ed *dev = path->device;
2724                 struct cam_eb *bus = path->bus;
2725                 struct cam_et *tar = path->target;
2726                 struct cam_devq *devq = bus->sim->devq;
2727
2728                 mtx_lock(&devq->send_mtx);
2729                 cgds->dev_openings = dev->ccbq.dev_openings;
2730                 cgds->dev_active = dev->ccbq.dev_active;
2731                 cgds->allocated = dev->ccbq.allocated;
2732                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2733                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2734                 cgds->last_reset = tar->last_reset;
2735                 cgds->maxtags = dev->maxtags;
2736                 cgds->mintags = dev->mintags;
2737                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2738                         cgds->last_reset = bus->last_reset;
2739                 mtx_unlock(&devq->send_mtx);
2740                 cgds->ccb_h.status = CAM_REQ_CMP;
2741                 break;
2742         }
2743         case XPT_GDEVLIST:
2744         {
2745                 struct cam_periph       *nperiph;
2746                 struct periph_list      *periph_head;
2747                 struct ccb_getdevlist   *cgdl;
2748                 u_int                   i;
2749                 struct cam_ed           *device;
2750                 bool                    found;
2751
2752                 found = false;
2753
2754                 /*
2755                  * Don't want anyone mucking with our data.
2756                  */
2757                 device = path->device;
2758                 periph_head = &device->periphs;
2759                 cgdl = &start_ccb->cgdl;
2760
2761                 /*
2762                  * Check and see if the list has changed since the user
2763                  * last requested a list member.  If so, tell them that the
2764                  * list has changed, and therefore they need to start over
2765                  * from the beginning.
2766                  */
2767                 if ((cgdl->index != 0) &&
2768                     (cgdl->generation != device->generation)) {
2769                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2770                         break;
2771                 }
2772
2773                 /*
2774                  * Traverse the list of peripherals and attempt to find
2775                  * the requested peripheral.
2776                  */
2777                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
2778                      (nperiph != NULL) && (i <= cgdl->index);
2779                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2780                         if (i == cgdl->index) {
2781                                 strlcpy(cgdl->periph_name,
2782                                         nperiph->periph_name,
2783                                         sizeof(cgdl->periph_name));
2784                                 cgdl->unit_number = nperiph->unit_number;
2785                                 found = true;
2786                         }
2787                 }
2788                 if (!found) {
2789                         cgdl->status = CAM_GDEVLIST_ERROR;
2790                         break;
2791                 }
2792
2793                 if (nperiph == NULL)
2794                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2795                 else
2796                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2797
2798                 cgdl->index++;
2799                 cgdl->generation = device->generation;
2800
2801                 cgdl->ccb_h.status = CAM_REQ_CMP;
2802                 break;
2803         }
2804         case XPT_DEV_MATCH:
2805         {
2806                 dev_pos_type position_type;
2807                 struct ccb_dev_match *cdm;
2808
2809                 cdm = &start_ccb->cdm;
2810
2811                 /*
2812                  * There are two ways of getting at information in the EDT.
2813                  * The first way is via the primary EDT tree.  It starts
2814                  * with a list of buses, then a list of targets on a bus,
2815                  * then devices/luns on a target, and then peripherals on a
2816                  * device/lun.  The "other" way is by the peripheral driver
2817          * lists.  The peripheral driver lists are (obviously)
2818          * organized by peripheral driver, so it makes sense to
2819                  * use the peripheral driver list if the user is looking
2820                  * for something like "da1", or all "da" devices.  If the
2821                  * user is looking for something on a particular bus/target
2822                  * or lun, it's generally better to go through the EDT tree.
2823                  */
2824
2825                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2826                         position_type = cdm->pos.position_type;
2827                 else {
2828                         u_int i;
2829
2830                         position_type = CAM_DEV_POS_NONE;
2831
2832                         for (i = 0; i < cdm->num_patterns; i++) {
2833                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2834                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2835                                         position_type = CAM_DEV_POS_EDT;
2836                                         break;
2837                                 }
2838                         }
2839
2840                         if (cdm->num_patterns == 0)
2841                                 position_type = CAM_DEV_POS_EDT;
2842                         else if (position_type == CAM_DEV_POS_NONE)
2843                                 position_type = CAM_DEV_POS_PDRV;
2844                 }
2845
2846         switch (position_type & CAM_DEV_POS_TYPEMASK) {
2847                 case CAM_DEV_POS_EDT:
2848                         xptedtmatch(cdm);
2849                         break;
2850                 case CAM_DEV_POS_PDRV:
2851                         xptperiphlistmatch(cdm);
2852                         break;
2853                 default:
2854                         cdm->status = CAM_DEV_MATCH_ERROR;
2855                         break;
2856                 }
2857
2858                 if (cdm->status == CAM_DEV_MATCH_ERROR)
2859                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2860                 else
2861                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2862
2863                 break;
2864         }
2865         case XPT_SASYNC_CB:
2866         {
2867                 struct ccb_setasync *csa;
2868                 struct async_node *cur_entry;
2869                 struct async_list *async_head;
2870                 uint32_t added;
2871
2872                 csa = &start_ccb->csa;
2873                 added = csa->event_enable;
2874                 async_head = &path->device->asyncs;
2875
2876                 /*
2877                  * If there is already an entry for us, simply
2878                  * update it.
2879                  */
2880                 cur_entry = SLIST_FIRST(async_head);
2881                 while (cur_entry != NULL) {
2882                         if ((cur_entry->callback_arg == csa->callback_arg)
2883                          && (cur_entry->callback == csa->callback))
2884                                 break;
2885                         cur_entry = SLIST_NEXT(cur_entry, links);
2886                 }
2887
2888                 if (cur_entry != NULL) {
2889                         /*
2890                          * If the request has no flags set,
2891                          * remove the entry.
2892                          */
2893                         added &= ~cur_entry->event_enable;
2894                         if (csa->event_enable == 0) {
2895                                 SLIST_REMOVE(async_head, cur_entry,
2896                                              async_node, links);
2897                                 xpt_release_device(path->device);
2898                                 free(cur_entry, M_CAMXPT);
2899                         } else {
2900                                 cur_entry->event_enable = csa->event_enable;
2901                         }
2902                         csa->event_enable = added;
2903                 } else {
2904                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2905                                            M_NOWAIT);
2906                         if (cur_entry == NULL) {
2907                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2908                                 break;
2909                         }
2910                         cur_entry->event_enable = csa->event_enable;
2911                         cur_entry->event_lock = (path->bus->sim->mtx &&
2912                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
2913                         cur_entry->callback_arg = csa->callback_arg;
2914                         cur_entry->callback = csa->callback;
2915                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
2916                         xpt_acquire_device(path->device);
2917                 }
2918                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2919                 break;
2920         }
2921         case XPT_REL_SIMQ:
2922         {
2923                 struct ccb_relsim *crs;
2924                 struct cam_ed *dev;
2925
2926                 crs = &start_ccb->crs;
2927                 dev = path->device;
2928                 if (dev == NULL) {
2929                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
2930                         break;
2931                 }
2932
2933                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2934                         /* Don't ever go below one opening */
2935                         if (crs->openings > 0) {
2936                                 xpt_dev_ccbq_resize(path, crs->openings);
2937                                 if (bootverbose) {
2938                                         xpt_print(path,
2939                                             "number of openings is now %d\n",
2940                                             crs->openings);
2941                                 }
2942                         }
2943                 }
2944
2945                 mtx_lock(&dev->sim->devq->send_mtx);
2946                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2947                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2948                                 /*
2949                                  * Just extend the old timeout and decrement
2950                                  * the freeze count so that a single timeout
2951                                  * is sufficient for releasing the queue.
2952                                  */
2953                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2954                                 callout_stop(&dev->callout);
2955                         } else {
2956                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2957                         }
2958
2959                         callout_reset_sbt(&dev->callout,
2960                             SBT_1MS * crs->release_timeout, SBT_1MS,
2961                             xpt_release_devq_timeout, dev, 0);
2962
2963                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2964                 }
2965
2966                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2967                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2968                                 /*
2969                                  * Decrement the freeze count so that a single
2970                                  * completion is still sufficient to unfreeze
2971                                  * the queue.
2972                                  */
2973                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2974                         } else {
2975                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2976                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2977                         }
2978                 }
2979
2980                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2981                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2982                          || (dev->ccbq.dev_active == 0)) {
2983                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2984                         } else {
2985                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2986                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2987                         }
2988                 }
2989                 mtx_unlock(&dev->sim->devq->send_mtx);
2990
2991                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2992                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2993                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2994                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2995                 break;
2996         }
2997         case XPT_DEBUG: {
2998                 struct cam_path *oldpath;
2999
3000                 /* Check that all request bits are supported. */
3001                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3002                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3003                         break;
3004                 }
3005
3006                 cam_dflags = CAM_DEBUG_NONE;
3007                 if (cam_dpath != NULL) {
3008                         oldpath = cam_dpath;
3009                         cam_dpath = NULL;
3010                         xpt_free_path(oldpath);
3011                 }
3012                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3013                         if (xpt_create_path(&cam_dpath, NULL,
3014                                             start_ccb->ccb_h.path_id,
3015                                             start_ccb->ccb_h.target_id,
3016                                             start_ccb->ccb_h.target_lun) !=
3017                                             CAM_REQ_CMP) {
3018                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3019                         } else {
3020                                 cam_dflags = start_ccb->cdbg.flags;
3021                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3022                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3023                                     cam_dflags);
3024                         }
3025                 } else
3026                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3027                 break;
3028         }
3029         case XPT_NOOP:
3030                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3031                         xpt_freeze_devq(path, 1);
3032                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3033                 break;
3034         case XPT_REPROBE_LUN:
3035                 xpt_async(AC_INQ_CHANGED, path, NULL);
3036                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3037                 xpt_done(start_ccb);
3038                 break;
3039         case XPT_ASYNC:
3040                 /*
3041                  * Queue the async operation so it can be run from a sleepable
3042                  * context.
3043                  */
3044                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3045                 mtx_lock(&cam_async.cam_doneq_mtx);
3046                 STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe);
3047                 start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX;
3048                 mtx_unlock(&cam_async.cam_doneq_mtx);
3049                 wakeup(&cam_async.cam_doneq);
3050                 break;
3051         default:
3052         case XPT_SDEV_TYPE:
3053         case XPT_TERM_IO:
3054         case XPT_ENG_INQ:
3055                 /* XXX Implement */
3056                 xpt_print(start_ccb->ccb_h.path,
3057                     "%s: CCB type %#x %s not supported\n", __func__,
3058                     start_ccb->ccb_h.func_code,
3059                     xpt_action_name(start_ccb->ccb_h.func_code));
3060                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3061                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3062                         xpt_done(start_ccb);
3063                 }
3064                 break;
3065         }
3066         CAM_DEBUG(path, CAM_DEBUG_TRACE,
3067             ("xpt_action_default: func= %#x %s status %#x\n",
3068                 start_ccb->ccb_h.func_code,
3069                 xpt_action_name(start_ccb->ccb_h.func_code),
3070                 start_ccb->ccb_h.status));
3071 }
3072
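/*
 * Example: the XPT_SASYNC_CB case above is how async callbacks are
 * (de)registered.  Below is a minimal sketch of a registration; the
 * name example_async_cb is hypothetical (any function matching
 * ac_callback_t will do), and this is the same pattern that the
 * xpt_register_async() convenience routine wraps.  Illustration only,
 * not compiled.
 */
#if 0
static void     example_async_cb(void *arg, uint32_t code,
                    struct cam_path *path, void *args);

static void
example_register_async(struct cam_path *path)
{
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
        csa.callback = example_async_cb;
        csa.callback_arg = NULL;
        xpt_action((union ccb *)&csa);
        /* Re-issuing with event_enable == 0 removes the registration. */
}
#endif
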
3073 /*
3074  * Call the sim poll routine to allow the sim to complete
3075  * any in-flight requests, then call camisr_runqueue to
3076  * process any CCBs that the polling completed.
3077  */
3078 void
3079 xpt_sim_poll(struct cam_sim *sim)
3080 {
3081         struct mtx *mtx;
3082
3083         KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3084         mtx = sim->mtx;
3085         if (mtx)
3086                 mtx_lock(mtx);
3087         (*(sim->sim_poll))(sim);
3088         if (mtx)
3089                 mtx_unlock(mtx);
3090         camisr_runqueue();
3091 }
3092
3093 uint32_t
3094 xpt_poll_setup(union ccb *start_ccb)
3095 {
3096         uint32_t timeout;
3097         struct    cam_sim *sim;
3098         struct    cam_devq *devq;
3099         struct    cam_ed *dev;
3100
3101         timeout = start_ccb->ccb_h.timeout * 10;
3102         sim = start_ccb->ccb_h.path->bus->sim;
3103         devq = sim->devq;
3104         dev = start_ccb->ccb_h.path->device;
3105
3106         KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3107
3108         /*
3109          * Steal an opening so that no other queued requests
3110          * can get it before us while we simulate interrupts.
3111          */
3112         mtx_lock(&devq->send_mtx);
3113         dev->ccbq.dev_openings--;
3114         while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3115             (--timeout > 0)) {
3116                 mtx_unlock(&devq->send_mtx);
3117                 DELAY(100);
3118                 xpt_sim_poll(sim);
3119                 mtx_lock(&devq->send_mtx);
3120         }
3121         dev->ccbq.dev_openings++;
3122         mtx_unlock(&devq->send_mtx);
3123
3124         return (timeout);
3125 }
3126
3127 void
3128 xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
3129 {
3130
3131         KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim),
3132             ("%s: non-pollable sim", __func__));
3133         while (--timeout > 0) {
3134                 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim);
3135                 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3136                     != CAM_REQ_INPROG)
3137                         break;
3138                 DELAY(100);
3139         }
3140
3141         if (timeout == 0) {
3142                 /*
3143                  * XXX Is it worth adding a sim_timeout entry
3144                  * point so we can attempt recovery?  If
3145                  * this is only used for dumps, I don't think
3146                  * it is.
3147                  */
3148                 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3149         }
3150 }
3151
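/*
 * Example: a minimal sketch of how xpt_poll_setup() and xpt_pollwait()
 * bracket a polled request (e.g. while the system is dumping), assuming
 * the caller already holds the path lock.  Illustration only, not
 * compiled.
 */
#if 0
static void
example_polled_action(union ccb *ccb)
{
        uint32_t timeout;

        timeout = xpt_poll_setup(ccb);
        if (timeout > 0) {
                xpt_action(ccb);
                xpt_pollwait(ccb, timeout);
        } else {
                ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
        }
}
#endif
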
3152 /*
3153  * Schedule a peripheral driver to receive a ccb when its
3154  * target device has space for more transactions.
3155  */
3156 void
3157 xpt_schedule(struct cam_periph *periph, uint32_t new_priority)
3158 {
3159
3160         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3161         cam_periph_assert(periph, MA_OWNED);
3162         if (new_priority < periph->scheduled_priority) {
3163                 periph->scheduled_priority = new_priority;
3164                 xpt_run_allocq(periph, 0);
3165         }
3166 }
3167
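/*
 * Example: a minimal sketch of how a peripheral driver typically uses
 * xpt_schedule().  All "example" names and the softc's bio_queue field
 * are hypothetical.  The driver queues work while holding the periph
 * lock and asks to be scheduled; the XPT later hands a CCB to the
 * driver's registered periph_start routine.  Illustration only, not
 * compiled.
 */
#if 0
static void
example_strategy(struct cam_periph *periph, struct bio *bp)
{
        struct example_softc *softc = periph->softc;

        cam_periph_lock(periph);
        bioq_insert_tail(&softc->bio_queue, bp);
        xpt_schedule(periph, CAM_PRIORITY_NORMAL);
        cam_periph_unlock(periph);
}

static void
example_start(struct cam_periph *periph, union ccb *start_ccb)
{
        /* Dequeue a bio, fill in start_ccb, then submit it. */
        xpt_action(start_ccb);
}
#endif
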
3168 /*
3169  * Schedule a device to run on a given queue.
3170  * If the device was inserted as a new entry on the queue,
3171  * return 1 meaning the device queue should be run. If we
3172  * were already queued, implying someone else has already
3173  * started the queue, return 0 so the caller doesn't attempt
3174  * to run the queue.
3175  */
3176 static int
3177 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3178                  uint32_t new_priority)
3179 {
3180         int retval;
3181         uint32_t old_priority;
3182
3183         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3184
3185         old_priority = pinfo->priority;
3186
3187         /*
3188          * Are we already queued?
3189          */
3190         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3191                 /* Simply reorder based on new priority */
3192                 if (new_priority < old_priority) {
3193                         camq_change_priority(queue, pinfo->index,
3194                                              new_priority);
3195                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3196                                         ("changed priority to %d\n",
3197                                          new_priority));
3198                         retval = 1;
3199                 } else
3200                         retval = 0;
3201         } else {
3202                 /* New entry on the queue */
3203                 if (new_priority < old_priority)
3204                         pinfo->priority = new_priority;
3205
3206                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3207                                 ("Inserting onto queue\n"));
3208                 pinfo->generation = ++queue->generation;
3209                 camq_insert(queue, pinfo);
3210                 retval = 1;
3211         }
3212         return (retval);
3213 }
3214
3215 static void
3216 xpt_run_allocq_task(void *context, int pending)
3217 {
3218         struct cam_periph *periph = context;
3219
3220         cam_periph_lock(periph);
3221         periph->flags &= ~CAM_PERIPH_RUN_TASK;
3222         xpt_run_allocq(periph, 1);
3223         cam_periph_unlock(periph);
3224         cam_periph_release(periph);
3225 }
3226
3227 static void
3228 xpt_run_allocq(struct cam_periph *periph, int sleep)
3229 {
3230         struct cam_ed   *device;
3231         union ccb       *ccb;
3232         uint32_t         prio;
3233
3234         cam_periph_assert(periph, MA_OWNED);
3235         if (periph->periph_allocating)
3236                 return;
3237         cam_periph_doacquire(periph);
3238         periph->periph_allocating = 1;
3239         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3240         device = periph->path->device;
3241         ccb = NULL;
3242 restart:
3243         while ((prio = min(periph->scheduled_priority,
3244             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3245             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3246              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3247                 if (ccb == NULL &&
3248                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3249                         if (sleep) {
3250                                 ccb = xpt_get_ccb(periph);
3251                                 goto restart;
3252                         }
3253                         if (periph->flags & CAM_PERIPH_RUN_TASK)
3254                                 break;
3255                         cam_periph_doacquire(periph);
3256                         periph->flags |= CAM_PERIPH_RUN_TASK;
3257                         taskqueue_enqueue(xsoftc.xpt_taskq,
3258                             &periph->periph_run_task);
3259                         break;
3260                 }
3261                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3262                 if (prio == periph->immediate_priority) {
3263                         periph->immediate_priority = CAM_PRIORITY_NONE;
3264                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3265                                         ("waking cam_periph_getccb()\n"));
3266                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3267                                           periph_links.sle);
3268                         wakeup(&periph->ccb_list);
3269                 } else {
3270                         periph->scheduled_priority = CAM_PRIORITY_NONE;
3271                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3272                                         ("calling periph_start()\n"));
3273                         periph->periph_start(periph, ccb);
3274                 }
3275                 ccb = NULL;
3276         }
3277         if (ccb != NULL)
3278                 xpt_release_ccb(ccb);
3279         periph->periph_allocating = 0;
3280         cam_periph_release_locked(periph);
3281 }
3282
3283 static void
3284 xpt_run_devq(struct cam_devq *devq)
3285 {
3286         struct mtx *mtx;
3287
3288         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3289
3290         devq->send_queue.qfrozen_cnt++;
3291         while ((devq->send_queue.entries > 0)
3292             && (devq->send_openings > 0)
3293             && (devq->send_queue.qfrozen_cnt <= 1)) {
3294                 struct  cam_ed *device;
3295                 union ccb *work_ccb;
3296                 struct  cam_sim *sim;
3297                 struct xpt_proto *proto;
3298
3299                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
3300                                                            CAMQ_HEAD);
3301                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3302                                 ("running device %p\n", device));
3303
3304                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3305                 if (work_ccb == NULL) {
3306                         printf("device on run queue with no ccbs???\n");
3307                         continue;
3308                 }
3309
3310                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3311                         mtx_lock(&xsoftc.xpt_highpower_lock);
3312                         if (xsoftc.num_highpower <= 0) {
3313                                 /*
3314                                  * We got a high power command, but we
3315                                  * don't have any available slots.  Freeze
3316                                  * the device queue until we have a slot
3317                                  * available.
3318                                  */
3319                                 xpt_freeze_devq_device(device, 1);
3320                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3321                                                    highpowerq_entry);
3322
3323                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
3324                                 continue;
3325                         } else {
3326                                 /*
3327                                  * Consume a high power slot while
3328                                  * this ccb runs.
3329                                  */
3330                                 xsoftc.num_highpower--;
3331                         }
3332                         mtx_unlock(&xsoftc.xpt_highpower_lock);
3333                 }
3334                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3335                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3336                 devq->send_openings--;
3337                 devq->send_active++;
3338                 xpt_schedule_devq(devq, device);
3339                 mtx_unlock(&devq->send_mtx);
3340
3341                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3342                         /*
3343                          * The client wants to freeze the queue
3344                          * after this CCB is sent.
3345                          */
3346                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3347                 }
3348
3349                 /* In Target mode, the peripheral driver knows best... */
3350                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3351                         if ((device->inq_flags & SID_CmdQue) != 0
3352                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3353                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3354                         else
3355                                 /*
3356                                  * Clear this in case of a retried CCB that
3357                                  * failed due to a rejected tag.
3358                                  */
3359                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3360                 }
3361
3362                 KASSERT(device == work_ccb->ccb_h.path->device,
3363                     ("device (%p) / path->device (%p) mismatch",
3364                         device, work_ccb->ccb_h.path->device));
3365                 proto = xpt_proto_find(device->protocol);
3366                 if (proto && proto->ops->debug_out)
3367                         proto->ops->debug_out(work_ccb);
3368
3369                 /*
3370                  * Device queues can be shared among multiple SIM instances
3371                  * that reside on different buses.  Use the SIM from the
3372                  * queued device, rather than the one from the calling bus.
3373                  */
3374                 sim = device->sim;
3375                 mtx = sim->mtx;
3376                 if (mtx && !mtx_owned(mtx))
3377                         mtx_lock(mtx);
3378                 else
3379                         mtx = NULL;
3380                 work_ccb->ccb_h.qos.periph_data = cam_iosched_now();
3381                 (*(sim->sim_action))(sim, work_ccb);
3382                 if (mtx)
3383                         mtx_unlock(mtx);
3384                 mtx_lock(&devq->send_mtx);
3385         }
3386         devq->send_queue.qfrozen_cnt--;
3387 }
3388
3389 /*
3390  * This function merges the request fields from the src ccb into the dst ccb,
3391  * while keeping the important header fields in the dst ccb constant.
3392  */
3393 void
3394 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb)
3395 {
3396
3397         /*
3398          * Pull fields that are valid for peripheral drivers to set
3399          * into the dst CCB along with the CCB "payload".
3400          */
3401         dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count;
3402         dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code;
3403         dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout;
3404         dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags;
3405         bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1],
3406               sizeof(union ccb) - sizeof(struct ccb_hdr));
3407 }
3408
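/*
 * Example: a minimal sketch of the pass(4)-style pattern this serves,
 * with hypothetical names: a caller-constructed CCB is merged into a
 * freshly allocated one so that the XPT-owned header state (path,
 * priority, queue linkage) is preserved.  Illustration only, not
 * compiled.
 */
#if 0
static void
example_send_user_ccb(struct cam_periph *periph, union ccb *user_ccb)
{
        union ccb *ccb;

        ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
        xpt_merge_ccb(ccb, user_ccb);
        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
        xpt_release_ccb(ccb);
}
#endif
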
3409 void
3410 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3411                     uint32_t priority, uint32_t flags)
3412 {
3413
3414         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3415         ccb_h->pinfo.priority = priority;
3416         ccb_h->path = path;
3417         ccb_h->path_id = path->bus->path_id;
3418         if (path->target)
3419                 ccb_h->target_id = path->target->target_id;
3420         else
3421                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3422         if (path->device) {
3423                 ccb_h->target_lun = path->device->lun_id;
3424                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3425         } else {
3426                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3427         }
3428         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3429         ccb_h->flags = flags;
3430         ccb_h->xflags = 0;
3431 }
3432
3433 void
3434 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority)
3435 {
3436         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3437 }
3438
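/*
 * Example: a minimal sketch of initializing and issuing an immediate
 * CCB with xpt_setup_ccb(); this is essentially what the xpt_path_inq()
 * convenience wrapper does.  Illustration only, not compiled.
 */
#if 0
static void
example_path_inq(struct cam_path *path)
{
        struct ccb_pathinq cpi;

        xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
        if (cpi.ccb_h.status == CAM_REQ_CMP)
                printf("HBA vendor: %.*s\n", HBA_IDLEN, cpi.hba_vid);
}
#endif
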
3439 /* Path manipulation functions */
3440 cam_status
3441 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3442                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3443 {
3444         struct     cam_path *path;
3445         cam_status status;
3446
3447         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3448
3449         if (path == NULL) {
3450                 status = CAM_RESRC_UNAVAIL;
3451                 return(status);
3452         }
3453         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3454         if (status != CAM_REQ_CMP) {
3455                 free(path, M_CAMPATH);
3456                 path = NULL;
3457         }
3458         *new_path_ptr = path;
3459         return (status);
3460 }
3461
3462 cam_status
3463 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3464                          struct cam_periph *periph, path_id_t path_id,
3465                          target_id_t target_id, lun_id_t lun_id)
3466 {
3467
3468         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3469             lun_id));
3470 }
3471
3472 cam_status
3473 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3474                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3475 {
3476         struct       cam_eb *bus;
3477         struct       cam_et *target;
3478         struct       cam_ed *device;
3479         cam_status   status;
3480
3481         status = CAM_REQ_CMP;   /* Completed without error */
3482         target = NULL;          /* Wildcarded */
3483         device = NULL;          /* Wildcarded */
3484
3485         /*
3486          * We will potentially modify the EDT, so serialize against other
3487          * threads that may attempt to create cam paths.
3488          */
3489         bus = xpt_find_bus(path_id);
3490         if (bus == NULL) {
3491                 status = CAM_PATH_INVALID;
3492         } else {
3493                 xpt_lock_buses();
3494                 mtx_lock(&bus->eb_mtx);
3495                 target = xpt_find_target(bus, target_id);
3496                 if (target == NULL) {
3497                         /* Create one */
3498                         struct cam_et *new_target;
3499
3500                         new_target = xpt_alloc_target(bus, target_id);
3501                         if (new_target == NULL) {
3502                                 status = CAM_RESRC_UNAVAIL;
3503                         } else {
3504                                 target = new_target;
3505                         }
3506                 }
3507                 xpt_unlock_buses();
3508                 if (target != NULL) {
3509                         device = xpt_find_device(target, lun_id);
3510                         if (device == NULL) {
3511                                 /* Create one */
3512                                 struct cam_ed *new_device;
3513
3514                                 new_device =
3515                                     (*(bus->xport->ops->alloc_device))(bus,
3516                                                                        target,
3517                                                                        lun_id);
3518                                 if (new_device == NULL) {
3519                                         status = CAM_RESRC_UNAVAIL;
3520                                 } else {
3521                                         device = new_device;
3522                                 }
3523                         }
3524                 }
3525                 mtx_unlock(&bus->eb_mtx);
3526         }
3527
3528         /*
3529          * Only touch the user's data if we are successful.
3530          */
3531         if (status == CAM_REQ_CMP) {
3532                 new_path->periph = perph;
3533                 new_path->bus = bus;
3534                 new_path->target = target;
3535                 new_path->device = device;
3536                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3537         } else {
3538                 if (device != NULL)
3539                         xpt_release_device(device);
3540                 if (target != NULL)
3541                         xpt_release_target(target);
3542                 if (bus != NULL)
3543                         xpt_release_bus(bus);
3544         }
3545         return (status);
3546 }
3547
3548 int
3549 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3550 {
3551         struct     cam_path *new_path;
3552
3553         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3554         if (new_path == NULL)
3555                 return (ENOMEM);
3556         *new_path = *path;
3557         if (path->bus != NULL)
3558                 xpt_acquire_bus(path->bus);
3559         if (path->target != NULL)
3560                 xpt_acquire_target(path->target);
3561         if (path->device != NULL)
3562                 xpt_acquire_device(path->device);
3563         *new_path_ptr = new_path;
3564         return (0);
3565 }
3566
3567 void
3568 xpt_release_path(struct cam_path *path)
3569 {
3570         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3571         if (path->device != NULL) {
3572                 xpt_release_device(path->device);
3573                 path->device = NULL;
3574         }
3575         if (path->target != NULL) {
3576                 xpt_release_target(path->target);
3577                 path->target = NULL;
3578         }
3579         if (path->bus != NULL) {
3580                 xpt_release_bus(path->bus);
3581                 path->bus = NULL;
3582         }
3583 }
3584
3585 void
3586 xpt_free_path(struct cam_path *path)
3587 {
3588
3589         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3590         xpt_release_path(path);
3591         free(path, M_CAMPATH);
3592 }
3593
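/*
 * Example: a minimal sketch of the create/use/free life cycle of a
 * path, mirroring the XPT_DEBUG handling in xpt_action_default() above.
 * Illustration only, not compiled.
 */
#if 0
static void
example_use_path(path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
{
        struct cam_path *path;

        if (xpt_create_path(&path, /*periph*/NULL, path_id, target_id,
            lun_id) != CAM_REQ_CMP)
                return;
        xpt_print(path, "probing\n");
        xpt_free_path(path);
}
#endif
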
3594 void
3595 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3596     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3597 {
3598
3599         xpt_lock_buses();
3600         if (bus_ref) {
3601                 if (path->bus)
3602                         *bus_ref = path->bus->refcount;
3603                 else
3604                         *bus_ref = 0;
3605         }
3606         if (periph_ref) {
3607                 if (path->periph)
3608                         *periph_ref = path->periph->refcount;
3609                 else
3610                         *periph_ref = 0;
3611         }
3612         xpt_unlock_buses();
3613         if (target_ref) {
3614                 if (path->target)
3615                         *target_ref = path->target->refcount;
3616                 else
3617                         *target_ref = 0;
3618         }
3619         if (device_ref) {
3620                 if (path->device)
3621                         *device_ref = path->device->refcount;
3622                 else
3623                         *device_ref = 0;
3624         }
3625 }
3626
3627 /*
3628  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3629  * in path1, 2 for match with wildcards in path2.
3630  */
3631 int
3632 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3633 {
3634         int retval = 0;
3635
3636         if (path1->bus != path2->bus) {
3637                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3638                         retval = 1;
3639                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3640                         retval = 2;
3641                 else
3642                         return (-1);
3643         }
3644         if (path1->target != path2->target) {
3645                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3646                         if (retval == 0)
3647                                 retval = 1;
3648                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3649                         retval = 2;
3650                 else
3651                         return (-1);
3652         }
3653         if (path1->device != path2->device) {
3654                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3655                         if (retval == 0)
3656                                 retval = 1;
3657                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3658                         retval = 2;
3659                 else
3660                         return (-1);
3661         }
3662         return (retval);
3663 }
3664
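/*
 * Example: given a wildcard path for bus 0 (target and lun wildcarded)
 * as path1 and a fully specified path 0:1:0 as path2, xpt_path_comp()
 * returns 1 (the wildcards are in path1); swapping the arguments
 * returns 2, and comparing paths on different, non-wildcard buses
 * returns -1.  Callers typically treat any return value >= 0 as "the
 * paths overlap".
 */
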
3665 int
3666 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3667 {
3668         int retval = 0;
3669
3670         if (path->bus != dev->target->bus) {
3671                 if (path->bus->path_id == CAM_BUS_WILDCARD)
3672                         retval = 1;
3673                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3674                         retval = 2;
3675                 else
3676                         return (-1);
3677         }
3678         if (path->target != dev->target) {
3679                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
3680                         if (retval == 0)
3681                                 retval = 1;
3682                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3683                         retval = 2;
3684                 else
3685                         return (-1);
3686         }
3687         if (path->device != dev) {
3688                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
3689                         if (retval == 0)
3690                                 retval = 1;
3691                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
3692                         retval = 2;
3693                 else
3694                         return (-1);
3695         }
3696         return (retval);
3697 }
3698
3699 void
3700 xpt_print_path(struct cam_path *path)
3701 {
3702         struct sbuf sb;
3703         char buffer[XPT_PRINT_LEN];
3704
3705         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3706         xpt_path_sbuf(path, &sb);
3707         sbuf_finish(&sb);
3708         printf("%s", sbuf_data(&sb));
3709         sbuf_delete(&sb);
3710 }
3711
3712 static void
3713 xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb)
3714 {
3715         if (device == NULL)
3716                 sbuf_printf(sb, "(nopath): ");
3717         else {
3718                 sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ",
3719                     device->sim->sim_name,
3720                     device->sim->unit_number,
3721                     device->sim->bus_id,
3722                     device->target->target_id,
3723                     (uintmax_t)device->lun_id);
3724         }
3725 }
3726
3727 void
3728 xpt_print(struct cam_path *path, const char *fmt, ...)
3729 {
3730         va_list ap;
3731         struct sbuf sb;
3732         char buffer[XPT_PRINT_LEN];
3733
3734         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3735
3736         xpt_path_sbuf(path, &sb);
3737         va_start(ap, fmt);
3738         sbuf_vprintf(&sb, fmt, ap);
3739         va_end(ap);
3740
3741         sbuf_finish(&sb);
3742         printf("%s", sbuf_data(&sb));
3743         sbuf_delete(&sb);
3744 }
3745
3746 int
3747 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3748 {
3749         struct sbuf sb;
3750         int len;
3751
3752         sbuf_new(&sb, str, str_len, 0);
3753         len = xpt_path_sbuf(path, &sb);
3754         sbuf_finish(&sb);
3755         return (len);
3756 }
3757
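/*
 * Example: a minimal sketch of formatting a path into a caller-supplied
 * buffer; xpt_print() below is the printf-style convenience form of the
 * same operation.  Illustration only, not compiled.
 */
#if 0
static void
example_print_status(struct cam_path *path, uint32_t status)
{
        char buf[XPT_PRINT_LEN];

        xpt_path_string(path, buf, sizeof(buf));
        printf("%sstatus %#x\n", buf, status);
}
#endif
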
3758 int
3759 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3760 {
3761
3762         if (path == NULL)
3763                 sbuf_printf(sb, "(nopath): ");
3764         else {
3765                 if (path->periph != NULL)
3766                         sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3767                                     path->periph->unit_number);
3768                 else
3769                         sbuf_printf(sb, "(noperiph:");
3770
3771                 if (path->bus != NULL)
3772                         sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3773                                     path->bus->sim->unit_number,
3774                                     path->bus->sim->bus_id);
3775                 else
3776                         sbuf_printf(sb, "nobus:");
3777
3778                 if (path->target != NULL)
3779                         sbuf_printf(sb, "%d:", path->target->target_id);
3780                 else
3781                         sbuf_printf(sb, "X:");
3782
3783                 if (path->device != NULL)
3784                         sbuf_printf(sb, "%jx): ",
3785                             (uintmax_t)path->device->lun_id);
3786                 else
3787                         sbuf_printf(sb, "X): ");
3788         }
3789
3790         return(sbuf_len(sb));
3791 }
3792
3793 path_id_t
3794 xpt_path_path_id(struct cam_path *path)
3795 {
3796         return(path->bus->path_id);
3797 }
3798
3799 target_id_t
3800 xpt_path_target_id(struct cam_path *path)
3801 {
3802         if (path->target != NULL)
3803                 return (path->target->target_id);
3804         else
3805                 return (CAM_TARGET_WILDCARD);
3806 }
3807
3808 lun_id_t
3809 xpt_path_lun_id(struct cam_path *path)
3810 {
3811         if (path->device != NULL)
3812                 return (path->device->lun_id);
3813         else
3814                 return (CAM_LUN_WILDCARD);
3815 }
3816
3817 struct cam_sim *
3818 xpt_path_sim(struct cam_path *path)
3819 {
3820
3821         return (path->bus->sim);
3822 }
3823
3824 struct cam_periph*
3825 xpt_path_periph(struct cam_path *path)
3826 {
3827
3828         return (path->periph);
3829 }
3830
3831 /*
3832  * Release a CAM control block for the caller.  Remit the cost of the structure
3833  * to the device referenced by the path.  If this device had no 'credits'
3834  * and peripheral drivers have registered async callbacks for this notification,
3835  * call them now.
3836  */
3837 void
3838 xpt_release_ccb(union ccb *free_ccb)
3839 {
3840         struct   cam_ed *device;
3841         struct   cam_periph *periph;
3842
3843         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3844         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3845         device = free_ccb->ccb_h.path->device;
3846         periph = free_ccb->ccb_h.path->periph;
3847
3848         xpt_free_ccb(free_ccb);
3849         periph->periph_allocated--;
3850         cam_ccbq_release_opening(&device->ccbq);
3851         xpt_run_allocq(periph, 0);
3852 }
3853
3854 /* Functions accessed by SIM drivers */
3855
3856 static struct xpt_xport_ops xport_default_ops = {
3857         .alloc_device = xpt_alloc_device_default,
3858         .action = xpt_action_default,
3859         .async = xpt_dev_async_default,
3860 };
3861 static struct xpt_xport xport_default = {
3862         .xport = XPORT_UNKNOWN,
3863         .name = "unknown",
3864         .ops = &xport_default_ops,
3865 };
3866
3867 CAM_XPT_XPORT(xport_default);
3868
3869 /*
3870  * A sim structure, listing the SIM entry points and instance
3871  * identification info, is passed to xpt_bus_register to hook the SIM
3872  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3873  * for this new bus and places it in the array of buses and assigns
3874  * it a path_id.  The path_id may be influenced by "hard wiring"
3875  * information specified by the user.  Once interrupt services are
3876  * available, the bus will be probed.
3877  */
3878 int
3879 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus)
3880 {
3881         struct cam_eb *new_bus;
3882         struct cam_eb *old_bus;
3883         struct ccb_pathinq cpi;
3884         struct cam_path *path;
3885         cam_status status;
3886
3887         sim->bus_id = bus;
3888         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3889                                           M_CAMXPT, M_NOWAIT|M_ZERO);
3890         if (new_bus == NULL) {
3891                 /* Couldn't satisfy request */
3892                 return (ENOMEM);
3893         }
3894
3895         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3896         TAILQ_INIT(&new_bus->et_entries);
3897         cam_sim_hold(sim);
3898         new_bus->sim = sim;
3899         timevalclear(&new_bus->last_reset);
3900         new_bus->flags = 0;
3901         new_bus->refcount = 1;  /* Held until a bus_deregister event */
3902         new_bus->generation = 0;
3903         new_bus->parent_dev = parent;
3904
3905         xpt_lock_buses();
3906         sim->path_id = new_bus->path_id =
3907             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3908         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3909         while (old_bus != NULL
3910             && old_bus->path_id < new_bus->path_id)
3911                 old_bus = TAILQ_NEXT(old_bus, links);
3912         if (old_bus != NULL)
3913                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3914         else
3915                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3916         xsoftc.bus_generation++;
3917         xpt_unlock_buses();
3918
3919         /*
3920          * Set a default transport so that a PATH_INQ can be issued to
3921          * the SIM.  This will then allow for probing and attaching of
3922          * a more appropriate transport.
3923          */
3924         new_bus->xport = &xport_default;
3925
3926         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3927                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3928         if (status != CAM_REQ_CMP) {
3929                 xpt_release_bus(new_bus);
3930                 return (ENOMEM);
3931         }
3932
3933         xpt_path_inq(&cpi, path);
3934
3935         /*
3936          * Use the results of PATH_INQ to pick a transport.  Note that
3937          * the xpt bus (which uses XPORT_UNSPECIFIED) always uses
3938          * xport_default instead of a transport from
3939          * cam_xpt_port_set.
3940          */
3941         if (cam_ccb_success((union ccb *)&cpi) &&
3942             cpi.transport != XPORT_UNSPECIFIED) {
3943                 struct xpt_xport **xpt;
3944
3945                 SET_FOREACH(xpt, cam_xpt_xport_set) {
3946                         if ((*xpt)->xport == cpi.transport) {
3947                                 new_bus->xport = *xpt;
3948                                 break;
3949                         }
3950                 }
3951                 if (new_bus->xport == &xport_default) {
3952                         xpt_print(path,
3953                             "No transport found for %d\n", cpi.transport);
3954                         xpt_release_bus(new_bus);
3955                         xpt_free_path(path);
3956                         return (EINVAL);
3957                 }
3958         }
3959
3960         /* Notify interested parties */
3961         if (sim->path_id != CAM_XPT_PATH_ID) {
3962                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
3963                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3964                         union   ccb *scan_ccb;
3965
3966                         /* Initiate bus rescan. */
3967                         scan_ccb = xpt_alloc_ccb_nowait();
3968                         if (scan_ccb != NULL) {
3969                                 scan_ccb->ccb_h.path = path;
3970                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3971                                 scan_ccb->crcn.flags = 0;
3972                                 xpt_rescan(scan_ccb);
3973                         } else {
3974                                 xpt_print(path,
3975                                           "Can't allocate CCB to scan bus\n");
3976                                 xpt_free_path(path);
3977                         }
3978                 } else
3979                         xpt_free_path(path);
3980         } else
3981                 xpt_free_path(path);
3982         return (CAM_SUCCESS);
3983 }
3984
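/*
 * Example: a minimal sketch of the registration sequence described
 * above, as seen from a hypothetical SIM driver's attach routine.  The
 * example_* names, struct example_softc, and the queue depths are
 * assumptions for illustration; the cam_simq_alloc/cam_sim_alloc/
 * xpt_bus_register calls are the standard sequence.  Not compiled.
 */
#if 0
static int
example_attach(device_t dev, struct example_softc *softc)
{
        struct cam_devq *devq;

        devq = cam_simq_alloc(/*max_queued*/32);
        if (devq == NULL)
                return (ENOMEM);
        softc->sim = cam_sim_alloc(example_action, example_poll, "example",
            softc, device_get_unit(dev), &softc->mtx,
            /*max_dev_transactions*/1, /*max_tagged_dev_transactions*/32,
            devq);
        if (softc->sim == NULL) {
                cam_simq_free(devq);
                return (ENOMEM);
        }
        mtx_lock(&softc->mtx);
        if (xpt_bus_register(softc->sim, dev, /*bus*/0) != CAM_SUCCESS) {
                cam_sim_free(softc->sim, /*free_devq*/TRUE);
                mtx_unlock(&softc->mtx);
                return (ENXIO);
        }
        mtx_unlock(&softc->mtx);
        return (0);
}
#endif
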
3985 int
3986 xpt_bus_deregister(path_id_t pathid)
3987 {
3988         struct cam_path bus_path;
3989         cam_status status;
3990
3991         status = xpt_compile_path(&bus_path, NULL, pathid,
3992                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3993         if (status != CAM_REQ_CMP)
3994                 return (ENOMEM);
3995
3996         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3997         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3998
3999         /* Release the reference count held while registered. */
4000         xpt_release_bus(bus_path.bus);
4001         xpt_release_path(&bus_path);
4002
4003         return (CAM_SUCCESS);
4004 }
4005
4006 static path_id_t
4007 xptnextfreepathid(void)
4008 {
4009         struct cam_eb *bus;
4010         path_id_t pathid;
4011         const char *strval;
4012
4013         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4014         pathid = 0;
4015         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4016 retry:
4017         /* Find an unoccupied pathid */
4018         while (bus != NULL && bus->path_id <= pathid) {
4019                 if (bus->path_id == pathid)
4020                         pathid++;
4021                 bus = TAILQ_NEXT(bus, links);
4022         }
4023
4024         /*
4025          * Ensure that this pathid is not reserved for
4026          * a bus that may be registered in the future.
4027          */
4028         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4029                 ++pathid;
4030                 /* Start the search over */
4031                 goto retry;
4032         }
4033         return (pathid);
4034 }
4035
4036 static path_id_t
4037 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4038 {
4039         path_id_t pathid;
4040         int i, dunit, val;
4041         char buf[32];
4042         const char *dname;
4043
4044         pathid = CAM_XPT_PATH_ID;
4045         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4046         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4047                 return (pathid);
4048         i = 0;
4049         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4050                 if (strcmp(dname, "scbus")) {
4051                         /* Avoid a bit of foot shooting. */
4052                         continue;
4053                 }
4054                 if (dunit < 0)          /* unwired?! */
4055                         continue;
4056                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4057                         if (sim_bus == val) {
4058                                 pathid = dunit;
4059                                 break;
4060                         }
4061                 } else if (sim_bus == 0) {
4062                         /* Unspecified matches bus 0 */
4063                         pathid = dunit;
4064                         break;
4065                 } else {
4066                         printf("Ambiguous scbus configuration for %s%d "
4067                                "bus %d, cannot wire down.  The kernel "
4068                                "config entry for scbus%d should "
4069                                "specify a controller bus.\n"
4070                                "Scbus will be assigned dynamically.\n",
4071                                sim_name, sim_unit, sim_bus, dunit);
4072                         break;
4073                 }
4074         }
4075
4076         if (pathid == CAM_XPT_PATH_ID)
4077                 pathid = xptnextfreepathid();
4078         return (pathid);
4079 }
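/*
 * Wiring sketch (illustrative hint values): the "scbus" resource lookups
 * in xptnextfreepathid() and xptpathid() above correspond to device.hints
 * entries of the form
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * which would wire path id 0 to bus 0 of the hypothetical SIM instance
 * ahc0.  SIMs left unwired fall through to xptnextfreepathid().
 */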
4080
4081 static const char *
4082 xpt_async_string(uint32_t async_code)
4083 {
4084
4085         switch (async_code) {
4086         case AC_BUS_RESET: return ("AC_BUS_RESET");
4087         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4088         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4089         case AC_SENT_BDR: return ("AC_SENT_BDR");
4090         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4091         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4092         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4093         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4094         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4095         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4096         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4097         case AC_CONTRACT: return ("AC_CONTRACT");
4098         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4099         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4100         }
4101         return ("AC_UNKNOWN");
4102 }
4103
4104 static int
4105 xpt_async_size(uint32_t async_code)
4106 {
4107
4108         switch (async_code) {
4109         case AC_BUS_RESET: return (0);
4110         case AC_UNSOL_RESEL: return (0);
4111         case AC_SCSI_AEN: return (0);
4112         case AC_SENT_BDR: return (0);
4113         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4114         case AC_PATH_DEREGISTERED: return (0);
4115         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4116         case AC_LOST_DEVICE: return (0);
4117         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4118         case AC_INQ_CHANGED: return (0);
4119         case AC_GETDEV_CHANGED: return (0);
4120         case AC_CONTRACT: return (sizeof(struct ac_contract));
4121         case AC_ADVINFO_CHANGED: return (-1);
4122         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4123         }
4124         return (0);
4125 }
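/*
 * Note: a negative size (AC_ADVINFO_CHANGED) means the argument is not
 * copied; xpt_async() passes the caller's pointer through unchanged in
 * its size < 0 case below.
 */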
4126
4127 static int
4128 xpt_async_process_dev(struct cam_ed *device, void *arg)
4129 {
4130         union ccb *ccb = arg;
4131         struct cam_path *path = ccb->ccb_h.path;
4132         void *async_arg = ccb->casync.async_arg_ptr;
4133         uint32_t async_code = ccb->casync.async_code;
4134         bool relock;
4135
4136         if (path->device != device
4137          && path->device->lun_id != CAM_LUN_WILDCARD
4138          && device->lun_id != CAM_LUN_WILDCARD)
4139                 return (1);
4140
4141         /*
4142          * The async callback could free the device.
4143          * If it is a broadcast async, it doesn't hold a
4144          * device reference, so take our own reference.
4145          */
4146         xpt_acquire_device(device);
4147
4148         /*
4149          * If an async for a specific device is to be delivered to
4150          * a wildcard client, take the specific device's lock.
4151          * XXX: We may need a way for the client to specify this.
4152          */
4153         if ((device->lun_id == CAM_LUN_WILDCARD &&
4154              path->device->lun_id != CAM_LUN_WILDCARD) ||
4155             (device->target->target_id == CAM_TARGET_WILDCARD &&
4156              path->target->target_id != CAM_TARGET_WILDCARD) ||
4157             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4158              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4159                 mtx_unlock(&device->device_mtx);
4160                 xpt_path_lock(path);
4161                 relock = true;
4162         } else
4163                 relock = false;
4164
4165         (*(device->target->bus->xport->ops->async))(async_code,
4166             device->target->bus, device->target, device, async_arg);
4167         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4168
4169         if (relock) {
4170                 xpt_path_unlock(path);
4171                 mtx_lock(&device->device_mtx);
4172         }
4173         xpt_release_device(device);
4174         return (1);
4175 }
4176
4177 static int
4178 xpt_async_process_tgt(struct cam_et *target, void *arg)
4179 {
4180         union ccb *ccb = arg;
4181         struct cam_path *path = ccb->ccb_h.path;
4182
4183         if (path->target != target
4184          && path->target->target_id != CAM_TARGET_WILDCARD
4185          && target->target_id != CAM_TARGET_WILDCARD)
4186                 return (1);
4187
4188         if (ccb->casync.async_code == AC_SENT_BDR) {
4189                 /* Update our notion of when the last reset occurred */
4190                 microtime(&target->last_reset);
4191         }
4192
4193         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4194 }
4195
4196 static void
4197 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4198 {
4199         struct cam_eb *bus;
4200         struct cam_path *path;
4201         void *async_arg;
4202         uint32_t async_code;
4203
4204         path = ccb->ccb_h.path;
4205         async_code = ccb->casync.async_code;
4206         async_arg = ccb->casync.async_arg_ptr;
4207         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4208             ("xpt_async(%s)\n", xpt_async_string(async_code)));
4209         bus = path->bus;
4210
4211         if (async_code == AC_BUS_RESET) {
4212                 /* Update our notion of when the last reset occurred */
4213                 microtime(&bus->last_reset);
4214         }
4215
4216         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4217
4218         /*
4219          * If this wasn't a fully wildcarded async, tell all
4220          * clients that want all async events.
4221          */
4222         if (bus != xpt_periph->path->bus) {
4223                 xpt_path_lock(xpt_periph->path);
4224                 xpt_async_process_dev(xpt_periph->path->device, ccb);
4225                 xpt_path_unlock(xpt_periph->path);
4226         }
4227
4228         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4229                 xpt_release_devq(path, 1, TRUE);
4230         else
4231                 xpt_release_simq(path->bus->sim, TRUE);
4232         if (ccb->casync.async_arg_size > 0)
4233                 free(async_arg, M_CAMXPT);
4234         xpt_free_path(path);
4235         xpt_free_ccb(ccb);
4236 }
4237
4238 static void
4239 xpt_async_bcast(struct async_list *async_head,
4240                 uint32_t async_code,
4241                 struct cam_path *path, void *async_arg)
4242 {
4243         struct async_node *cur_entry;
4244         struct mtx *mtx;
4245
4246         cur_entry = SLIST_FIRST(async_head);
4247         while (cur_entry != NULL) {
4248                 struct async_node *next_entry;
4249                 /*
4250                  * Grab the next list entry before we call the current
4251                  * entry's callback.  This is because the callback function
4252                  * can delete its async callback entry.
4253                  */
4254                 next_entry = SLIST_NEXT(cur_entry, links);
4255                 if ((cur_entry->event_enable & async_code) != 0) {
4256                         mtx = cur_entry->event_lock ?
4257                             path->device->sim->mtx : NULL;
4258                         if (mtx)
4259                                 mtx_lock(mtx);
4260                         cur_entry->callback(cur_entry->callback_arg,
4261                                             async_code, path,
4262                                             async_arg);
4263                         if (mtx)
4264                                 mtx_unlock(mtx);
4265                 }
4266                 cur_entry = next_entry;
4267         }
4268 }
4269
4270 void
4271 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg)
4272 {
4273         union ccb *ccb;
4274         int size;
4275
4276         ccb = xpt_alloc_ccb_nowait();
4277         if (ccb == NULL) {
4278                 xpt_print(path, "Can't allocate CCB to send %s\n",
4279                     xpt_async_string(async_code));
4280                 return;
4281         }
4282
4283         if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) {
4284                 xpt_print(path, "Can't allocate path to send %s\n",
4285                     xpt_async_string(async_code));
4286                 xpt_free_ccb(ccb);
4287                 return;
4288         }
4289         ccb->ccb_h.path->periph = NULL;
4290         ccb->ccb_h.func_code = XPT_ASYNC;
4291         ccb->ccb_h.cbfcnp = xpt_async_process;
4292         ccb->ccb_h.flags |= CAM_UNLOCKED;
4293         ccb->casync.async_code = async_code;
4294         ccb->casync.async_arg_size = 0;
4295         size = xpt_async_size(async_code);
4296         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4297             ("xpt_async: func %#x %s async_code %d %s\n",
4298                 ccb->ccb_h.func_code,
4299                 xpt_action_name(ccb->ccb_h.func_code),
4300                 async_code,
4301                 xpt_async_string(async_code)));
4302         if (size > 0 && async_arg != NULL) {
4303                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4304                 if (ccb->casync.async_arg_ptr == NULL) {
4305                         xpt_print(path, "Can't allocate argument to send %s\n",
4306                             xpt_async_string(async_code));
4307                         xpt_free_path(ccb->ccb_h.path);
4308                         xpt_free_ccb(ccb);
4309                         return;
4310                 }
4311                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4312                 ccb->casync.async_arg_size = size;
4313         } else if (size < 0) {
4314                 ccb->casync.async_arg_ptr = async_arg;
4315                 ccb->casync.async_arg_size = size;
4316         }
4317         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4318                 xpt_freeze_devq(path, 1);
4319         else
4320                 xpt_freeze_simq(path->bus->sim, 1);
4321         xpt_action(ccb);
4322 }
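/*
 * Usage sketch: callers broadcast an event against a path much as
 * xpt_bus_deregister() does above:
 *
 *	xpt_async(AC_LOST_DEVICE, path, NULL);
 *
 * The event is queued as an XPT_ASYNC CCB with the relevant devq/simq
 * frozen, and is delivered (and the freeze dropped) in xpt_async_process().
 */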
4323
4324 static void
4325 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus,
4326                       struct cam_et *target, struct cam_ed *device,
4327                       void *async_arg)
4328 {
4329
4330         /*
4331          * We only need to handle events for real devices.
4332          */
4333         if (target->target_id == CAM_TARGET_WILDCARD
4334          || device->lun_id == CAM_LUN_WILDCARD)
4335                 return;
4336
4337         printf("%s called\n", __func__);
4338 }
4339
4340 static uint32_t
4341 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4342 {
4343         struct cam_devq *devq;
4344         uint32_t freeze;
4345
4346         devq = dev->sim->devq;
4347         mtx_assert(&devq->send_mtx, MA_OWNED);
4348         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4349             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4350             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4351         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4352         /* Remove frozen device from sendq. */
4353         if (device_is_queued(dev))
4354                 camq_remove(&devq->send_queue, dev->devq_entry.index);
4355         return (freeze);
4356 }
4357
4358 uint32_t
4359 xpt_freeze_devq(struct cam_path *path, u_int count)
4360 {
4361         struct cam_ed   *dev = path->device;
4362         struct cam_devq *devq;
4363         uint32_t         freeze;
4364
4365         devq = dev->sim->devq;
4366         mtx_lock(&devq->send_mtx);
4367         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4368         freeze = xpt_freeze_devq_device(dev, count);
4369         mtx_unlock(&devq->send_mtx);
4370         return (freeze);
4371 }
4372
4373 uint32_t
4374 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4375 {
4376         struct cam_devq *devq;
4377         uint32_t         freeze;
4378
4379         devq = sim->devq;
4380         mtx_lock(&devq->send_mtx);
4381         freeze = (devq->send_queue.qfrozen_cnt += count);
4382         mtx_unlock(&devq->send_mtx);
4383         return (freeze);
4384 }
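/*
 * Freeze/release sketch: qfrozen_cnt is a counter, so freezes nest and
 * every freeze must be balanced by a matching release before the queue
 * runs again, e.g.:
 *
 *	xpt_freeze_devq(path, 1);
 *	... quiesce or reconfigure the device ...
 *	xpt_release_devq(path, 1, TRUE);
 */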
4385
4386 static void
4387 xpt_release_devq_timeout(void *arg)
4388 {
4389         struct cam_ed *dev;
4390         struct cam_devq *devq;
4391
4392         dev = (struct cam_ed *)arg;
4393         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4394         devq = dev->sim->devq;
4395         mtx_assert(&devq->send_mtx, MA_OWNED);
4396         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4397                 xpt_run_devq(devq);
4398 }
4399
4400 void
4401 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4402 {
4403         struct cam_ed *dev;
4404         struct cam_devq *devq;
4405
4406         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4407             count, run_queue));
4408         dev = path->device;
4409         devq = dev->sim->devq;
4410         mtx_lock(&devq->send_mtx);
4411         if (xpt_release_devq_device(dev, count, run_queue))
4412                 xpt_run_devq(dev->sim->devq);
4413         mtx_unlock(&devq->send_mtx);
4414 }
4415
4416 static int
4417 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4418 {
4419
4420         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4421         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4422             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4423             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4424         if (count > dev->ccbq.queue.qfrozen_cnt) {
4425 #ifdef INVARIANTS
4426                 printf("xpt_release_devq(): requested %u > present %u\n",
4427                     count, dev->ccbq.queue.qfrozen_cnt);
4428 #endif
4429                 count = dev->ccbq.queue.qfrozen_cnt;
4430         }
4431         dev->ccbq.queue.qfrozen_cnt -= count;
4432         if (dev->ccbq.queue.qfrozen_cnt == 0) {
4433                 /*
4434                  * No longer need to wait for a successful
4435                  * command completion.
4436                  */
4437                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4438                 /*
4439                  * Remove any timeouts that might be scheduled
4440                  * to release this queue.
4441                  */
4442                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4443                         callout_stop(&dev->callout);
4444                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4445                 }
4446                 /*
4447                  * Now that we are unfrozen schedule the
4448                  * device so any pending transactions are
4449                  * run.
4450                  */
4451                 xpt_schedule_devq(dev->sim->devq, dev);
4452         } else
4453                 run_queue = 0;
4454         return (run_queue);
4455 }
4456
4457 void
4458 xpt_release_simq(struct cam_sim *sim, int run_queue)
4459 {
4460         struct cam_devq *devq;
4461
4462         devq = sim->devq;
4463         mtx_lock(&devq->send_mtx);
4464         if (devq->send_queue.qfrozen_cnt <= 0) {
4465 #ifdef INVARIANTS
4466                 printf("xpt_release_simq: requested 1 > present %u\n",
4467                     devq->send_queue.qfrozen_cnt);
4468 #endif
4469         } else
4470                 devq->send_queue.qfrozen_cnt--;
4471         if (devq->send_queue.qfrozen_cnt == 0) {
4472                 if (run_queue) {
4473                         /*
4474                          * Now that we are unfrozen run the send queue.
4475                          */
4476                         xpt_run_devq(sim->devq);
4477                 }
4478         }
4479         mtx_unlock(&devq->send_mtx);
4480 }
4481
4482 void
4483 xpt_done(union ccb *done_ccb)
4484 {
4485         struct cam_doneq *queue;
4486         int     run, hash;
4487
4488 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4489         if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
4490             done_ccb->csio.bio != NULL)
4491                 biotrack(done_ccb->csio.bio, __func__);
4492 #endif
4493
4494         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4495             ("xpt_done: func= %#x %s status %#x\n",
4496                 done_ccb->ccb_h.func_code,
4497                 xpt_action_name(done_ccb->ccb_h.func_code),
4498                 done_ccb->ccb_h.status));
4499         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4500                 return;
4501
4502         /* Store the time the ccb was in the sim */
4503         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4504         done_ccb->ccb_h.status |= CAM_QOS_VALID;
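        /*
         * Hash the path to a completion queue; this spreads completions
         * across the done threads while keeping a given device's
         * completions on one queue, preserving per-device ordering.
         */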
4505         hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4506             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4507         queue = &cam_doneqs[hash];
4508         mtx_lock(&queue->cam_doneq_mtx);
4509         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4510         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4511         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4512         mtx_unlock(&queue->cam_doneq_mtx);
4513         if (run && !dumping)
4514                 wakeup(&queue->cam_doneq);
4515 }
4516
4517 void
4518 xpt_done_direct(union ccb *done_ccb)
4519 {
4520
4521         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4522             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4523         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4524                 return;
4525
4526         /* Store the time the ccb was in the sim */
4527         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4528         done_ccb->ccb_h.status |= CAM_QOS_VALID;
4529         xpt_done_process(&done_ccb->ccb_h);
4530 }
4531
4532 union ccb *
4533 xpt_alloc_ccb(void)
4534 {
4535         union ccb *new_ccb;
4536
4537         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4538         return (new_ccb);
4539 }
4540
4541 union ccb *
4542 xpt_alloc_ccb_nowait(void)
4543 {
4544         union ccb *new_ccb;
4545
4546         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4547         return (new_ccb);
4548 }
4549
4550 void
4551 xpt_free_ccb(union ccb *free_ccb)
4552 {
4553         struct cam_periph *periph;
4554
4555         if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) {
4556                 /*
4557                  * Looks like a CCB allocated from a periph UMA zone.
4558                  */
4559                 periph = free_ccb->ccb_h.path->periph;
4560                 uma_zfree(periph->ccb_zone, free_ccb);
4561         } else {
4562                 free(free_ccb, M_CAMCCB);
4563         }
4564 }
4565
4566 /* Private XPT functions */
4567
4568 /*
4569  * Get a CAM control block for the caller. Charge the structure to the device
4570  * referenced by the path.  If we don't have sufficient resources to allocate
4571  * more ccbs, we return NULL.
4572  */
4573 static union ccb *
4574 xpt_get_ccb_nowait(struct cam_periph *periph)
4575 {
4576         union ccb *new_ccb;
4577         int alloc_flags;
4578
4579         if (periph->ccb_zone != NULL) {
4580                 alloc_flags = CAM_CCB_FROM_UMA;
4581                 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT);
4582         } else {
4583                 alloc_flags = 0;
4584                 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4585         }
4586         if (new_ccb == NULL)
4587                 return (NULL);
4588         new_ccb->ccb_h.alloc_flags = alloc_flags;
4589         periph->periph_allocated++;
4590         cam_ccbq_take_opening(&periph->path->device->ccbq);
4591         return (new_ccb);
4592 }
4593
4594 static union ccb *
4595 xpt_get_ccb(struct cam_periph *periph)
4596 {
4597         union ccb *new_ccb;
4598         int alloc_flags;
4599
4600         cam_periph_unlock(periph);
4601         if (periph->ccb_zone != NULL) {
4602                 alloc_flags = CAM_CCB_FROM_UMA;
4603                 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK);
4604         } else {
4605                 alloc_flags = 0;
4606                 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4607         }
4608         new_ccb->ccb_h.alloc_flags = alloc_flags;
4609         cam_periph_lock(periph);
4610         periph->periph_allocated++;
4611         cam_ccbq_take_opening(&periph->path->device->ccbq);
4612         return (new_ccb);
4613 }
4614
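/*
 * Note: cam_periph_getccb() blocks: it records the request in
 * immediate_priority and calls xpt_run_allocq() so that a CCB of the
 * requested priority is queued onto the periph's ccb_list, sleeping on
 * "cgticb" until one arrives.
 */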
4615 union ccb *
4616 cam_periph_getccb(struct cam_periph *periph, uint32_t priority)
4617 {
4618         struct ccb_hdr *ccb_h;
4619
4620         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4621         cam_periph_assert(periph, MA_OWNED);
4622         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4623             ccb_h->pinfo.priority != priority) {
4624                 if (priority < periph->immediate_priority) {
4625                         periph->immediate_priority = priority;
4626                         xpt_run_allocq(periph, 0);
4627                 } else
4628                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4629                             "cgticb", 0);
4630         }
4631         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4632         return ((union ccb *)ccb_h);
4633 }
4634
4635 static void
4636 xpt_acquire_bus(struct cam_eb *bus)
4637 {
4638
4639         xpt_lock_buses();
4640         bus->refcount++;
4641         xpt_unlock_buses();
4642 }
4643
4644 static void
4645 xpt_release_bus(struct cam_eb *bus)
4646 {
4647
4648         xpt_lock_buses();
4649         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4650         if (--bus->refcount > 0) {
4651                 xpt_unlock_buses();
4652                 return;
4653         }
4654         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4655         xsoftc.bus_generation++;
4656         xpt_unlock_buses();
4657         KASSERT(TAILQ_EMPTY(&bus->et_entries),
4658             ("destroying bus, but target list is not empty"));
4659         cam_sim_release(bus->sim);
4660         mtx_destroy(&bus->eb_mtx);
4661         free(bus, M_CAMXPT);
4662 }
4663
4664 static struct cam_et *
4665 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4666 {
4667         struct cam_et *cur_target, *target;
4668
4669         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4670         mtx_assert(&bus->eb_mtx, MA_OWNED);
4671         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4672                                          M_NOWAIT|M_ZERO);
4673         if (target == NULL)
4674                 return (NULL);
4675
4676         TAILQ_INIT(&target->ed_entries);
4677         target->bus = bus;
4678         target->target_id = target_id;
4679         target->refcount = 1;
4680         target->generation = 0;
4681         target->luns = NULL;
4682         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4683         timevalclear(&target->last_reset);
4684         /*
4685          * Hold a reference to our parent bus so it
4686          * will not go away before we do.
4687          */
4688         bus->refcount++;
4689
4690         /* Insertion sort into our bus's target list */
4691         cur_target = TAILQ_FIRST(&bus->et_entries);
4692         while (cur_target != NULL && cur_target->target_id < target_id)
4693                 cur_target = TAILQ_NEXT(cur_target, links);
4694         if (cur_target != NULL) {
4695                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4696         } else {
4697                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4698         }
4699         bus->generation++;
4700         return (target);
4701 }
4702
4703 static void
4704 xpt_acquire_target(struct cam_et *target)
4705 {
4706         struct cam_eb *bus = target->bus;
4707
4708         mtx_lock(&bus->eb_mtx);
4709         target->refcount++;
4710         mtx_unlock(&bus->eb_mtx);
4711 }
4712
4713 static void
4714 xpt_release_target(struct cam_et *target)
4715 {
4716         struct cam_eb *bus = target->bus;
4717
4718         mtx_lock(&bus->eb_mtx);
4719         if (--target->refcount > 0) {
4720                 mtx_unlock(&bus->eb_mtx);
4721                 return;
4722         }
4723         TAILQ_REMOVE(&bus->et_entries, target, links);
4724         bus->generation++;
4725         mtx_unlock(&bus->eb_mtx);
4726         KASSERT(TAILQ_EMPTY(&target->ed_entries),
4727             ("destroying target, but device list is not empty"));
4728         xpt_release_bus(bus);
4729         mtx_destroy(&target->luns_mtx);
4730         if (target->luns)
4731                 free(target->luns, M_CAMXPT);
4732         free(target, M_CAMXPT);
4733 }
4734
4735 static struct cam_ed *
4736 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4737                          lun_id_t lun_id)
4738 {
4739         struct cam_ed *device;
4740
4741         device = xpt_alloc_device(bus, target, lun_id);
4742         if (device == NULL)
4743                 return (NULL);
4744
4745         device->mintags = 1;
4746         device->maxtags = 1;
4747         return (device);
4748 }
4749
4750 static void
4751 xpt_destroy_device(void *context, int pending)
4752 {
4753         struct cam_ed   *device = context;
4754
4755         mtx_lock(&device->device_mtx);
4756         mtx_destroy(&device->device_mtx);
4757         free(device, M_CAMDEV);
4758 }
4759
4760 struct cam_ed *
4761 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4762 {
4763         struct cam_ed   *cur_device, *device;
4764         struct cam_devq *devq;
4765         cam_status status;
4766
4767         mtx_assert(&bus->eb_mtx, MA_OWNED);
4768         /* Make space for us in the device queue on our bus */
4769         devq = bus->sim->devq;
4770         mtx_lock(&devq->send_mtx);
4771         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4772         mtx_unlock(&devq->send_mtx);
4773         if (status != CAM_REQ_CMP)
4774                 return (NULL);
4775
4776         device = (struct cam_ed *)malloc(sizeof(*device),
4777                                          M_CAMDEV, M_NOWAIT|M_ZERO);
4778         if (device == NULL)
4779                 return (NULL);
4780
4781         cam_init_pinfo(&device->devq_entry);
4782         device->target = target;
4783         device->lun_id = lun_id;
4784         device->sim = bus->sim;
4785         if (cam_ccbq_init(&device->ccbq,
4786                           bus->sim->max_dev_openings) != 0) {
4787                 free(device, M_CAMDEV);
4788                 return (NULL);
4789         }
4790         SLIST_INIT(&device->asyncs);
4791         SLIST_INIT(&device->periphs);
4792         device->generation = 0;
4793         device->flags = CAM_DEV_UNCONFIGURED;
4794         device->tag_delay_count = 0;
4795         device->tag_saved_openings = 0;
4796         device->refcount = 1;
4797         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4798         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4799         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4800         /*
4801          * Hold a reference to our parent target so it
4802          * will not go away before we do.
4803          */
4804         target->refcount++;
4805
4806         cur_device = TAILQ_FIRST(&target->ed_entries);
4807         while (cur_device != NULL && cur_device->lun_id < lun_id)
4808                 cur_device = TAILQ_NEXT(cur_device, links);
4809         if (cur_device != NULL)
4810                 TAILQ_INSERT_BEFORE(cur_device, device, links);
4811         else
4812                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4813         target->generation++;
4814         return (device);
4815 }
4816
4817 void
4818 xpt_acquire_device(struct cam_ed *device)
4819 {
4820         struct cam_eb *bus = device->target->bus;
4821
4822         mtx_lock(&bus->eb_mtx);
4823         device->refcount++;
4824         mtx_unlock(&bus->eb_mtx);
4825 }
4826
4827 void
4828 xpt_release_device(struct cam_ed *device)
4829 {
4830         struct cam_eb *bus = device->target->bus;
4831         struct cam_devq *devq;
4832
4833         mtx_lock(&bus->eb_mtx);
4834         if (--device->refcount > 0) {
4835                 mtx_unlock(&bus->eb_mtx);
4836                 return;
4837         }
4838
4839         TAILQ_REMOVE(&device->target->ed_entries, device, links);
4840         device->target->generation++;
4841         mtx_unlock(&bus->eb_mtx);
4842
4843         /* Release our slot in the devq */
4844         devq = bus->sim->devq;
4845         mtx_lock(&devq->send_mtx);
4846         cam_devq_resize(devq, devq->send_queue.array_size - 1);
4847
4848         KASSERT(SLIST_EMPTY(&device->periphs),
4849             ("destroying device, but periphs list is not empty"));
4850         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4851             ("destroying device while still queued for ccbs"));
4852
4853         /* The send_mtx must be held when accessing the callout */
4854         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4855                 callout_stop(&device->callout);
4856
4857         mtx_unlock(&devq->send_mtx);
4858
4859         xpt_release_target(device->target);
4860
4861         cam_ccbq_fini(&device->ccbq);
4862         /*
4863          * Free allocated memory.  free(9) does nothing if the
4864          * supplied pointer is NULL, so it is safe to call without
4865          * checking.
4866          */
4867         free(device->supported_vpds, M_CAMXPT);
4868         free(device->device_id, M_CAMXPT);
4869         free(device->ext_inq, M_CAMXPT);
4870         free(device->physpath, M_CAMXPT);
4871         free(device->rcap_buf, M_CAMXPT);
4872         free(device->serial_num, M_CAMXPT);
4873         free(device->nvme_data, M_CAMXPT);
4874         free(device->nvme_cdata, M_CAMXPT);
4875         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4876 }
4877
4878 uint32_t
4879 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4880 {
4881         int     result;
4882         struct  cam_ed *dev;
4883
4884         dev = path->device;
4885         mtx_lock(&dev->sim->devq->send_mtx);
4886         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4887         mtx_unlock(&dev->sim->devq->send_mtx);
4888         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4889          || (dev->inq_flags & SID_CmdQue) != 0)
4890                 dev->tag_saved_openings = newopenings;
4891         return (result);
4892 }
4893
4894 static struct cam_eb *
4895 xpt_find_bus(path_id_t path_id)
4896 {
4897         struct cam_eb *bus;
4898
4899         xpt_lock_buses();
4900         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4901              bus != NULL;
4902              bus = TAILQ_NEXT(bus, links)) {
4903                 if (bus->path_id == path_id) {
4904                         bus->refcount++;
4905                         break;
4906                 }
4907         }
4908         xpt_unlock_buses();
4909         return (bus);
4910 }
4911
4912 static struct cam_et *
4913 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4914 {
4915         struct cam_et *target;
4916
4917         mtx_assert(&bus->eb_mtx, MA_OWNED);
4918         for (target = TAILQ_FIRST(&bus->et_entries);
4919              target != NULL;
4920              target = TAILQ_NEXT(target, links)) {
4921                 if (target->target_id == target_id) {
4922                         target->refcount++;
4923                         break;
4924                 }
4925         }
4926         return (target);
4927 }
4928
4929 static struct cam_ed *
4930 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4931 {
4932         struct cam_ed *device;
4933
4934         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4935         for (device = TAILQ_FIRST(&target->ed_entries);
4936              device != NULL;
4937              device = TAILQ_NEXT(device, links)) {
4938                 if (device->lun_id == lun_id) {
4939                         device->refcount++;
4940                         break;
4941                 }
4942         }
4943         return (device);
4944 }
4945
4946 void
4947 xpt_start_tags(struct cam_path *path)
4948 {
4949         struct ccb_relsim crs;
4950         struct cam_ed *device;
4951         struct cam_sim *sim;
4952         int    newopenings;
4953
4954         device = path->device;
4955         sim = path->bus->sim;
4956         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4957         xpt_freeze_devq(path, /*count*/1);
4958         device->inq_flags |= SID_CmdQue;
4959         if (device->tag_saved_openings != 0)
4960                 newopenings = device->tag_saved_openings;
4961         else
4962                 newopenings = min(device->maxtags,
4963                                   sim->max_tagged_dev_openings);
4964         xpt_dev_ccbq_resize(path, newopenings);
4965         xpt_async(AC_GETDEV_CHANGED, path, NULL);
4966         memset(&crs, 0, sizeof(crs));
4967         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4968         crs.ccb_h.func_code = XPT_REL_SIMQ;
4969         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4970         crs.openings
4971             = crs.release_timeout
4972             = crs.qfrozen_cnt
4973             = 0;
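        /*
         * With RELSIM_RELEASE_AFTER_QEMPTY, this request arranges for the
         * devq freeze taken above to be released once the device has no
         * active CCBs (see the CAM_DEV_REL_ON_QUEUE_EMPTY handling in
         * xpt_done_process()).
         */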
4974         xpt_action((union ccb *)&crs);
4975 }
4976
4977 void
4978 xpt_stop_tags(struct cam_path *path)
4979 {
4980         struct ccb_relsim crs;
4981         struct cam_ed *device;
4982         struct cam_sim *sim;
4983
4984         device = path->device;
4985         sim = path->bus->sim;
4986         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4987         device->tag_delay_count = 0;
4988         xpt_freeze_devq(path, /*count*/1);
4989         device->inq_flags &= ~SID_CmdQue;
4990         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4991         xpt_async(AC_GETDEV_CHANGED, path, NULL);
4992         memset(&crs, 0, sizeof(crs));
4993         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4994         crs.ccb_h.func_code = XPT_REL_SIMQ;
4995         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4996         crs.openings
4997             = crs.release_timeout
4998             = crs.qfrozen_cnt
4999             = 0;
5000         xpt_action((union ccb *)&crs);
5001 }
5002
5003 /*
5004  * Assume all possible buses are detected by this time, so allow boot
5005  * as soon as they have all been scanned.
5006  */
5007 static void
5008 xpt_boot_delay(void *arg)
5009 {
5010
5011         xpt_release_boot();
5012 }
5013
5014 /*
5015  * Now that all config hooks have completed, start the boot_delay timer,
5016  * waiting for buses that may still be undetected (e.g. USB) to appear.
5017  */
5018 static void
5019 xpt_ch_done(void *arg)
5020 {
5021
5022         callout_init(&xsoftc.boot_callout, 1);
5023         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay,
5024             SBT_1MS, xpt_boot_delay, NULL, 0);
5025 }
5026 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5027
5028 /*
5029  * Now that interrupts are enabled, go find our devices
5030  */
5031 static void
5032 xpt_config(void *arg)
5033 {
5034         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5035                 printf("xpt_config: failed to create taskqueue thread.\n");
5036
5037         /* Setup debugging path */
5038         if (cam_dflags != CAM_DEBUG_NONE) {
5039                 if (xpt_create_path(&cam_dpath, NULL,
5040                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5041                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5042                         printf("xpt_config: xpt_create_path() failed for debug"
5043                                " target %d:%d:%d, debugging disabled\n",
5044                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5045                         cam_dflags = CAM_DEBUG_NONE;
5046                 }
5047         } else
5048                 cam_dpath = NULL;
5049
5050         periphdriver_init(1);
5051         xpt_hold_boot();
5052
5053         /* Fire up rescan thread. */
5054         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5055             "cam", "scanner")) {
5056                 printf("xpt_config: failed to create rescan thread.\n");
5057         }
5058 }
5059
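/*
 * Boot holds delay mounting of the root file system: the first hold takes
 * a root mount token, and xpt_release_boot() drops it once all holds have
 * been released and final configuration has run.
 */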
5060 void
5061 xpt_hold_boot_locked(void)
5062 {
5063
5064         if (xsoftc.buses_to_config++ == 0)
5065                 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5066 }
5067
5068 void
5069 xpt_hold_boot(void)
5070 {
5071
5072         xpt_lock_buses();
5073         xpt_hold_boot_locked();
5074         xpt_unlock_buses();
5075 }
5076
5077 void
5078 xpt_release_boot(void)
5079 {
5080
5081         xpt_lock_buses();
5082         if (--xsoftc.buses_to_config == 0) {
5083                 if (xsoftc.buses_config_done == 0) {
5084                         xsoftc.buses_config_done = 1;
5085                         xsoftc.buses_to_config++;
5086                         TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5087                             NULL);
5088                         taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5089                 } else
5090                         root_mount_rel(&xsoftc.xpt_rootmount);
5091         }
5092         xpt_unlock_buses();
5093 }
5094
5095 /*
5096  * If the given device only has one peripheral attached to it, and if that
5097  * peripheral is the passthrough driver, announce it.  This ensures that the
5098  * user sees some sort of announcement for every peripheral in their system.
5099  */
5100 static int
5101 xptpassannouncefunc(struct cam_ed *device, void *arg)
5102 {
5103         struct cam_periph *periph;
5104         int i;
5105
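        /*
         * Count the peripherals attached to this device; the loop body
         * is intentionally empty.
         */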
5106         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5107              periph = SLIST_NEXT(periph, periph_links), i++);
5108
5109         periph = SLIST_FIRST(&device->periphs);
5110         if ((i == 1)
5111          && (strncmp(periph->periph_name, "pass", 4) == 0))
5112                 xpt_announce_periph(periph, NULL);
5113
5114         return(1);
5115 }
5116
5117 static void
5118 xpt_finishconfig_task(void *context, int pending)
5119 {
5120
5121         periphdriver_init(2);
5122         /*
5123          * Check for devices with no "standard" peripheral driver
5124          * attached.  For any devices like that, announce the
5125          * passthrough driver so the user will see something.
5126          */
5127         if (!bootverbose)
5128                 xpt_for_all_devices(xptpassannouncefunc, NULL);
5129
5130         xpt_release_boot();
5131 }
5132
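/*
 * Usage sketch (hypothetical callback name): a peripheral driver typically
 * registers for new-device events from its init routine with
 *
 *	xpt_register_async(AC_FOUND_DEVICE, myasync, NULL, NULL);
 *
 * A NULL path registers against the wildcard XPT path, and new
 * AC_FOUND_DEVICE/AC_PATH_REGISTERED clients are immediately replayed the
 * existing devices and buses (see the xpt_for_all_devices() and
 * xpt_for_all_busses() calls below).
 */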
5133 cam_status
5134 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5135                    struct cam_path *path)
5136 {
5137         struct ccb_setasync csa;
5138         cam_status status;
5139         bool xptpath = false;
5140
5141         if (path == NULL) {
5142                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5143                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5144                 if (status != CAM_REQ_CMP)
5145                         return (status);
5146                 xpt_path_lock(path);
5147                 xptpath = true;
5148         }
5149
5150         memset(&csa, 0, sizeof(csa));
5151         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5152         csa.ccb_h.func_code = XPT_SASYNC_CB;
5153         csa.event_enable = event;
5154         csa.callback = cbfunc;
5155         csa.callback_arg = cbarg;
5156         xpt_action((union ccb *)&csa);
5157         status = csa.ccb_h.status;
5158
5159         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5160             ("xpt_register_async: func %p\n", cbfunc));
5161
5162         if (xptpath) {
5163                 xpt_path_unlock(path);
5164                 xpt_free_path(path);
5165         }
5166
5167         if ((status == CAM_REQ_CMP) &&
5168             (csa.event_enable & AC_FOUND_DEVICE)) {
5169                 /*
5170                  * Get this peripheral up to date with all
5171                  * the currently existing devices.
5172                  */
5173                 xpt_for_all_devices(xptsetasyncfunc, &csa);
5174         }
5175         if ((status == CAM_REQ_CMP) &&
5176             (csa.event_enable & AC_PATH_REGISTERED)) {
5177                 /*
5178                  * Get this peripheral up to date with all
5179                  * the currently existing buses.
5180                  */
5181                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5182         }
5183
5184         return (status);
5185 }
5186
5187 static void
5188 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5189 {
5190         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5191
5192         switch (work_ccb->ccb_h.func_code) {
5193         /* Common cases first */
5194         case XPT_PATH_INQ:              /* Path routing inquiry */
5195         {
5196                 struct ccb_pathinq *cpi;
5197
5198                 cpi = &work_ccb->cpi;
5199                 cpi->version_num = 1; /* XXX??? */
5200                 cpi->hba_inquiry = 0;
5201                 cpi->target_sprt = 0;
5202                 cpi->hba_misc = 0;
5203                 cpi->hba_eng_cnt = 0;
5204                 cpi->max_target = 0;
5205                 cpi->max_lun = 0;
5206                 cpi->initiator_id = 0;
5207                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5208                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5209                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5210                 cpi->unit_number = sim->unit_number;
5211                 cpi->bus_id = sim->bus_id;
5212                 cpi->base_transfer_speed = 0;
5213                 cpi->protocol = PROTO_UNSPECIFIED;
5214                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5215                 cpi->transport = XPORT_UNSPECIFIED;
5216                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5217                 cpi->ccb_h.status = CAM_REQ_CMP;
5218                 break;
5219         }
5220         default:
5221                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5222                 break;
5223         }
5224         xpt_done(work_ccb);
5225 }
5226
5227 /*
5228  * The xpt as a "controller" has no interrupt sources, so polling
5229  * is a no-op.
5230  */
5231 static void
5232 xptpoll(struct cam_sim *sim)
5233 {
5234 }
5235
5236 void
5237 xpt_lock_buses(void)
5238 {
5239         mtx_lock(&xsoftc.xpt_topo_lock);
5240 }
5241
5242 void
5243 xpt_unlock_buses(void)
5244 {
5245         mtx_unlock(&xsoftc.xpt_topo_lock);
5246 }
5247
5248 struct mtx *
5249 xpt_path_mtx(struct cam_path *path)
5250 {
5251
5252         return (&path->device->device_mtx);
5253 }
5254
5255 static void
5256 xpt_done_process(struct ccb_hdr *ccb_h)
5257 {
5258         struct cam_sim *sim = NULL;
5259         struct cam_devq *devq = NULL;
5260         struct mtx *mtx = NULL;
5261
5262 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5263         struct ccb_scsiio *csio;
5264
5265         if (ccb_h->func_code == XPT_SCSI_IO) {
5266                 csio = &((union ccb *)ccb_h)->csio;
5267                 if (csio->bio != NULL)
5268                         biotrack(csio->bio, __func__);
5269         }
5270 #endif
5271
5272         if (ccb_h->flags & CAM_HIGH_POWER) {
5273                 struct highpowerlist    *hphead;
5274                 struct cam_ed           *device;
5275
5276                 mtx_lock(&xsoftc.xpt_highpower_lock);
5277                 hphead = &xsoftc.highpowerq;
5278
5279                 device = STAILQ_FIRST(hphead);
5280
5281                 /*
5282                  * Increment the count since this command is done.
5283                  */
5284                 xsoftc.num_highpower++;
5285
5286                 /*
5287                  * Any high powered commands queued up?
5288                  */
5289                 if (device != NULL) {
5290                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5291                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5292
5293                         mtx_lock(&device->sim->devq->send_mtx);
5294                         xpt_release_devq_device(device,
5295                                          /*count*/1, /*runqueue*/TRUE);
5296                         mtx_unlock(&device->sim->devq->send_mtx);
5297                 } else
5298                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5299         }
5300
5301         /*
5302          * Insulate against a race where the periph is destroyed but CCBs are
5303          * still not all processed. This shouldn't happen, but it gives us
5304          * better bug diagnostics when it does.
5305          */
5306         if (ccb_h->path->bus)
5307                 sim = ccb_h->path->bus->sim;
5308
5309         if (ccb_h->status & CAM_RELEASE_SIMQ) {
5310                 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request"));
5311                 xpt_release_simq(sim, /*run_queue*/FALSE);
5312                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
5313         }
5314
5315         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5316          && (ccb_h->status & CAM_DEV_QFRZN)) {
5317                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5318                 ccb_h->status &= ~CAM_DEV_QFRZN;
5319         }
5320
5321         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5322                 struct cam_ed *dev = ccb_h->path->device;
5323
5324                 if (sim)
5325                         devq = sim->devq;
5326                 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.",
5327                         ccb_h, xpt_action_name(ccb_h->func_code)));
5328
5329                 mtx_lock(&devq->send_mtx);
5330                 devq->send_active--;
5331                 devq->send_openings++;
5332                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5333
5334                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5335                   && (dev->ccbq.dev_active == 0))) {
5336                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5337                         xpt_release_devq_device(dev, /*count*/1,
5338                                          /*run_queue*/FALSE);
5339                 }
5340
5341                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5342                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5343                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5344                         xpt_release_devq_device(dev, /*count*/1,
5345                                          /*run_queue*/FALSE);
5346                 }
5347
5348                 if (!device_is_queued(dev))
5349                         (void)xpt_schedule_devq(devq, dev);
5350                 xpt_run_devq(devq);
5351                 mtx_unlock(&devq->send_mtx);
5352
5353                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5354                         mtx = xpt_path_mtx(ccb_h->path);
5355                         mtx_lock(mtx);
5356
5357                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5358                          && (--dev->tag_delay_count == 0))
5359                                 xpt_start_tags(ccb_h->path);
5360                 }
5361         }
5362
5363         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5364                 if (mtx == NULL) {
5365                         mtx = xpt_path_mtx(ccb_h->path);
5366                         mtx_lock(mtx);
5367                 }
5368         } else {
5369                 if (mtx != NULL) {
5370                         mtx_unlock(mtx);
5371                         mtx = NULL;
5372                 }
5373         }
5374
5375         /* Call the peripheral driver's callback */
5376         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5377         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5378         if (mtx != NULL)
5379                 mtx_unlock(mtx);
5380 }
5381
5382 /*
5383  * Parameterize instead and use xpt_done_td?
5384  */
5385 static void
5386 xpt_async_td(void *arg)
5387 {
5388         struct cam_doneq *queue = arg;
5389         struct ccb_hdr *ccb_h;
5390         STAILQ_HEAD(, ccb_hdr)  doneq;
5391
5392         STAILQ_INIT(&doneq);
5393         mtx_lock(&queue->cam_doneq_mtx);
5394         while (1) {
5395                 while (STAILQ_EMPTY(&queue->cam_doneq))
5396                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5397                             PRIBIO, "-", 0);
5398                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5399                 mtx_unlock(&queue->cam_doneq_mtx);
5400
5401                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5402                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5403                         xpt_done_process(ccb_h);
5404                 }
5405
5406                 mtx_lock(&queue->cam_doneq_mtx);
5407         }
5408 }
5409
5410 void
5411 xpt_done_td(void *arg)
5412 {
5413         struct cam_doneq *queue = arg;
5414         struct ccb_hdr *ccb_h;
5415         STAILQ_HEAD(, ccb_hdr)  doneq;
5416
5417         STAILQ_INIT(&doneq);
5418         mtx_lock(&queue->cam_doneq_mtx);
5419         while (1) {
5420                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5421                         queue->cam_doneq_sleep = 1;
5422                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5423                             PRIBIO, "-", 0);
5424                         queue->cam_doneq_sleep = 0;
5425                 }
5426                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5427                 mtx_unlock(&queue->cam_doneq_mtx);
5428
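                /*
                 * Completion callbacks run here and must not sleep; with
                 * INVARIANTS, THREAD_NO_SLEEPING() turns any attempt to
                 * sleep into an assertion failure.
                 */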
5429                 THREAD_NO_SLEEPING();
5430                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5431                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5432                         xpt_done_process(ccb_h);
5433                 }
5434                 THREAD_SLEEPING_OK();
5435
5436                 mtx_lock(&queue->cam_doneq_mtx);
5437         }
5438 }
5439
5440 static void
5441 camisr_runqueue(void)
5442 {
5443         struct  ccb_hdr *ccb_h;
5444         struct cam_doneq *queue;
5445         int i;
5446
5447         /* Process global queues. */
5448         for (i = 0; i < cam_num_doneqs; i++) {
5449                 queue = &cam_doneqs[i];
5450                 mtx_lock(&queue->cam_doneq_mtx);
5451                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5452                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5453                         mtx_unlock(&queue->cam_doneq_mtx);
5454                         xpt_done_process(ccb_h);
5455                         mtx_lock(&queue->cam_doneq_mtx);
5456                 }
5457                 mtx_unlock(&queue->cam_doneq_mtx);
5458         }
5459 }
5460
5461 /**
5462  * @brief Return the device_t associated with the path
5463  *
5464  * When a SIM is created, it registers a bus with a NEWBUS device_t. This is
5465  * stored in the internal cam_eb bus structure. There is no guarantee any given
5466  * path will have a @c device_t associated with it (it's legal to call @c
5467  * xpt_bus_register with a @c NULL @c device_t).
5468  *
5469  * @param path          Path to return the device_t for.
5470  */
5471 device_t
5472 xpt_path_sim_device(const struct cam_path *path)
5473 {
5474         return (path->bus->parent_dev);
5475 }
5476
5477 struct kv {
5479         uint32_t v;
5480         const char *name;
5481 };
5482
5483 static struct kv map[] = {
5484         { XPT_NOOP, "XPT_NOOP" },
5485         { XPT_SCSI_IO, "XPT_SCSI_IO" },
5486         { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5487         { XPT_GDEVLIST, "XPT_GDEVLIST" },
5488         { XPT_PATH_INQ, "XPT_PATH_INQ" },
5489         { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5490         { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5491         { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5492         { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5493         { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5494         { XPT_DEBUG, "XPT_DEBUG" },
5495         { XPT_PATH_STATS, "XPT_PATH_STATS" },
5496         { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5497         { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5498         { XPT_ASYNC, "XPT_ASYNC" },
5499         { XPT_ABORT, "XPT_ABORT" },
5500         { XPT_RESET_BUS, "XPT_RESET_BUS" },
5501         { XPT_RESET_DEV, "XPT_RESET_DEV" },
5502         { XPT_TERM_IO, "XPT_TERM_IO" },
5503         { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5504         { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5505         { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5506         { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5507         { XPT_ATA_IO, "XPT_ATA_IO" },
5508         { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5509         { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5510         { XPT_NVME_IO, "XPT_NVME_IO" },
5511         { XPT_MMC_IO, "XPT_MMC_IO" },
5512         { XPT_SMP_IO, "XPT_SMP_IO" },
5513         { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5514         { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5515         { XPT_ENG_INQ, "XPT_ENG_INQ" },
5516         { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5517         { XPT_EN_LUN, "XPT_EN_LUN" },
5518         { XPT_TARGET_IO, "XPT_TARGET_IO" },
5519         { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5520         { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5521         { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5522         { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5523         { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5524         { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5525         { 0, 0 }
5526 };
5527
5528 const char *
5529 xpt_action_name(uint32_t action)
5530 {
5531         static char buffer[32]; /* Only for unknown messages -- racy */
5532         struct kv *walker = map;
5533
5534         while (walker->name != NULL) {
5535                 if (walker->v == action)
5536                         return (walker->name);
5537                 walker++;
5538         }
5539
5540         snprintf(buffer, sizeof(buffer), "%#x", action);
5541         return (buffer);
5542 }
5543
5544 void
5545 xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...)
5546 {
5547         struct sbuf sbuf;
5548         char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5549         struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5550         va_list ap;
5551
5552         sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5553         xpt_path_sbuf(path, sb);
5554         va_start(ap, fmt);
5555         sbuf_vprintf(sb, fmt, ap);
5556         va_end(ap);
5557         sbuf_finish(sb);
5558         sbuf_delete(sb);
5559         if (cam_debug_delay != 0)
5560                 DELAY(cam_debug_delay);
5561 }
5562
5563 void
5564 xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...)
5565 {
5566         struct sbuf sbuf;
5567         char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5568         struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5569         va_list ap;
5570
5571         sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5572         xpt_device_sbuf(dev, sb);
5573         va_start(ap, fmt);
5574         sbuf_vprintf(sb, fmt, ap);
5575         va_end(ap);
5576         sbuf_finish(sb);
5577         sbuf_delete(sb);
5578         if (cam_debug_delay != 0)
5579                 DELAY(cam_debug_delay);
5580 }
5581
5582 void
5583 xpt_cam_debug(const char *fmt, ...)
5584 {
5585         struct sbuf sbuf;
5586         char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5587         struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5588         va_list ap;
5589
5590         sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5591         sbuf_printf(sb, "cam_debug: ");
5592         va_start(ap, fmt);
5593         sbuf_vprintf(sb, fmt, ap);
5594         va_end(ap);
5595         sbuf_finish(sb);
5596         sbuf_delete(sb);
5597         if (cam_debug_delay != 0)
5598                 DELAY(cam_debug_delay);
5599 }