/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_printf.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_iosched.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/stdarg.h>     /* for xpt_print below */

#include "opt_cam.h"

/* Wild guess based on not wanting to grow the stack too much */
#define XPT_PRINT_MAXLEN        512
#ifdef PRINTF_BUFR_SIZE
#define XPT_PRINT_LEN   PRINTF_BUFR_SIZE
#else
#define XPT_PRINT_LEN   128
#endif
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

struct xpt_softc {
        uint32_t                xpt_generation;

        /* number of high powered commands that can go through right now */
        struct mtx              xpt_highpower_lock;
        STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
        int                     num_highpower;

        /* queue for handling async rescan requests. */
        TAILQ_HEAD(, ccb_hdr) ccb_scanq;
        int buses_to_config;
        int buses_config_done;
        int announce_nosbuf;

        /*
         * Registered buses
         *
         * N.B., "busses" is an archaic spelling of "buses".  In new code
         * "buses" is preferred.
         */
        TAILQ_HEAD(,cam_eb)     xpt_busses;
        u_int                   bus_generation;

        int                     boot_delay;
        struct callout          boot_callout;
        struct task             boot_task;
        struct root_hold_token  xpt_rootmount;

        struct mtx              xpt_topo_lock;
        struct taskqueue        *xpt_taskq;
};

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
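
/*
 * A dev_match_ret combines one action code (high nibble) with flag bits
 * (low nibble); the match functions below OR these together.  For example,
 * xptbusmatch() returns (DM_RET_DESCEND | DM_RET_COPY) when given no
 * patterns: copy this node and keep descending the EDT.
 */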

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
            &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
SYSCTL_INT(_kern_cam, OID_AUTO, announce_nosbuf, CTLFLAG_RWTUN,
            &xsoftc.announce_nosbuf, 0, "Don't use sbuf for announcements");

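/*
 * Per-CPU completion ("done") queues.  xpt_init() below creates up to
 * MAXCPU of these, each drained by a dedicated "doneq%d" kernel thread
 * running xpt_done_td().
 */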
struct cam_doneq {
        struct mtx_padalign     cam_doneq_mtx;
        STAILQ_HEAD(, ccb_hdr)  cam_doneq;
        int                     cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static u_int __read_mostly cam_num_doneqs;
static struct proc *cam_proc;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
        CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
u_int32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
        &cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
        &cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static int      xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
static void      xpt_run_allocq_task(void *context, int pending);
static void      xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void      xpt_acquire_bus(struct cam_eb *bus);
static void      xpt_release_bus(struct cam_eb *bus);
static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
                    int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_acquire_target(struct cam_et *target);
static void      xpt_release_target(struct cam_et *target);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_config(void *arg);
static void      xpt_hold_boot_locked(void);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr_runqueue(void);
static void      xpt_done_process(struct ccb_hdr *ccb_h);
static void      xpt_done_td(void *);
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void             xpt_finishconfig_task(void *context, int pending);
static void             xpt_dev_async_default(u_int32_t async_code,
                                              struct cam_eb *bus,
                                              struct cam_et *target,
                                              struct cam_ed *device,
                                              void *async_arg);
static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
                                                 struct cam_et *target,
                                                 lun_id_t lun_id);
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
        int     retval;

        mtx_assert(&devq->send_mtx, MA_OWNED);
        if ((dev->ccbq.queue.entries > 0) &&
            (dev->ccbq.dev_openings > 0) &&
            (dev->ccbq.queue.qfrozen_cnt == 0)) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&devq->send_queue,
                                     &dev->devq_entry,
                                     CAMQ_GET_PRIO(&dev->ccbq.queue));
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
        return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("%s: can't do nonblocking access\n", devtoname(dev));
                return(ENODEV);
        }

        return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

        return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
                error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
        }
        return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        error = 0;

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;
                struct cam_eb *bus;

                inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (inccb->ccb_h.func_code == XPT_SCSI_IO)
                        inccb->csio.bio = NULL;
#endif

                if (inccb->ccb_h.flags & CAM_UNLOCKED)
                        return (EINVAL);

                bus = xpt_find_bus(inccb->ccb_h.path_id);
                if (bus == NULL)
                        return (EINVAL);

                switch (inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                case XPT_SCAN_TGT:
                        if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                default:
                        break;
                }

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:
                case XPT_SCAN_TGT:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        xpt_path_lock(ccb->ccb_h.path);
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        xpt_path_unlock(ccb->ccb_h.path);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;
                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
                            CAM_DATA_VADDR) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo, maxphys);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                xpt_release_bus(bus);
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the ccb.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.
         * Note that since we do everything in one pass, unlike the
         * XPT_GDEVLIST ccb, we never return a status of
         * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
         * impossible for the device peripheral driver list to change since
         * we look at the whole thing in one pass, and we do it with lock
         * protection.
         */
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                int base_periph_found;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                base_periph_found = 0;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (ccb->ccb_h.func_code == XPT_SCSI_IO)
                        ccb->csio.bio = NULL;
#endif

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                xpt_lock_buses();

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        xpt_unlock_buses();
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {
                        if (periph->unit_number == unit)
                                break;
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = SLIST_FIRST(&device->periphs);
                             periph != NULL;
                             periph = SLIST_NEXT(periph, periph_links), i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strlcpy(ccb->cgdl.periph_name,
                                               periph->periph_name,
                                               sizeof(ccb->cgdl.periph_name));
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (SLIST_NEXT(periph, periph_links))
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in his
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass\" in "
                                       "your kernel config file\n");
                        }
                }
                xpt_unlock_buses();
                break;
                }
        default:
                error = ENOTTY;
                break;
        }

        return(error);
}
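
/*
 * Example: a minimal userland sketch (not compiled here) of the
 * CAMGETPASSTHRU protocol described above.  Fill in an XPT_GDEVLIST CCB
 * with a peripheral driver name and unit number, then ask /dev/xpt0 for
 * the corresponding "pass" device.  This is similar in spirit to what
 * libcam does internally; the helper name and error handling below are
 * illustrative only.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

static int
find_pass_device(const char *periph_name, u_int unit)
{
        union ccb ccb;
        int fd;

        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                return (-1);
        memset(&ccb, 0, sizeof(ccb));
        ccb.ccb_h.func_code = XPT_GDEVLIST;
        strlcpy(ccb.cgdl.periph_name, periph_name,
            sizeof(ccb.cgdl.periph_name));
        ccb.cgdl.unit_number = unit;
        if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1 ||
            ccb.ccb_h.status != CAM_REQ_CMP) {
                close(fd);
                return (-1);
        }
        /* e.g. prints something like "pass3" if ("da", 11) exists */
        printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
        close(fd);
        return (0);
}
#endif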

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                if ((error = xpt_init(NULL)) != 0)
                        return (error);
                break;
        case MOD_UNLOAD:
                return EBUSY;
        default:
                return EOPNOTSUPP;
        }

        return 0;
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
        struct xpt_proto **pp;

        SET_FOREACH(pp, cam_xpt_proto_set) {
                if ((*pp)->proto == proto)
                        return *pp;
        }

        return NULL;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

        if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
                xpt_free_path(done_ccb->ccb_h.path);
                xpt_free_ccb(done_ccb);
        } else {
                done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
                (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
        }
        xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
        union ccb       *ccb;
        struct mtx      *mtx;
        struct cam_ed   *device;

        xpt_lock_buses();
        for (;;) {
                if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
                        msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
                               "-", 0);
                if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
                        TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
                        xpt_unlock_buses();

                        /*
                         * We need to lock the device's mutex which we use as
                         * the path mutex. We can't do it directly because the
                         * cam_path in the ccb may wind up going away because
                         * the path lock may be dropped and the path retired in
                         * the completion callback. We do this directly to keep
                         * the reference counts in cam_path sane. We also have
                         * to copy the device pointer because ccb_h.path may
                         * be freed in the callback.
                         */
                        mtx = xpt_path_mtx(ccb->ccb_h.path);
                        device = ccb->ccb_h.path->device;
                        xpt_acquire_device(device);
                        mtx_lock(mtx);
                        xpt_action(ccb);
                        mtx_unlock(mtx);
                        xpt_release_device(device);

                        xpt_lock_buses();
                }
        }
}

void
xpt_rescan(union ccb *ccb)
{
        struct ccb_hdr *hdr;

        /* Prepare request */
        if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_BUS;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_TGT;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_LUN;
        else {
                xpt_print(ccb->ccb_h.path, "illegal scan path\n");
                xpt_free_path(ccb->ccb_h.path);
                xpt_free_ccb(ccb);
                return;
        }
        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
            ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
                xpt_action_name(ccb->ccb_h.func_code)));

        ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
        ccb->ccb_h.cbfcnp = xpt_rescan_done;
        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
        /* Don't make duplicate entries for the same paths. */
        xpt_lock_buses();
        if (ccb->ccb_h.ppriv_ptr1 == NULL) {
                TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
                        if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
                                wakeup(&xsoftc.ccb_scanq);
                                xpt_unlock_buses();
                                xpt_print(ccb->ccb_h.path, "rescan already queued\n");
                                xpt_free_path(ccb->ccb_h.path);
                                xpt_free_ccb(ccb);
                                return;
                        }
                }
        }
        TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
        xpt_hold_boot_locked();
        wakeup(&xsoftc.ccb_scanq);
        xpt_unlock_buses();
}
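
/*
 * Example: a minimal sketch of how a SIM/HBA driver typically requests an
 * asynchronous rescan through xpt_rescan() above; several in-tree drivers
 * follow this pattern.  "sim" stands in for the caller's registered
 * struct cam_sim.  Once queued, xpt_rescan() owns both the CCB and the
 * path and frees them from xpt_rescan_done().
 */
#if 0
static void
example_rescan_bus(struct cam_sim *sim)
{
        union ccb *ccb;

        ccb = xpt_alloc_ccb_nowait();
        if (ccb == NULL)
                return;
        if (xpt_create_path(&ccb->ccb_h.path, /*periph*/NULL,
            cam_sim_path(sim), CAM_TARGET_WILDCARD,
            CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return;
        }
        xpt_rescan(ccb);        /* handed off to xpt_scanner_thread() */
}
#endif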

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;
        int error, i;

        TAILQ_INIT(&xsoftc.xpt_busses);
        TAILQ_INIT(&xsoftc.ccb_scanq);
        STAILQ_INIT(&xsoftc.highpowerq);
        xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

        mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
        xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
        /*
         * Override this value at compile time to assist our users
         * who don't use loader to boot a kernel.
         */
        xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif

        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe buses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*mtx*/NULL,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        if (xpt_sim == NULL)
                return (ENOMEM);

        if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
                printf("xpt_init: xpt_bus_register failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }
        xpt_path_lock(path);
        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, xpt_sim);
        xpt_path_unlock(path);
        xpt_free_path(path);

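        /*
         * Size the pool of completion queues/threads: by default one per
         * roughly six CPUs (e.g. two threads on an 8-CPU machine), clamped
         * to MAXCPU, unless overridden by the kern.cam.num_doneqs tunable.
         */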
        if (cam_num_doneqs < 1)
                cam_num_doneqs = 1 + mp_ncpus / 6;
        else if (cam_num_doneqs > MAXCPU)
                cam_num_doneqs = MAXCPU;
        for (i = 0; i < cam_num_doneqs; i++) {
                mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
                    MTX_DEF);
                STAILQ_INIT(&cam_doneqs[i].cam_doneq);
                error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
                    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
                if (error != 0) {
                        cam_num_doneqs = i;
                        break;
                }
        }
        if (cam_num_doneqs < 1) {
                printf("xpt_init: Cannot init completion queues "
                       "- failing attach\n");
                return (ENOMEM);
        }

        /*
         * Register a callback for when interrupts are enabled.
         */
        config_intrhook_oneshot(xpt_config, NULL);

        return (0);
}
982
983 static cam_status
984 xptregister(struct cam_periph *periph, void *arg)
985 {
986         struct cam_sim *xpt_sim;
987
988         if (periph == NULL) {
989                 printf("xptregister: periph was NULL!!\n");
990                 return(CAM_REQ_CMP_ERR);
991         }
992
993         xpt_sim = (struct cam_sim *)arg;
994         xpt_sim->softc = periph;
995         xpt_periph = periph;
996         periph->softc = NULL;
997
998         return(CAM_REQ_CMP);
999 }
1000
1001 int32_t
1002 xpt_add_periph(struct cam_periph *periph)
1003 {
1004         struct cam_ed *device;
1005         int32_t  status;
1006
1007         TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
1008         device = periph->path->device;
1009         status = CAM_REQ_CMP;
1010         if (device != NULL) {
1011                 mtx_lock(&device->target->bus->eb_mtx);
1012                 device->generation++;
1013                 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1014                 mtx_unlock(&device->target->bus->eb_mtx);
1015                 atomic_add_32(&xsoftc.xpt_generation, 1);
1016         }
1017
1018         return (status);
1019 }
1020
1021 void
1022 xpt_remove_periph(struct cam_periph *periph)
1023 {
1024         struct cam_ed *device;
1025
1026         device = periph->path->device;
1027         if (device != NULL) {
1028                 mtx_lock(&device->target->bus->eb_mtx);
1029                 device->generation++;
1030                 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1031                 mtx_unlock(&device->target->bus->eb_mtx);
1032                 atomic_add_32(&xsoftc.xpt_generation, 1);
1033         }
1034 }
1035
1036 void
1037 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1038 {
1039         struct  cam_path *path = periph->path;
1040         struct  xpt_proto *proto;
1041
1042         cam_periph_assert(periph, MA_OWNED);
1043         periph->flags |= CAM_PERIPH_ANNOUNCED;
1044
1045         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1046                periph->periph_name, periph->unit_number,
1047                path->bus->sim->sim_name,
1048                path->bus->sim->unit_number,
1049                path->bus->sim->bus_id,
1050                path->bus->path_id,
1051                path->target->target_id,
1052                (uintmax_t)path->device->lun_id);
1053         printf("%s%d: ", periph->periph_name, periph->unit_number);
1054         proto = xpt_proto_find(path->device->protocol);
1055         if (proto)
1056                 proto->ops->announce(path->device);
1057         else
1058                 printf("%s%d: Unknown protocol device %d\n",
1059                     periph->periph_name, periph->unit_number,
1060                     path->device->protocol);
1061         if (path->device->serial_num_len > 0) {
1062                 /* Don't wrap the screen  - print only the first 60 chars */
1063                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1064                        periph->unit_number, path->device->serial_num);
1065         }
1066         /* Announce transport details. */
1067         path->bus->xport->ops->announce(periph);
1068         /* Announce command queueing. */
1069         if (path->device->inq_flags & SID_CmdQue
1070          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1071                 printf("%s%d: Command Queueing enabled\n",
1072                        periph->periph_name, periph->unit_number);
1073         }
1074         /* Announce caller's details if they've passed in. */
1075         if (announce_string != NULL)
1076                 printf("%s%d: %s\n", periph->periph_name,
1077                        periph->unit_number, announce_string);
1078 }
1079
1080 void
1081 xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
1082     char *announce_string)
1083 {
1084         struct  cam_path *path = periph->path;
1085         struct  xpt_proto *proto;
1086
1087         cam_periph_assert(periph, MA_OWNED);
1088         periph->flags |= CAM_PERIPH_ANNOUNCED;
1089
1090         /* Fall back to the non-sbuf method if necessary */
1091         if (xsoftc.announce_nosbuf != 0) {
1092                 xpt_announce_periph(periph, announce_string);
1093                 return;
1094         }
1095         proto = xpt_proto_find(path->device->protocol);
1096         if (((proto != NULL) && (proto->ops->announce_sbuf == NULL)) ||
1097             (path->bus->xport->ops->announce_sbuf == NULL)) {
1098                 xpt_announce_periph(periph, announce_string);
1099                 return;
1100         }
1101
1102         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1103             periph->periph_name, periph->unit_number,
1104             path->bus->sim->sim_name,
1105             path->bus->sim->unit_number,
1106             path->bus->sim->bus_id,
1107             path->bus->path_id,
1108             path->target->target_id,
1109             (uintmax_t)path->device->lun_id);
1110         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1111
1112         if (proto)
1113                 proto->ops->announce_sbuf(path->device, sb);
1114         else
1115                 sbuf_printf(sb, "%s%d: Unknown protocol device %d\n",
1116                     periph->periph_name, periph->unit_number,
1117                     path->device->protocol);
1118         if (path->device->serial_num_len > 0) {
1119                 /* Don't wrap the screen  - print only the first 60 chars */
1120                 sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
1121                     periph->periph_name, periph->unit_number,
1122                     path->device->serial_num);
1123         }
1124         /* Announce transport details. */
1125         path->bus->xport->ops->announce_sbuf(periph, sb);
1126         /* Announce command queueing. */
1127         if (path->device->inq_flags & SID_CmdQue
1128          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1129                 sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
1130                     periph->periph_name, periph->unit_number);
1131         }
1132         /* Announce caller's details if they've passed in. */
1133         if (announce_string != NULL)
1134                 sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
1135                     periph->unit_number, announce_string);
1136 }
1137
1138 void
1139 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1140 {
1141         if (quirks != 0) {
1142                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
1143                     periph->unit_number, quirks, bit_string);
1144         }
1145 }
1146
1147 void
1148 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
1149                          int quirks, char *bit_string)
1150 {
1151         if (xsoftc.announce_nosbuf != 0) {
1152                 xpt_announce_quirks(periph, quirks, bit_string);
1153                 return;
1154         }
1155
1156         if (quirks != 0) {
1157                 sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
1158                     periph->unit_number, quirks, bit_string);
1159         }
1160 }
1161
1162 void
1163 xpt_denounce_periph(struct cam_periph *periph)
1164 {
1165         struct  cam_path *path = periph->path;
1166         struct  xpt_proto *proto;
1167
1168         cam_periph_assert(periph, MA_OWNED);
1169         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1170                periph->periph_name, periph->unit_number,
1171                path->bus->sim->sim_name,
1172                path->bus->sim->unit_number,
1173                path->bus->sim->bus_id,
1174                path->bus->path_id,
1175                path->target->target_id,
1176                (uintmax_t)path->device->lun_id);
1177         printf("%s%d: ", periph->periph_name, periph->unit_number);
1178         proto = xpt_proto_find(path->device->protocol);
1179         if (proto)
1180                 proto->ops->denounce(path->device);
1181         else
1182                 printf("%s%d: Unknown protocol device %d\n",
1183                     periph->periph_name, periph->unit_number,
1184                     path->device->protocol);
1185         if (path->device->serial_num_len > 0)
1186                 printf(" s/n %.60s", path->device->serial_num);
1187         printf(" detached\n");
1188 }
1189
1190 void
1191 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
1192 {
1193         struct cam_path *path = periph->path;
1194         struct xpt_proto *proto;
1195
1196         cam_periph_assert(periph, MA_OWNED);
1197
1198         /* Fall back to the non-sbuf method if necessary */
1199         if (xsoftc.announce_nosbuf != 0) {
1200                 xpt_denounce_periph(periph);
1201                 return;
1202         }
1203         proto = xpt_proto_find(path->device->protocol);
1204         if ((proto != NULL) && (proto->ops->denounce_sbuf == NULL)) {
1205                 xpt_denounce_periph(periph);
1206                 return;
1207         }
1208
1209         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1210             periph->periph_name, periph->unit_number,
1211             path->bus->sim->sim_name,
1212             path->bus->sim->unit_number,
1213             path->bus->sim->bus_id,
1214             path->bus->path_id,
1215             path->target->target_id,
1216             (uintmax_t)path->device->lun_id);
1217         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1218
1219         if (proto)
1220                 proto->ops->denounce_sbuf(path->device, sb);
1221         else
1222                 sbuf_printf(sb, "%s%d: Unknown protocol device %d\n",
1223                     periph->periph_name, periph->unit_number,
1224                     path->device->protocol);
1225         if (path->device->serial_num_len > 0)
1226                 sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
1227         sbuf_printf(sb, " detached\n");
1228 }
1229
1230 int
1231 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1232 {
1233         int ret = -1, l, o;
1234         struct ccb_dev_advinfo cdai;
1235         struct scsi_vpd_device_id *did;
1236         struct scsi_vpd_id_descriptor *idd;
1237
1238         xpt_path_assert(path, MA_OWNED);
1239
1240         memset(&cdai, 0, sizeof(cdai));
1241         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1242         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1243         cdai.flags = CDAI_FLAG_NONE;
1244         cdai.bufsiz = len;
1245         cdai.buf = buf;
1246
1247         if (!strcmp(attr, "GEOM::ident"))
1248                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1249         else if (!strcmp(attr, "GEOM::physpath"))
1250                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
1251         else if (strcmp(attr, "GEOM::lunid") == 0 ||
1252                  strcmp(attr, "GEOM::lunname") == 0) {
1253                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1254                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1255                 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
1256                 if (cdai.buf == NULL) {
1257                         ret = ENOMEM;
1258                         goto out;
1259                 }
1260         } else
1261                 goto out;
1262
1263         xpt_action((union ccb *)&cdai); /* can only be synchronous */
1264         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1265                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1266         if (cdai.provsiz == 0)
1267                 goto out;
1268         switch(cdai.buftype) {
1269         case CDAI_TYPE_SCSI_DEVID:
1270                 did = (struct scsi_vpd_device_id *)cdai.buf;
1271                 if (strcmp(attr, "GEOM::lunid") == 0) {
1272                         idd = scsi_get_devid(did, cdai.provsiz,
1273                             scsi_devid_is_lun_naa);
1274                         if (idd == NULL)
1275                                 idd = scsi_get_devid(did, cdai.provsiz,
1276                                     scsi_devid_is_lun_eui64);
1277                         if (idd == NULL)
1278                                 idd = scsi_get_devid(did, cdai.provsiz,
1279                                     scsi_devid_is_lun_uuid);
1280                         if (idd == NULL)
1281                                 idd = scsi_get_devid(did, cdai.provsiz,
1282                                     scsi_devid_is_lun_md5);
1283                 } else
1284                         idd = NULL;
1285
1286                 if (idd == NULL)
1287                         idd = scsi_get_devid(did, cdai.provsiz,
1288                             scsi_devid_is_lun_t10);
1289                 if (idd == NULL)
1290                         idd = scsi_get_devid(did, cdai.provsiz,
1291                             scsi_devid_is_lun_name);
1292                 if (idd == NULL)
1293                         break;
1294
1295                 ret = 0;
1296                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1297                     SVPD_ID_CODESET_ASCII) {
1298                         if (idd->length < len) {
1299                                 for (l = 0; l < idd->length; l++)
1300                                         buf[l] = idd->identifier[l] ?
1301                                             idd->identifier[l] : ' ';
1302                                 buf[l] = 0;
1303                         } else
1304                                 ret = EFAULT;
1305                         break;
1306                 }
1307                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1308                     SVPD_ID_CODESET_UTF8) {
1309                         l = strnlen(idd->identifier, idd->length);
1310                         if (l < len) {
1311                                 bcopy(idd->identifier, buf, l);
1312                                 buf[l] = 0;
1313                         } else
1314                                 ret = EFAULT;
1315                         break;
1316                 }
1317                 if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
1318                     SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
1319                         if ((idd->length - 2) * 2 + 4 >= len) {
1320                                 ret = EFAULT;
1321                                 break;
1322                         }
1323                         for (l = 2, o = 0; l < idd->length; l++) {
1324                                 if (l == 6 || l == 8 || l == 10 || l == 12)
1325                                     o += sprintf(buf + o, "-");
1326                                 o += sprintf(buf + o, "%02x",
1327                                     idd->identifier[l]);
1328                         }
1329                         break;
1330                 }
1331                 if (idd->length * 2 < len) {
1332                         for (l = 0; l < idd->length; l++)
1333                                 sprintf(buf + l * 2, "%02x",
1334                                     idd->identifier[l]);
1335                 } else
1336                         ret = EFAULT;
1337                 break;
1338         default:
1339                 if (cdai.provsiz < len) {
1340                         cdai.buf[cdai.provsiz] = 0;
1341                         ret = 0;
1342                 } else
1343                         ret = EFAULT;
1344                 break;
1345         }
1346
1347 out:
1348         if ((char *)cdai.buf != buf)
1349                 free(cdai.buf, M_CAMXPT);
1350         return ret;
1351 }
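
The switch above reduces a binary SCSI VPD device-ID designator to one of three printable forms: ASCII and UTF-8 designators are copied (ASCII with embedded NULs replaced by spaces), UUID designators are printed 8-4-4-4-12 with the two-byte header skipped, and anything else is hex-encoded. Below is a minimal userland sketch of the hex/UUID formatting only; the example bytes are made up, and unlike the real code it keys the UUID case off the 0x10 header byte alone rather than also checking the designator type field.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/*
 * Mirror of the formatting rules above: designators whose first byte
 * is 0x10 (two-byte header, then a UUID) become 8-4-4-4-12, everything
 * else becomes plain hex.  Returns 0 on success, -1 if buf is too small.
 */
static int
format_devid(const unsigned char *id, size_t idlen, char *buf, size_t buflen)
{
        size_t l, o;

        if (idlen > 2 && id[0] == 0x10) {
                if ((idlen - 2) * 2 + 4 >= buflen)
                        return (-1);
                for (l = 2, o = 0; l < idlen; l++) {
                        if (l == 6 || l == 8 || l == 10 || l == 12)
                                o += sprintf(buf + o, "-");
                        o += sprintf(buf + o, "%02x", id[l]);
                }
                return (0);
        }
        if (idlen * 2 >= buflen)
                return (-1);
        for (l = 0; l < idlen; l++)
                sprintf(buf + l * 2, "%02x", id[l]);
        return (0);
}

int
main(void)
{
        const unsigned char naa[] = { 0x60, 0x0c, 0x0f, 0xf0 };
        char buf[64];

        assert(format_devid(naa, sizeof(naa), buf, sizeof(buf)) == 0);
        printf("lunid: %s\n", buf);     /* prints "600c0ff0" */
        return (0);
}
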
1352
1353 static dev_match_ret
1354 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1355             struct cam_eb *bus)
1356 {
1357         dev_match_ret retval;
1358         u_int i;
1359
1360         retval = DM_RET_NONE;
1361
1362         /*
1363          * If we aren't given something to match against, that's an error.
1364          */
1365         if (bus == NULL)
1366                 return(DM_RET_ERROR);
1367
1368         /*
1369          * If there are no match entries, then this bus matches no
1370          * matter what.
1371          */
1372         if ((patterns == NULL) || (num_patterns == 0))
1373                 return(DM_RET_DESCEND | DM_RET_COPY);
1374
1375         for (i = 0; i < num_patterns; i++) {
1376                 struct bus_match_pattern *cur_pattern;
1377
1378                 /*
1379                  * If the pattern in question isn't for a bus node, we
1380                  * aren't interested.  However, we do indicate to the
1381                  * calling routine that we should continue descending the
1382                  * tree, since the user wants to match against lower-level
1383                  * EDT elements.
1384                  */
1385                 if (patterns[i].type != DEV_MATCH_BUS) {
1386                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1387                                 retval |= DM_RET_DESCEND;
1388                         continue;
1389                 }
1390
1391                 cur_pattern = &patterns[i].pattern.bus_pattern;
1392
1393                 /*
1394                  * If they want to match any bus node, we give them any
1395                  * bus node.
1396                  */
1397                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1398                         /* set the copy flag */
1399                         retval |= DM_RET_COPY;
1400
1401                         /*
1402                          * If we've already decided on an action, go ahead
1403                          * and return.
1404                          */
1405                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1406                                 return(retval);
1407                 }
1408
1409                 /*
1410                  * Not sure why someone would do this...
1411                  */
1412                 if (cur_pattern->flags == BUS_MATCH_NONE)
1413                         continue;
1414
1415                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1416                  && (cur_pattern->path_id != bus->path_id))
1417                         continue;
1418
1419                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1420                  && (cur_pattern->bus_id != bus->sim->bus_id))
1421                         continue;
1422
1423                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1424                  && (cur_pattern->unit_number != bus->sim->unit_number))
1425                         continue;
1426
1427                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1428                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1429                              DEV_IDLEN) != 0))
1430                         continue;
1431
1432                 /*
1433                  * If we get to this point, the user definitely wants
1434                  * information on this bus.  So tell the caller to copy the
1435                  * data out.
1436                  */
1437                 retval |= DM_RET_COPY;
1438
1439                 /*
1440                  * If the return action has been set to descend, then we
1441                  * know that we've already seen a non-bus matching
1442                  * expression, therefore we need to further descend the tree.
1443                  * This won't change by continuing around the loop, so we
1444                  * go ahead and return.  If we haven't seen a non-bus
1445                  * matching expression, we keep going around the loop until
1446                  * we exhaust the matching expressions.  We'll set the stop
1447                  * flag once we fall out of the loop.
1448                  */
1449                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1450                         return(retval);
1451         }
1452
1453         /*
1454          * If the return action hasn't been set to descend yet, that means
1455          * we haven't seen anything other than bus matching patterns.  So
1456          * tell the caller to stop descending the tree -- the user doesn't
1457          * want to match against lower level tree elements.
1458          */
1459         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1460                 retval |= DM_RET_STOP;
1461
1462         return(retval);
1463 }
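
For reference, a hedged sketch of the pattern a consumer might build to exercise the name check above; the SIM name "ahcich" is only an example, and struct dev_match_pattern and the BUS_MATCH_* flags come from cam_ccb.h.

#include <string.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

/*
 * Match every bus whose SIM is named "ahcich".  With only bus patterns
 * present, xptbusmatch() folds in DM_RET_STOP, so the traversal never
 * descends below the bus level.
 */
static void
fill_bus_pattern(struct dev_match_pattern *p)
{
        memset(p, 0, sizeof(*p));
        p->type = DEV_MATCH_BUS;
        p->pattern.bus_pattern.flags = BUS_MATCH_NAME;
        strlcpy(p->pattern.bus_pattern.dev_name, "ahcich",
            sizeof(p->pattern.bus_pattern.dev_name));
}
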
1464
1465 static dev_match_ret
1466 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1467                struct cam_ed *device)
1468 {
1469         dev_match_ret retval;
1470         u_int i;
1471
1472         retval = DM_RET_NONE;
1473
1474         /*
1475          * If we aren't given something to match against, that's an error.
1476          */
1477         if (device == NULL)
1478                 return(DM_RET_ERROR);
1479
1480         /*
1481          * If there are no match entries, then this device matches no
1482          * matter what.
1483          */
1484         if ((patterns == NULL) || (num_patterns == 0))
1485                 return(DM_RET_DESCEND | DM_RET_COPY);
1486
1487         for (i = 0; i < num_patterns; i++) {
1488                 struct device_match_pattern *cur_pattern;
1489                 struct scsi_vpd_device_id *device_id_page;
1490
1491                 /*
1492                  * If the pattern in question isn't for a device node, we
1493                  * aren't interested.
1494                  */
1495                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1496                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1497                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1498                                 retval |= DM_RET_DESCEND;
1499                         continue;
1500                 }
1501
1502                 cur_pattern = &patterns[i].pattern.device_pattern;
1503
1504                 /* Error out if mutually exclusive options are specified. */
1505                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1506                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1507                         return(DM_RET_ERROR);
1508
1509                 /*
1510                  * If they want to match any device node, we give them any
1511                  * device node.
1512                  */
1513                 if (cur_pattern->flags == DEV_MATCH_ANY)
1514                         goto copy_dev_node;
1515
1516                 /*
1517                  * Not sure why someone would do this...
1518                  */
1519                 if (cur_pattern->flags == DEV_MATCH_NONE)
1520                         continue;
1521
1522                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1523                  && (cur_pattern->path_id != device->target->bus->path_id))
1524                         continue;
1525
1526                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1527                  && (cur_pattern->target_id != device->target->target_id))
1528                         continue;
1529
1530                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1531                  && (cur_pattern->target_lun != device->lun_id))
1532                         continue;
1533
1534                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1535                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1536                                     (caddr_t)&cur_pattern->data.inq_pat,
1537                                     1, sizeof(cur_pattern->data.inq_pat),
1538                                     scsi_static_inquiry_match) == NULL))
1539                         continue;
1540
1541                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1542                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1543                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1544                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1545                                       device->device_id_len
1546                                     - SVPD_DEVICE_ID_HDR_LEN,
1547                                       cur_pattern->data.devid_pat.id,
1548                                       cur_pattern->data.devid_pat.id_len) != 0))
1549                         continue;
1550
1551 copy_dev_node:
1552                 /*
1553                  * If we get to this point, the user definitely wants
1554                  * information on this device.  So tell the caller to copy
1555                  * the data out.
1556                  */
1557                 retval |= DM_RET_COPY;
1558
1559                 /*
1560                  * If the return action has been set to descend, then we
1561                  * know that we've already seen a peripheral matching
1562                  * expression, therefore we need to further descend the tree.
1563                  * This won't change by continuing around the loop, so we
1564                  * go ahead and return.  If we haven't seen a peripheral
1565                  * matching expression, we keep going around the loop until
1566                  * we exhaust the matching expressions.  We'll set the stop
1567                  * flag once we fall out of the loop.
1568                  */
1569                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1570                         return(retval);
1571         }
1572
1573         /*
1574          * If the return action hasn't been set to descend yet, that means
1575          * we haven't seen any peripheral matching patterns.  So tell the
1576          * caller to stop descending the tree -- the user doesn't want to
1577          * match against lower level tree elements.
1578          */
1579         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1580                 retval |= DM_RET_STOP;
1581
1582         return(retval);
1583 }
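
A companion sketch for the device-ID branch above. Note that the function errors out when DEV_MATCH_INQUIRY and DEV_MATCH_DEVID are both set, so a pattern picks one. The id[] array in struct device_id_match_pattern is fixed-size; the caller-must-fit assumption is called out in the comment rather than checked here.

#include <stdint.h>
#include <string.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

static void
fill_devid_pattern(struct dev_match_pattern *p, const uint8_t *id,
    uint8_t id_len)
{
        memset(p, 0, sizeof(*p));
        p->type = DEV_MATCH_DEVICE;
        p->pattern.device_pattern.flags = DEV_MATCH_DEVID;
        p->pattern.device_pattern.data.devid_pat.id_len = id_len;
        /* Assumes id_len fits devid_pat.id[]; real code should check. */
        memcpy(p->pattern.device_pattern.data.devid_pat.id, id, id_len);
}
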
1584
1585 /*
1586  * Match a single peripheral against any number of match patterns.
1587  */
1588 static dev_match_ret
1589 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1590                struct cam_periph *periph)
1591 {
1592         dev_match_ret retval;
1593         u_int i;
1594
1595         /*
1596          * If we aren't given something to match against, that's an error.
1597          */
1598         if (periph == NULL)
1599                 return(DM_RET_ERROR);
1600
1601         /*
1602          * If there are no match entries, then this peripheral matches no
1603          * matter what.
1604          */
1605         if ((patterns == NULL) || (num_patterns == 0))
1606                 return(DM_RET_STOP | DM_RET_COPY);
1607
1608         /*
1609          * There aren't any nodes below a peripheral node, so there's no
1610          * reason to descend the tree any further.
1611          */
1612         retval = DM_RET_STOP;
1613
1614         for (i = 0; i < num_patterns; i++) {
1615                 struct periph_match_pattern *cur_pattern;
1616
1617                 /*
1618                  * If the pattern in question isn't for a peripheral, we
1619                  * aren't interested.
1620                  */
1621                 if (patterns[i].type != DEV_MATCH_PERIPH)
1622                         continue;
1623
1624                 cur_pattern = &patterns[i].pattern.periph_pattern;
1625
1626                 /*
1627                  * If they want to match on anything, then we will do so.
1628                  */
1629                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1630                         /* set the copy flag */
1631                         retval |= DM_RET_COPY;
1632
1633                         /*
1634                          * We've already set the return action to stop,
1635                          * since there are no nodes below peripherals in
1636                          * the tree.
1637                          */
1638                         return(retval);
1639                 }
1640
1641                 /*
1642                  * Not sure why someone would do this...
1643                  */
1644                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1645                         continue;
1646
1647                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1648                  && (cur_pattern->path_id != periph->path->bus->path_id))
1649                         continue;
1650
1651                 /*
1652                  * For the target and LUN IDs, we have to make sure the
1653                  * target and LUN pointers aren't NULL.  The xpt peripheral
1654                  * has a wildcard target and device.
1655                  */
1656                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1657                  && ((periph->path->target == NULL)
1658                  || (cur_pattern->target_id != periph->path->target->target_id)))
1659                         continue;
1660
1661                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1662                  && ((periph->path->device == NULL)
1663                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1664                         continue;
1665
1666                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1667                  && (cur_pattern->unit_number != periph->unit_number))
1668                         continue;
1669
1670                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1671                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1672                              DEV_IDLEN) != 0))
1673                         continue;
1674
1675                 /*
1676                  * If we get to this point, the user definitely wants
1677                  * information on this peripheral.  So tell the caller to
1678                  * copy the data out.
1679                  */
1680                 retval |= DM_RET_COPY;
1681
1682                 /*
1683                  * The return action has already been set to stop, since
1684                  * peripherals don't have any nodes below them in the EDT.
1685                  */
1686                 return(retval);
1687         }
1688
1689         /*
1690          * If we get to this point, the peripheral that was passed in
1691          * doesn't match any of the patterns.
1692          */
1693         return(retval);
1694 }
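
And the peripheral flavor, selecting a single unit by name plus unit number (for example "da" unit 0). Because peripherals are leaves of the EDT, the function above always folds DM_RET_STOP into its result.

#include <sys/types.h>
#include <string.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

static void
fill_periph_pattern(struct dev_match_pattern *p, const char *name,
    u_int unit)
{
        memset(p, 0, sizeof(*p));
        p->type = DEV_MATCH_PERIPH;
        p->pattern.periph_pattern.flags =
            PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
        p->pattern.periph_pattern.unit_number = unit;
        strlcpy(p->pattern.periph_pattern.periph_name, name,
            sizeof(p->pattern.periph_pattern.periph_name));
}
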
1695
1696 static int
1697 xptedtbusfunc(struct cam_eb *bus, void *arg)
1698 {
1699         struct ccb_dev_match *cdm;
1700         struct cam_et *target;
1701         dev_match_ret retval;
1702
1703         cdm = (struct ccb_dev_match *)arg;
1704
1705         /*
1706          * If our position is for something deeper in the tree, that means
1707          * that we've already seen this node.  So, we keep going down.
1708          */
1709         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1710          && (cdm->pos.cookie.bus == bus)
1711          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1712          && (cdm->pos.cookie.target != NULL))
1713                 retval = DM_RET_DESCEND;
1714         else
1715                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1716
1717         /*
1718          * If we got an error, bail out of the search.
1719          */
1720         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1721                 cdm->status = CAM_DEV_MATCH_ERROR;
1722                 return(0);
1723         }
1724
1725         /*
1726          * If the copy flag is set, copy this bus out.
1727          */
1728         if (retval & DM_RET_COPY) {
1729                 int spaceleft, j;
1730
1731                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1732                         sizeof(struct dev_match_result));
1733
1734                 /*
1735                  * If we don't have enough space to put in another
1736                  * match result, save our position and tell the
1737                  * user there are more devices to check.
1738                  */
1739                 if (spaceleft < sizeof(struct dev_match_result)) {
1740                         bzero(&cdm->pos, sizeof(cdm->pos));
1741                         cdm->pos.position_type =
1742                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1743
1744                         cdm->pos.cookie.bus = bus;
1745                         cdm->pos.generations[CAM_BUS_GENERATION] =
1746                                 xsoftc.bus_generation;
1747                         cdm->status = CAM_DEV_MATCH_MORE;
1748                         return(0);
1749                 }
1750                 j = cdm->num_matches;
1751                 cdm->num_matches++;
1752                 cdm->matches[j].type = DEV_MATCH_BUS;
1753                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1754                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1755                 cdm->matches[j].result.bus_result.unit_number =
1756                         bus->sim->unit_number;
1757                 strlcpy(cdm->matches[j].result.bus_result.dev_name,
1758                         bus->sim->sim_name,
1759                         sizeof(cdm->matches[j].result.bus_result.dev_name));
1760         }
1761
1762         /*
1763          * If the user is only interested in buses, there's no
1764          * reason to descend to the next level in the tree.
1765          */
1766         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1767                 return(1);
1768
1769         /*
1770          * If there is a target generation recorded, check it to
1771          * make sure the target list hasn't changed.
1772          */
1773         mtx_lock(&bus->eb_mtx);
1774         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1775          && (cdm->pos.cookie.bus == bus)
1776          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1777          && (cdm->pos.cookie.target != NULL)) {
1778                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1779                     bus->generation)) {
1780                         mtx_unlock(&bus->eb_mtx);
1781                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1782                         return (0);
1783                 }
1784                 target = (struct cam_et *)cdm->pos.cookie.target;
1785                 target->refcount++;
1786         } else
1787                 target = NULL;
1788         mtx_unlock(&bus->eb_mtx);
1789
1790         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1791 }
1792
1793 static int
1794 xptedttargetfunc(struct cam_et *target, void *arg)
1795 {
1796         struct ccb_dev_match *cdm;
1797         struct cam_eb *bus;
1798         struct cam_ed *device;
1799
1800         cdm = (struct ccb_dev_match *)arg;
1801         bus = target->bus;
1802
1803         /*
1804          * If there is a device list generation recorded, check it to
1805          * make sure the device list hasn't changed.
1806          */
1807         mtx_lock(&bus->eb_mtx);
1808         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1809          && (cdm->pos.cookie.bus == bus)
1810          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1811          && (cdm->pos.cookie.target == target)
1812          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1813          && (cdm->pos.cookie.device != NULL)) {
1814                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1815                     target->generation) {
1816                         mtx_unlock(&bus->eb_mtx);
1817                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1818                         return(0);
1819                 }
1820                 device = (struct cam_ed *)cdm->pos.cookie.device;
1821                 device->refcount++;
1822         } else
1823                 device = NULL;
1824         mtx_unlock(&bus->eb_mtx);
1825
1826         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1827 }
1828
1829 static int
1830 xptedtdevicefunc(struct cam_ed *device, void *arg)
1831 {
1832         struct cam_eb *bus;
1833         struct cam_periph *periph;
1834         struct ccb_dev_match *cdm;
1835         dev_match_ret retval;
1836
1837         cdm = (struct ccb_dev_match *)arg;
1838         bus = device->target->bus;
1839
1840         /*
1841          * If our position is for something deeper in the tree, that means
1842          * that we've already seen this node.  So, we keep going down.
1843          */
1844         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1845          && (cdm->pos.cookie.device == device)
1846          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1847          && (cdm->pos.cookie.periph != NULL))
1848                 retval = DM_RET_DESCEND;
1849         else
1850                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1851                                         device);
1852
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1854                 cdm->status = CAM_DEV_MATCH_ERROR;
1855                 return(0);
1856         }
1857
1858         /*
1859          * If the copy flag is set, copy this device out.
1860          */
1861         if (retval & DM_RET_COPY) {
1862                 int spaceleft, j;
1863
1864                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1865                         sizeof(struct dev_match_result));
1866
1867                 /*
1868                  * If we don't have enough space to put in another
1869                  * match result, save our position and tell the
1870                  * user there are more devices to check.
1871                  */
1872                 if (spaceleft < sizeof(struct dev_match_result)) {
1873                         bzero(&cdm->pos, sizeof(cdm->pos));
1874                         cdm->pos.position_type =
1875                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1876                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1877
1878                         cdm->pos.cookie.bus = device->target->bus;
1879                         cdm->pos.generations[CAM_BUS_GENERATION] =
1880                                 xsoftc.bus_generation;
1881                         cdm->pos.cookie.target = device->target;
1882                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1883                                 device->target->bus->generation;
1884                         cdm->pos.cookie.device = device;
1885                         cdm->pos.generations[CAM_DEV_GENERATION] =
1886                                 device->target->generation;
1887                         cdm->status = CAM_DEV_MATCH_MORE;
1888                         return(0);
1889                 }
1890                 j = cdm->num_matches;
1891                 cdm->num_matches++;
1892                 cdm->matches[j].type = DEV_MATCH_DEVICE;
1893                 cdm->matches[j].result.device_result.path_id =
1894                         device->target->bus->path_id;
1895                 cdm->matches[j].result.device_result.target_id =
1896                         device->target->target_id;
1897                 cdm->matches[j].result.device_result.target_lun =
1898                         device->lun_id;
1899                 cdm->matches[j].result.device_result.protocol =
1900                         device->protocol;
1901                 bcopy(&device->inq_data,
1902                       &cdm->matches[j].result.device_result.inq_data,
1903                       sizeof(struct scsi_inquiry_data));
1904                 bcopy(&device->ident_data,
1905                       &cdm->matches[j].result.device_result.ident_data,
1906                       sizeof(struct ata_params));
1907
1908                 /* Let the user know whether this device is unconfigured */
1909                 if (device->flags & CAM_DEV_UNCONFIGURED)
1910                         cdm->matches[j].result.device_result.flags =
1911                                 DEV_RESULT_UNCONFIGURED;
1912                 else
1913                         cdm->matches[j].result.device_result.flags =
1914                                 DEV_RESULT_NOFLAG;
1915         }
1916
1917         /*
1918          * If the user isn't interested in peripherals, don't descend
1919          * the tree any further.
1920          */
1921         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1922                 return(1);
1923
1924         /*
1925          * If there is a peripheral list generation recorded, make sure
1926          * it hasn't changed.
1927          */
1928         xpt_lock_buses();
1929         mtx_lock(&bus->eb_mtx);
1930         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1931          && (cdm->pos.cookie.bus == bus)
1932          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1933          && (cdm->pos.cookie.target == device->target)
1934          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1935          && (cdm->pos.cookie.device == device)
1936          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1937          && (cdm->pos.cookie.periph != NULL)) {
1938                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1939                     device->generation) {
1940                         mtx_unlock(&bus->eb_mtx);
1941                         xpt_unlock_buses();
1942                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1943                         return(0);
1944                 }
1945                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1946                 periph->refcount++;
1947         } else
1948                 periph = NULL;
1949         mtx_unlock(&bus->eb_mtx);
1950         xpt_unlock_buses();
1951
1952         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1953 }
1954
1955 static int
1956 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1957 {
1958         struct ccb_dev_match *cdm;
1959         dev_match_ret retval;
1960
1961         cdm = (struct ccb_dev_match *)arg;
1962
1963         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1964
1965         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1966                 cdm->status = CAM_DEV_MATCH_ERROR;
1967                 return(0);
1968         }
1969
1970         /*
1971          * If the copy flag is set, copy this peripheral out.
1972          */
1973         if (retval & DM_RET_COPY) {
1974                 int spaceleft, j;
1975                 size_t l;
1976
1977                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1978                         sizeof(struct dev_match_result));
1979
1980                 /*
1981                  * If we don't have enough space to put in another
1982                  * match result, save our position and tell the
1983                  * user there are more devices to check.
1984                  */
1985                 if (spaceleft < sizeof(struct dev_match_result)) {
1986                         bzero(&cdm->pos, sizeof(cdm->pos));
1987                         cdm->pos.position_type =
1988                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1989                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1990                                 CAM_DEV_POS_PERIPH;
1991
1992                         cdm->pos.cookie.bus = periph->path->bus;
1993                         cdm->pos.generations[CAM_BUS_GENERATION] =
1994                                 xsoftc.bus_generation;
1995                         cdm->pos.cookie.target = periph->path->target;
1996                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1997                                 periph->path->bus->generation;
1998                         cdm->pos.cookie.device = periph->path->device;
1999                         cdm->pos.generations[CAM_DEV_GENERATION] =
2000                                 periph->path->target->generation;
2001                         cdm->pos.cookie.periph = periph;
2002                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2003                                 periph->path->device->generation;
2004                         cdm->status = CAM_DEV_MATCH_MORE;
2005                         return(0);
2006                 }
2007
2008                 j = cdm->num_matches;
2009                 cdm->num_matches++;
2010                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2011                 cdm->matches[j].result.periph_result.path_id =
2012                         periph->path->bus->path_id;
2013                 cdm->matches[j].result.periph_result.target_id =
2014                         periph->path->target->target_id;
2015                 cdm->matches[j].result.periph_result.target_lun =
2016                         periph->path->device->lun_id;
2017                 cdm->matches[j].result.periph_result.unit_number =
2018                         periph->unit_number;
2019                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2020                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2021                         periph->periph_name, l);
2022         }
2023
2024         return(1);
2025 }
2026
2027 static int
2028 xptedtmatch(struct ccb_dev_match *cdm)
2029 {
2030         struct cam_eb *bus;
2031         int ret;
2032
2033         cdm->num_matches = 0;
2034
2035         /*
2036          * Check the bus list generation.  If it has changed, the user
2037          * needs to reset everything and start over.
2038          */
2039         xpt_lock_buses();
2040         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2041          && (cdm->pos.cookie.bus != NULL)) {
2042                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
2043                     xsoftc.bus_generation) {
2044                         xpt_unlock_buses();
2045                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2046                         return(0);
2047                 }
2048                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
2049                 bus->refcount++;
2050         } else
2051                 bus = NULL;
2052         xpt_unlock_buses();
2053
2054         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
2055
2056         /*
2057          * If we get back 0, that means that we had to stop before fully
2058          * traversing the EDT.  It also means that one of the subroutines
2059          * has set the status field to the proper value.  If we get back 1,
2060          * we've fully traversed the EDT and copied out any matching entries.
2061          */
2062         if (ret == 1)
2063                 cdm->status = CAM_DEV_MATCH_LAST;
2064
2065         return(ret);
2066 }
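
From the consumer side, the contract above is: resubmit the same CCB while cdm.status is CAM_DEV_MATCH_MORE (the saved position resumes the walk), and start over from scratch on CAM_DEV_MATCH_LIST_CHANGED. A hedged userland sketch in the style of camcontrol(8)'s device-tree walk follows; the buffer size and error handling are assumptions, not taken from this file.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>

#define MATCH_BUF_SIZE  (100 * sizeof(struct dev_match_result))

int
main(void)
{
        union ccb ccb;
        int fd;

        if ((fd = open("/dev/xpt0", O_RDWR)) == -1)
                err(1, "open(/dev/xpt0)");
restart:
        memset(&ccb, 0, sizeof(ccb));
        ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
        ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
        ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
        ccb.ccb_h.func_code = XPT_DEV_MATCH;
        ccb.cdm.match_buf_len = MATCH_BUF_SIZE;
        ccb.cdm.matches = malloc(MATCH_BUF_SIZE);
        if (ccb.cdm.matches == NULL)
                err(1, "malloc");
        ccb.cdm.num_patterns = 0;       /* no patterns: copy everything */
        do {
                if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
                        err(1, "CAMIOCOMMAND");
                if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
                        errx(1, "XPT_DEV_MATCH failed");
                if (ccb.cdm.status == CAM_DEV_MATCH_LIST_CHANGED) {
                        free(ccb.cdm.matches);
                        goto restart;   /* EDT changed underneath us */
                }
                if (ccb.cdm.status != CAM_DEV_MATCH_LAST &&
                    ccb.cdm.status != CAM_DEV_MATCH_MORE)
                        errx(1, "match status %d", ccb.cdm.status);
                printf("%u matches in this batch\n", ccb.cdm.num_matches);
                /* walk ccb.cdm.matches[0 .. num_matches - 1] here */
        } while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
        free(ccb.cdm.matches);
        close(fd);
        return (0);
}
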
2067
2068 static int
2069 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2070 {
2071         struct cam_periph *periph;
2072         struct ccb_dev_match *cdm;
2073
2074         cdm = (struct ccb_dev_match *)arg;
2075
2076         xpt_lock_buses();
2077         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2078          && (cdm->pos.cookie.pdrv == pdrv)
2079          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2080          && (cdm->pos.cookie.periph != NULL)) {
2081                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2082                     (*pdrv)->generation) {
2083                         xpt_unlock_buses();
2084                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2085                         return(0);
2086                 }
2087                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
2088                 periph->refcount++;
2089         } else
2090                 periph = NULL;
2091         xpt_unlock_buses();
2092
2093         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
2094 }
2095
2096 static int
2097 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2098 {
2099         struct ccb_dev_match *cdm;
2100         dev_match_ret retval;
2101
2102         cdm = (struct ccb_dev_match *)arg;
2103
2104         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2105
2106         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2107                 cdm->status = CAM_DEV_MATCH_ERROR;
2108                 return(0);
2109         }
2110
2111         /*
2112          * If the copy flag is set, copy this peripheral out.
2113          */
2114         if (retval & DM_RET_COPY) {
2115                 int spaceleft, j;
2116                 size_t l;
2117
2118                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2119                         sizeof(struct dev_match_result));
2120
2121                 /*
2122                  * If we don't have enough space to put in another
2123                  * match result, save our position and tell the
2124                  * user there are more devices to check.
2125                  */
2126                 if (spaceleft < sizeof(struct dev_match_result)) {
2127                         struct periph_driver **pdrv;
2128
2129                         pdrv = NULL;
2130                         bzero(&cdm->pos, sizeof(cdm->pos));
2131                         cdm->pos.position_type =
2132                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2133                                 CAM_DEV_POS_PERIPH;
2134
2135                         /*
2136                          * This may look a bit nonsensical, but it is
2137                          * actually quite logical.  There are very few
2138                          * peripheral drivers, and bloating every peripheral
2139                          * structure with a pointer back to its parent
2140                          * peripheral driver linker set entry would cost
2141                          * more in the long run than doing this quick lookup.
2142                          */
2143                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2144                                 if (strcmp((*pdrv)->driver_name,
2145                                     periph->periph_name) == 0)
2146                                         break;
2147                         }
2148
2149                         if (*pdrv == NULL) {
2150                                 cdm->status = CAM_DEV_MATCH_ERROR;
2151                                 return(0);
2152                         }
2153
2154                         cdm->pos.cookie.pdrv = pdrv;
2155                         /*
2156                          * The periph generation slot does double duty, as
2157                          * does the periph pointer slot.  They are used for
2158                          * both edt and pdrv lookups and positioning.
2159                          */
2160                         cdm->pos.cookie.periph = periph;
2161                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2162                                 (*pdrv)->generation;
2163                         cdm->status = CAM_DEV_MATCH_MORE;
2164                         return(0);
2165                 }
2166
2167                 j = cdm->num_matches;
2168                 cdm->num_matches++;
2169                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2170                 cdm->matches[j].result.periph_result.path_id =
2171                         periph->path->bus->path_id;
2172
2173                 /*
2174                  * The transport layer peripheral doesn't have a target or
2175                  * lun.
2176                  */
2177                 if (periph->path->target)
2178                         cdm->matches[j].result.periph_result.target_id =
2179                                 periph->path->target->target_id;
2180                 else
2181                         cdm->matches[j].result.periph_result.target_id =
2182                                 CAM_TARGET_WILDCARD;
2183
2184                 if (periph->path->device)
2185                         cdm->matches[j].result.periph_result.target_lun =
2186                                 periph->path->device->lun_id;
2187                 else
2188                         cdm->matches[j].result.periph_result.target_lun =
2189                                 CAM_LUN_WILDCARD;
2190
2191                 cdm->matches[j].result.periph_result.unit_number =
2192                         periph->unit_number;
2193                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2194                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2195                         periph->periph_name, l);
2196         }
2197
2198         return(1);
2199 }
2200
2201 static int
2202 xptperiphlistmatch(struct ccb_dev_match *cdm)
2203 {
2204         int ret;
2205
2206         cdm->num_matches = 0;
2207
2208         /*
2209          * At this point in the EDT traversal function, we check the bus
2210          * list generation to make sure that no buses have been added or
2211          * removed since the user last sent a XPT_DEV_MATCH ccb through.
2212          * For the peripheral driver list traversal function, however, we
2213          * don't have to worry about new peripheral driver types coming or
2214          * going; they're in a linker set, and therefore can't change
2215          * without a recompile.
2216          */
2217
2218         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2219          && (cdm->pos.cookie.pdrv != NULL))
2220                 ret = xptpdrvtraverse(
2221                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2222                                 xptplistpdrvfunc, cdm);
2223         else
2224                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2225
2226         /*
2227          * If we get back 0, that means that we had to stop before fully
2228          * traversing the peripheral driver tree.  It also means that one of
2229          * the subroutines has set the status field to the proper value.  If
2230          * we get back 1, we've fully traversed the EDT and copied out any
2231          * matching entries.
2232          */
2233         if (ret == 1)
2234                 cdm->status = CAM_DEV_MATCH_LAST;
2235
2236         return(ret);
2237 }
2238
2239 static int
2240 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2241 {
2242         struct cam_eb *bus, *next_bus;
2243         int retval;
2244
2245         retval = 1;
2246         if (start_bus)
2247                 bus = start_bus;
2248         else {
2249                 xpt_lock_buses();
2250                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2251                 if (bus == NULL) {
2252                         xpt_unlock_buses();
2253                         return (retval);
2254                 }
2255                 bus->refcount++;
2256                 xpt_unlock_buses();
2257         }
2258         for (; bus != NULL; bus = next_bus) {
2259                 retval = tr_func(bus, arg);
2260                 if (retval == 0) {
2261                         xpt_release_bus(bus);
2262                         break;
2263                 }
2264                 xpt_lock_buses();
2265                 next_bus = TAILQ_NEXT(bus, links);
2266                 if (next_bus)
2267                         next_bus->refcount++;
2268                 xpt_unlock_buses();
2269                 xpt_release_bus(bus);
2270         }
2271         return(retval);
2272 }
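
All of the traversal loops above and below (bus, target, device, periph) use the same hand-off idiom: pin the next node with a reference while the list lock is held, then drop the current node, so tr_func may sleep or remove nodes without invalidating the iterator. Stripped to a toy kernel-style list, with the node type and release function assumed:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct node {
        TAILQ_ENTRY(node) links;
        u_int refcount;
};
TAILQ_HEAD(node_list, node);

/* Assumed helper: drops a reference, frees the node at zero. */
static void node_release(struct node *n);

static int
toy_traverse(struct node_list *list, struct mtx *lock,
    int (*fn)(struct node *, void *), void *arg)
{
        struct node *n, *next;
        int retval = 1;

        mtx_lock(lock);
        n = TAILQ_FIRST(list);
        if (n == NULL) {
                mtx_unlock(lock);
                return (retval);
        }
        n->refcount++;
        mtx_unlock(lock);
        for (; n != NULL; n = next) {
                retval = fn(n, arg);            /* may sleep; n is pinned */
                if (retval == 0) {
                        node_release(n);
                        break;
                }
                mtx_lock(lock);
                next = TAILQ_NEXT(n, links);    /* pin successor first... */
                if (next != NULL)
                        next->refcount++;
                mtx_unlock(lock);
                node_release(n);                /* ...then drop current */
        }
        return (retval);
}
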
2273
2274 static int
2275 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2276                   xpt_targetfunc_t *tr_func, void *arg)
2277 {
2278         struct cam_et *target, *next_target;
2279         int retval;
2280
2281         retval = 1;
2282         if (start_target)
2283                 target = start_target;
2284         else {
2285                 mtx_lock(&bus->eb_mtx);
2286                 target = TAILQ_FIRST(&bus->et_entries);
2287                 if (target == NULL) {
2288                         mtx_unlock(&bus->eb_mtx);
2289                         return (retval);
2290                 }
2291                 target->refcount++;
2292                 mtx_unlock(&bus->eb_mtx);
2293         }
2294         for (; target != NULL; target = next_target) {
2295                 retval = tr_func(target, arg);
2296                 if (retval == 0) {
2297                         xpt_release_target(target);
2298                         break;
2299                 }
2300                 mtx_lock(&bus->eb_mtx);
2301                 next_target = TAILQ_NEXT(target, links);
2302                 if (next_target)
2303                         next_target->refcount++;
2304                 mtx_unlock(&bus->eb_mtx);
2305                 xpt_release_target(target);
2306         }
2307         return(retval);
2308 }
2309
2310 static int
2311 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2312                   xpt_devicefunc_t *tr_func, void *arg)
2313 {
2314         struct cam_eb *bus;
2315         struct cam_ed *device, *next_device;
2316         int retval;
2317
2318         retval = 1;
2319         bus = target->bus;
2320         if (start_device)
2321                 device = start_device;
2322         else {
2323                 mtx_lock(&bus->eb_mtx);
2324                 device = TAILQ_FIRST(&target->ed_entries);
2325                 if (device == NULL) {
2326                         mtx_unlock(&bus->eb_mtx);
2327                         return (retval);
2328                 }
2329                 device->refcount++;
2330                 mtx_unlock(&bus->eb_mtx);
2331         }
2332         for (; device != NULL; device = next_device) {
2333                 mtx_lock(&device->device_mtx);
2334                 retval = tr_func(device, arg);
2335                 mtx_unlock(&device->device_mtx);
2336                 if (retval == 0) {
2337                         xpt_release_device(device);
2338                         break;
2339                 }
2340                 mtx_lock(&bus->eb_mtx);
2341                 next_device = TAILQ_NEXT(device, links);
2342                 if (next_device)
2343                         next_device->refcount++;
2344                 mtx_unlock(&bus->eb_mtx);
2345                 xpt_release_device(device);
2346         }
2347         return(retval);
2348 }
2349
2350 static int
2351 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2352                   xpt_periphfunc_t *tr_func, void *arg)
2353 {
2354         struct cam_eb *bus;
2355         struct cam_periph *periph, *next_periph;
2356         int retval;
2357
2358         retval = 1;
2359
2360         bus = device->target->bus;
2361         if (start_periph)
2362                 periph = start_periph;
2363         else {
2364                 xpt_lock_buses();
2365                 mtx_lock(&bus->eb_mtx);
2366                 periph = SLIST_FIRST(&device->periphs);
2367                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2368                         periph = SLIST_NEXT(periph, periph_links);
2369                 if (periph == NULL) {
2370                         mtx_unlock(&bus->eb_mtx);
2371                         xpt_unlock_buses();
2372                         return (retval);
2373                 }
2374                 periph->refcount++;
2375                 mtx_unlock(&bus->eb_mtx);
2376                 xpt_unlock_buses();
2377         }
2378         for (; periph != NULL; periph = next_periph) {
2379                 retval = tr_func(periph, arg);
2380                 if (retval == 0) {
2381                         cam_periph_release_locked(periph);
2382                         break;
2383                 }
2384                 xpt_lock_buses();
2385                 mtx_lock(&bus->eb_mtx);
2386                 next_periph = SLIST_NEXT(periph, periph_links);
2387                 while (next_periph != NULL &&
2388                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2389                         next_periph = SLIST_NEXT(next_periph, periph_links);
2390                 if (next_periph)
2391                         next_periph->refcount++;
2392                 mtx_unlock(&bus->eb_mtx);
2393                 xpt_unlock_buses();
2394                 cam_periph_release_locked(periph);
2395         }
2396         return(retval);
2397 }
2398
2399 static int
2400 xptpdrvtraverse(struct periph_driver **start_pdrv,
2401                 xpt_pdrvfunc_t *tr_func, void *arg)
2402 {
2403         struct periph_driver **pdrv;
2404         int retval;
2405
2406         retval = 1;
2407
2408         /*
2409          * We don't traverse the peripheral driver list like we do the
2410          * other lists, because it is a linker set, and therefore cannot be
2411          * changed during runtime.  If the peripheral driver list is ever
2412          * re-done to be something other than a linker set (i.e. it can
2413          * change while the system is running), the list traversal should
2414          * be modified to work like the other traversal functions.
2415          */
2416         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2417              *pdrv != NULL; pdrv++) {
2418                 retval = tr_func(pdrv, arg);
2419
2420                 if (retval == 0)
2421                         return(retval);
2422         }
2423
2424         return(retval);
2425 }
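
The immutability argument above rests on linker sets: periph_drivers is an array of pointers assembled at link time, so its membership cannot change while the kernel runs. A generic sketch of the underlying <sys/linker_set.h> mechanism; the set name and struct are made up for illustration.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>

struct example {
        const char *name;
};

static struct example foo_entry = { "foo" };
static struct example bar_entry = { "bar" };

/* Declare the set and contribute two entries to it at link time. */
SET_DECLARE(example_set, struct example);
DATA_SET(example_set, foo_entry);
DATA_SET(example_set, bar_entry);

static void
walk_examples(void)
{
        struct example **e;

        SET_FOREACH(e, example_set)
                printf("%s\n", (*e)->name);
}
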
2426
2427 static int
2428 xptpdperiphtraverse(struct periph_driver **pdrv,
2429                     struct cam_periph *start_periph,
2430                     xpt_periphfunc_t *tr_func, void *arg)
2431 {
2432         struct cam_periph *periph, *next_periph;
2433         int retval;
2434
2435         retval = 1;
2436
2437         if (start_periph)
2438                 periph = start_periph;
2439         else {
2440                 xpt_lock_buses();
2441                 periph = TAILQ_FIRST(&(*pdrv)->units);
2442                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2443                         periph = TAILQ_NEXT(periph, unit_links);
2444                 if (periph == NULL) {
2445                         xpt_unlock_buses();
2446                         return (retval);
2447                 }
2448                 periph->refcount++;
2449                 xpt_unlock_buses();
2450         }
2451         for (; periph != NULL; periph = next_periph) {
2452                 cam_periph_lock(periph);
2453                 retval = tr_func(periph, arg);
2454                 cam_periph_unlock(periph);
2455                 if (retval == 0) {
2456                         cam_periph_release(periph);
2457                         break;
2458                 }
2459                 xpt_lock_buses();
2460                 next_periph = TAILQ_NEXT(periph, unit_links);
2461                 while (next_periph != NULL &&
2462                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2463                         next_periph = TAILQ_NEXT(next_periph, unit_links);
2464                 if (next_periph)
2465                         next_periph->refcount++;
2466                 xpt_unlock_buses();
2467                 cam_periph_release(periph);
2468         }
2469         return(retval);
2470 }
2471
2472 static int
2473 xptdefbusfunc(struct cam_eb *bus, void *arg)
2474 {
2475         struct xpt_traverse_config *tr_config;
2476
2477         tr_config = (struct xpt_traverse_config *)arg;
2478
2479         if (tr_config->depth == XPT_DEPTH_BUS) {
2480                 xpt_busfunc_t *tr_func;
2481
2482                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2483
2484                 return(tr_func(bus, tr_config->tr_arg));
2485         } else
2486                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2487 }
2488
2489 static int
2490 xptdeftargetfunc(struct cam_et *target, void *arg)
2491 {
2492         struct xpt_traverse_config *tr_config;
2493
2494         tr_config = (struct xpt_traverse_config *)arg;
2495
2496         if (tr_config->depth == XPT_DEPTH_TARGET) {
2497                 xpt_targetfunc_t *tr_func;
2498
2499                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2500
2501                 return(tr_func(target, tr_config->tr_arg));
2502         } else
2503                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2504 }
2505
2506 static int
2507 xptdefdevicefunc(struct cam_ed *device, void *arg)
2508 {
2509         struct xpt_traverse_config *tr_config;
2510
2511         tr_config = (struct xpt_traverse_config *)arg;
2512
2513         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2514                 xpt_devicefunc_t *tr_func;
2515
2516                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2517
2518                 return(tr_func(device, tr_config->tr_arg));
2519         } else
2520                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2521 }
2522
2523 static int
2524 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2525 {
2526         struct xpt_traverse_config *tr_config;
2527         xpt_periphfunc_t *tr_func;
2528
2529         tr_config = (struct xpt_traverse_config *)arg;
2530
2531         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2532
2533         /*
2534          * Unlike the other default functions, we don't check for depth
2535          * here.  The peripheral driver level is the last level in the EDT,
2536          * so if we're here, we should execute the function in question.
2537          */
2538         return(tr_func(periph, tr_config->tr_arg));
2539 }
2540
2541 /*
2542  * Execute the given function for every bus in the EDT.
2543  */
2544 static int
2545 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2546 {
2547         struct xpt_traverse_config tr_config;
2548
2549         tr_config.depth = XPT_DEPTH_BUS;
2550         tr_config.tr_func = tr_func;
2551         tr_config.tr_arg = arg;
2552
2553         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2554 }
2555
2556 /*
2557  * Execute the given function for every device in the EDT.
2558  */
2559 static int
2560 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2561 {
2562         struct xpt_traverse_config tr_config;
2563
2564         tr_config.depth = XPT_DEPTH_DEVICE;
2565         tr_config.tr_func = tr_func;
2566         tr_config.tr_arg = arg;
2567
2568         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2569 }
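
A hypothetical caller, to make the callback convention concrete: tr_func returns nonzero to keep traversing and zero to stop early. This sketch counts configured devices and is not part of the file.

static int
xptcountdevfunc(struct cam_ed *device, void *arg)
{

        if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
                (*(int *)arg)++;
        return (1);             /* keep going */
}

static int
xpt_count_devices(void)
{
        int count = 0;

        xpt_for_all_devices(xptcountdevfunc, &count);
        return (count);
}
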
2570
2571 static int
2572 xptsetasyncfunc(struct cam_ed *device, void *arg)
2573 {
2574         struct cam_path path;
2575         struct ccb_getdev cgd;
2576         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2577
2578         /*
2579          * Don't report unconfigured devices (Wildcard devs,
2580          * devices only for target mode, device instances
2581          * that have been invalidated but are waiting for
2582          * their last reference count to be released).
2583          */
2584         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2585                 return (1);
2586
2587         xpt_compile_path(&path,
2588                          NULL,
2589                          device->target->bus->path_id,
2590                          device->target->target_id,
2591                          device->lun_id);
2592         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2593         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2594         xpt_action((union ccb *)&cgd);
2595         csa->callback(csa->callback_arg,
2596                             AC_FOUND_DEVICE,
2597                             &path, &cgd);
2598         xpt_release_path(&path);
2599
2600         return(1);
2601 }
2602
2603 static int
2604 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2605 {
2606         struct cam_path path;
2607         struct ccb_pathinq cpi;
2608         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2609
2610         xpt_compile_path(&path, /*periph*/NULL,
2611                          bus->path_id,
2612                          CAM_TARGET_WILDCARD,
2613                          CAM_LUN_WILDCARD);
2614         xpt_path_lock(&path);
2615         xpt_path_inq(&cpi, &path);
2616         csa->callback(csa->callback_arg,
2617                             AC_PATH_REGISTERED,
2618                             &path, &cpi);
2619         xpt_path_unlock(&path);
2620         xpt_release_path(&path);
2621
2622         return(1);
2623 }
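
Both helpers above exist to replay current state (known devices, registered paths) into a freshly registered async callback. For context, a hedged sketch of how a peripheral driver typically registers one, following the struct ccb_setasync fields used here; the driver name and softc are made up.

/* Hypothetical handler with the standard async callback signature. */
static void mydriver_async(void *cb_arg, u_int32_t code,
    struct cam_path *path, void *arg);

static void
mydriver_register_async(struct cam_path *path, void *softc)
{
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE | AC_PATH_REGISTERED;
        csa.callback = mydriver_async;
        csa.callback_arg = softc;
        xpt_action((union ccb *)&csa);
}
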
2624
2625 void
2626 xpt_action(union ccb *start_ccb)
2627 {
2628
2629         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2630             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2631                 xpt_action_name(start_ccb->ccb_h.func_code)));
2632
2633         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2634         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2635 }
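
xpt_action() itself only stamps CAM_REQ_INPROG and dispatches through the bus transport's ops table; xpt_action_default() below is the shared fallback. One detail worth a worked example is the SCSI-1 compatibility encoding in the XPT_SCSI_IO case that follows: for protocol versions at or below SCSI-2 and LUNs 0 through 7, the LUN is mirrored into bits 7:5 of CDB byte 1. A standalone check:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint8_t cdb1 = 0;
        uint32_t lun = 3;

        if (lun < 8)
                cdb1 |= lun << 5;       /* LUN 3 -> 0x60 in CDB byte 1 */
        assert(cdb1 == 0x60);
        return (0);
}
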
2636
2637 void
2638 xpt_action_default(union ccb *start_ccb)
2639 {
2640         struct cam_path *path;
2641         struct cam_sim *sim;
2642         struct mtx *mtx;
2643
2644         path = start_ccb->ccb_h.path;
2645         CAM_DEBUG(path, CAM_DEBUG_TRACE,
2646             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2647                 xpt_action_name(start_ccb->ccb_h.func_code)));
2648
2649         switch (start_ccb->ccb_h.func_code) {
2650         case XPT_SCSI_IO:
2651         {
2652                 struct cam_ed *device;
2653
2654                 /*
2655                  * For the sake of compatibility with SCSI-1
2656                  * devices that may not understand the identify
2657                  * message, we include lun information in the
2658                  * second byte of all commands.  SCSI-1 specifies
2659                  * that luns are a 3 bit value and reserves only 3
2660                  * bits for lun information in the CDB.  Later
2661                  * revisions of the SCSI spec allow for more than 8
2662                  * luns, but have deprecated lun information in the
2663                  * CDB.  So, if the lun won't fit, we must omit it.
2664                  *
2665                  * Also be aware that during initial probing for devices,
2666                  * the inquiry information is unknown but initialized to 0.
2667                  * This means that this code will be exercised while probing
2668                  * devices with an ANSI revision greater than 2.
2669                  */
2670                 device = path->device;
2671                 if (device->protocol_version <= SCSI_REV_2
2672                  && start_ccb->ccb_h.target_lun < 8
2673                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2674                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2675                             start_ccb->ccb_h.target_lun << 5;
2676                 }
2677                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2678         }
2679         /* FALLTHROUGH */
2680         case XPT_TARGET_IO:
2681         case XPT_CONT_TARGET_IO:
2682                 start_ccb->csio.sense_resid = 0;
2683                 start_ccb->csio.resid = 0;
2684                 /* FALLTHROUGH */
2685         case XPT_ATA_IO:
2686                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2687                         start_ccb->ataio.resid = 0;
2688                 /* FALLTHROUGH */
2689         case XPT_NVME_IO:
2690         case XPT_NVME_ADMIN:
2691         case XPT_MMC_IO:
2692         case XPT_MMC_GET_TRAN_SETTINGS:
2693         case XPT_MMC_SET_TRAN_SETTINGS:
2694         case XPT_RESET_DEV:
2695         case XPT_ENG_EXEC:
2696         case XPT_SMP_IO:
2697         {
2698                 struct cam_devq *devq;
2699
2700                 devq = path->bus->sim->devq;
2701                 mtx_lock(&devq->send_mtx);
2702                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2703                 if (xpt_schedule_devq(devq, path->device) != 0)
2704                         xpt_run_devq(devq);
2705                 mtx_unlock(&devq->send_mtx);
2706                 break;
2707         }
2708         case XPT_CALC_GEOMETRY:
2709                 /* Filter out garbage */
2710                 if (start_ccb->ccg.block_size == 0
2711                  || start_ccb->ccg.volume_size == 0) {
2712                         start_ccb->ccg.cylinders = 0;
2713                         start_ccb->ccg.heads = 0;
2714                         start_ccb->ccg.secs_per_track = 0;
2715                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2716                         break;
2717                 }
2718                 goto call_sim;
2719         case XPT_ABORT:
2720         {
2721                 union ccb *abort_ccb;
2722
2723                 abort_ccb = start_ccb->cab.abort_ccb;
2724                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2725                         struct cam_ed *device;
2726                         struct cam_devq *devq;
2727
2728                         device = abort_ccb->ccb_h.path->device;
2729                         devq = device->sim->devq;
2730
2731                         mtx_lock(&devq->send_mtx);
2732                         if (abort_ccb->ccb_h.pinfo.index > 0) {
2733                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2734                                 abort_ccb->ccb_h.status =
2735                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2736                                 xpt_freeze_devq_device(device, 1);
2737                                 mtx_unlock(&devq->send_mtx);
2738                                 xpt_done(abort_ccb);
2739                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2740                                 break;
2741                         }
2742                         mtx_unlock(&devq->send_mtx);
2743
2744                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2745                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2746                                 /*
2747                                  * We've caught this ccb en route to
2748                                  * the SIM.  Flag it for abort and the
2749                                  * SIM will do so just before starting
2750                                  * real work on the CCB.
2751                                  */
2752                                 abort_ccb->ccb_h.status =
2753                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2754                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2755                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2756                                 break;
2757                         }
2758                 }
2759                 if (XPT_FC_IS_QUEUED(abort_ccb)
2760                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2761                         /*
2762                          * It's already completed but waiting
2763                          * for our SWI to get to it.
2764                          */
2765                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2766                         break;
2767                 }
2768                 /*
2769                  * If we weren't able to take care of the abort request
2770                  * in the XPT, pass the request down to the SIM for processing.
2771                  */
2772         }
2773         /* FALLTHROUGH */
2774         case XPT_ACCEPT_TARGET_IO:
2775         case XPT_EN_LUN:
2776         case XPT_IMMED_NOTIFY:
2777         case XPT_NOTIFY_ACK:
2778         case XPT_RESET_BUS:
2779         case XPT_IMMEDIATE_NOTIFY:
2780         case XPT_NOTIFY_ACKNOWLEDGE:
2781         case XPT_GET_SIM_KNOB_OLD:
2782         case XPT_GET_SIM_KNOB:
2783         case XPT_SET_SIM_KNOB:
2784         case XPT_GET_TRAN_SETTINGS:
2785         case XPT_SET_TRAN_SETTINGS:
2786         case XPT_PATH_INQ:
2787 call_sim:
2788                 sim = path->bus->sim;
2789                 mtx = sim->mtx;
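                /*
                 * Take a locked SIM's mutex for the duration of the call
                 * unless we already own it; mtx is left NULL when no
                 * unlock is owed (an MPSAFE SIM, or a recursive entry).
                 */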
2790                 if (mtx && !mtx_owned(mtx))
2791                         mtx_lock(mtx);
2792                 else
2793                         mtx = NULL;
2794
2795                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2796                     ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2797                 (*(sim->sim_action))(sim, start_ccb);
2798                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2799                     ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2800                 if (mtx)
2801                         mtx_unlock(mtx);
2802                 break;
2803         case XPT_PATH_STATS:
2804                 start_ccb->cpis.last_reset = path->bus->last_reset;
2805                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2806                 break;
2807         case XPT_GDEV_TYPE:
2808         {
2809                 struct cam_ed *dev;
2810
2811                 dev = path->device;
2812                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2813                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2814                 } else {
2815                         struct ccb_getdev *cgd;
2816
2817                         cgd = &start_ccb->cgd;
2818                         cgd->protocol = dev->protocol;
2819                         cgd->inq_data = dev->inq_data;
2820                         cgd->ident_data = dev->ident_data;
2821                         cgd->inq_flags = dev->inq_flags;
2822                         cgd->ccb_h.status = CAM_REQ_CMP;
2823                         cgd->serial_num_len = dev->serial_num_len;
2824                         if ((dev->serial_num_len > 0)
2825                          && (dev->serial_num != NULL))
2826                                 bcopy(dev->serial_num, cgd->serial_num,
2827                                       dev->serial_num_len);
2828                 }
2829                 break;
2830         }
2831         case XPT_GDEV_STATS:
2832         {
2833                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
2834                 struct cam_ed *dev = path->device;
2835                 struct cam_eb *bus = path->bus;
2836                 struct cam_et *tar = path->target;
2837                 struct cam_devq *devq = bus->sim->devq;
2838
2839                 mtx_lock(&devq->send_mtx);
2840                 cgds->dev_openings = dev->ccbq.dev_openings;
2841                 cgds->dev_active = dev->ccbq.dev_active;
2842                 cgds->allocated = dev->ccbq.allocated;
2843                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2844                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2845                 cgds->last_reset = tar->last_reset;
2846                 cgds->maxtags = dev->maxtags;
2847                 cgds->mintags = dev->mintags;
2848                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2849                         cgds->last_reset = bus->last_reset;
2850                 mtx_unlock(&devq->send_mtx);
2851                 cgds->ccb_h.status = CAM_REQ_CMP;
2852                 break;
2853         }
2854         case XPT_GDEVLIST:
2855         {
2856                 struct cam_periph       *nperiph;
2857                 struct periph_list      *periph_head;
2858                 struct ccb_getdevlist   *cgdl;
2859                 u_int                   i;
2860                 struct cam_ed           *device;
2861                 int                     found;
2862
2863                 found = 0;
2864
2865                 /*
2866                  * Don't want anyone mucking with our data.
2867                  */
2868                 device = path->device;
2869                 periph_head = &device->periphs;
2870                 cgdl = &start_ccb->cgdl;
2871
2872                 /*
2873                  * Check and see if the list has changed since the user
2874                  * last requested a list member.  If so, tell them that the
2875                  * list has changed, and therefore they need to start over
2876                  * from the beginning.
2877                  */
2878                 if ((cgdl->index != 0) &&
2879                     (cgdl->generation != device->generation)) {
2880                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2881                         break;
2882                 }
2883
2884                 /*
2885                  * Traverse the list of peripherals and attempt to find
2886                  * the requested peripheral.
2887                  */
2888                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
2889                      (nperiph != NULL) && (i <= cgdl->index);
2890                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2891                         if (i == cgdl->index) {
2892                                 strlcpy(cgdl->periph_name,
2893                                         nperiph->periph_name,
2894                                         sizeof(cgdl->periph_name));
2895                                 cgdl->unit_number = nperiph->unit_number;
2896                                 found = 1;
2897                         }
2898                 }
2899                 if (found == 0) {
2900                         cgdl->status = CAM_GDEVLIST_ERROR;
2901                         break;
2902                 }
2903
2904                 if (nperiph == NULL)
2905                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2906                 else
2907                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2908
2909                 cgdl->index++;
2910                 cgdl->generation = device->generation;
2911
2912                 cgdl->ccb_h.status = CAM_REQ_CMP;
2913                 break;
2914         }
2915         case XPT_DEV_MATCH:
2916         {
2917                 dev_pos_type position_type;
2918                 struct ccb_dev_match *cdm;
2919
2920                 cdm = &start_ccb->cdm;
2921
2922                 /*
2923                  * There are two ways of getting at information in the EDT.
2924                  * The first way is via the primary EDT tree.  It starts
2925                  * with a list of buses, then a list of targets on a bus,
2926                  * then devices/luns on a target, and then peripherals on a
2927                  * device/lun.  The "other" way is by the peripheral driver
2928                  * lists.  The peripheral driver lists are organized by
2929                  * peripheral driver, obviously, so it makes sense to
2930                  * use the peripheral driver list if the user is looking
2931                  * for something like "da1", or all "da" devices.  If the
2932                  * user is looking for something on a particular bus/target
2933                  * or lun, it's generally better to go through the EDT tree.
2934                  */
2935
2936                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2937                         position_type = cdm->pos.position_type;
2938                 else {
2939                         u_int i;
2940
2941                         position_type = CAM_DEV_POS_NONE;
2942
2943                         for (i = 0; i < cdm->num_patterns; i++) {
2944                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2945                                  || (cdm->patterns[i].type == DEV_MATCH_DEVICE)) {
2946                                         position_type = CAM_DEV_POS_EDT;
2947                                         break;
2948                                 }
2949                         }
2950
2951                         if (cdm->num_patterns == 0)
2952                                 position_type = CAM_DEV_POS_EDT;
2953                         else if (position_type == CAM_DEV_POS_NONE)
2954                                 position_type = CAM_DEV_POS_PDRV;
2955                 }
2956
2957                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
2958                 case CAM_DEV_POS_EDT:
2959                         xptedtmatch(cdm);
2960                         break;
2961                 case CAM_DEV_POS_PDRV:
2962                         xptperiphlistmatch(cdm);
2963                         break;
2964                 default:
2965                         cdm->status = CAM_DEV_MATCH_ERROR;
2966                         break;
2967                 }
2968
2969                 if (cdm->status == CAM_DEV_MATCH_ERROR)
2970                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2971                 else
2972                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2973
2974                 break;
2975         }
2976         case XPT_SASYNC_CB:
2977         {
2978                 struct ccb_setasync *csa;
2979                 struct async_node *cur_entry;
2980                 struct async_list *async_head;
2981                 u_int32_t added;
2982
2983                 csa = &start_ccb->csa;
2984                 added = csa->event_enable;
2985                 async_head = &path->device->asyncs;
2986
2987                 /*
2988                  * If there is already an entry for us, simply
2989                  * update it.
2990                  */
2991                 cur_entry = SLIST_FIRST(async_head);
2992                 while (cur_entry != NULL) {
2993                         if ((cur_entry->callback_arg == csa->callback_arg)
2994                          && (cur_entry->callback == csa->callback))
2995                                 break;
2996                         cur_entry = SLIST_NEXT(cur_entry, links);
2997                 }
2998
2999                 if (cur_entry != NULL) {
3000                         /*
3001                          * If the request has no flags set,
3002                          * remove the entry.
3003                          */
3004                         added &= ~cur_entry->event_enable;
3005                         if (csa->event_enable == 0) {
3006                                 SLIST_REMOVE(async_head, cur_entry,
3007                                              async_node, links);
3008                                 xpt_release_device(path->device);
3009                                 free(cur_entry, M_CAMXPT);
3010                         } else {
3011                                 cur_entry->event_enable = csa->event_enable;
3012                         }
3013                         csa->event_enable = added;
3014                 } else {
3015                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3016                                            M_NOWAIT);
3017                         if (cur_entry == NULL) {
3018                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3019                                 break;
3020                         }
3021                         cur_entry->event_enable = csa->event_enable;
3022                         cur_entry->event_lock = (path->bus->sim->mtx &&
3023                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
3024                         cur_entry->callback_arg = csa->callback_arg;
3025                         cur_entry->callback = csa->callback;
3026                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3027                         xpt_acquire_device(path->device);
3028                 }
3029                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3030                 break;
3031         }
3032         case XPT_REL_SIMQ:
3033         {
3034                 struct ccb_relsim *crs;
3035                 struct cam_ed *dev;
3036
3037                 crs = &start_ccb->crs;
3038                 dev = path->device;
3039                 if (dev == NULL) {
3040                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3041                         break;
3042                 }
3043
3044                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3045                         /* Don't ever go below one opening */
3046                         if (crs->openings > 0) {
3047                                 xpt_dev_ccbq_resize(path, crs->openings);
3048                                 if (bootverbose) {
3049                                         xpt_print(path,
3050                                             "number of openings is now %d\n",
3051                                             crs->openings);
3052                                 }
3053                         }
3054                 }
3055
3056                 mtx_lock(&dev->sim->devq->send_mtx);
3057                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3058                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3059                                 /*
3060                                  * Just extend the old timeout and decrement
3061                                  * the freeze count so that a single timeout
3062                                  * is sufficient for releasing the queue.
3063                                  */
3064                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3065                                 callout_stop(&dev->callout);
3066                         } else {
3067                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3068                         }
3069
3070                         callout_reset_sbt(&dev->callout,
3071                             SBT_1MS * crs->release_timeout, 0,
3072                             xpt_release_devq_timeout, dev, 0);
3073
3074                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3075                 }
3076
3077                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3078                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3079                                 /*
3080                                  * Decrement the freeze count so that a single
3081                                  * completion is still sufficient to unfreeze
3082                                  * the queue.
3083                                  */
3084                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3085                         } else {
3086                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3087                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3088                         }
3089                 }
3090
3091                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3092                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3093                          || (dev->ccbq.dev_active == 0)) {
3094                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3095                         } else {
3096                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3097                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3098                         }
3099                 }
3100                 mtx_unlock(&dev->sim->devq->send_mtx);
3101
3102                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
3103                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
3104                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
3105                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3106                 break;
3107         }
3108         case XPT_DEBUG: {
3109                 struct cam_path *oldpath;
3110
3111                 /* Check that all request bits are supported. */
3112                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3113                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3114                         break;
3115                 }
3116
3117                 cam_dflags = CAM_DEBUG_NONE;
3118                 if (cam_dpath != NULL) {
3119                         oldpath = cam_dpath;
3120                         cam_dpath = NULL;
3121                         xpt_free_path(oldpath);
3122                 }
3123                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3124                         if (xpt_create_path(&cam_dpath, NULL,
3125                                             start_ccb->ccb_h.path_id,
3126                                             start_ccb->ccb_h.target_id,
3127                                             start_ccb->ccb_h.target_lun) !=
3128                                             CAM_REQ_CMP) {
3129                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3130                         } else {
3131                                 cam_dflags = start_ccb->cdbg.flags;
3132                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3133                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3134                                     cam_dflags);
3135                         }
3136                 } else
3137                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3138                 break;
3139         }
3140         case XPT_NOOP:
3141                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3142                         xpt_freeze_devq(path, 1);
3143                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3144                 break;
3145         case XPT_REPROBE_LUN:
3146                 xpt_async(AC_INQ_CHANGED, path, NULL);
3147                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3148                 xpt_done(start_ccb);
3149                 break;
3150         case XPT_ASYNC:
3151                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3152                 xpt_done(start_ccb);
3153                 break;
3154         default:
3155         case XPT_SDEV_TYPE:
3156         case XPT_TERM_IO:
3157         case XPT_ENG_INQ:
3158                 /* XXX Implement */
3159                 xpt_print(start_ccb->ccb_h.path,
3160                     "%s: CCB type %#x %s not supported\n", __func__,
3161                     start_ccb->ccb_h.func_code,
3162                     xpt_action_name(start_ccb->ccb_h.func_code));
3163                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3164                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3165                         xpt_done(start_ccb);
3166                 }
3167                 break;
3168         }
3169         CAM_DEBUG(path, CAM_DEBUG_TRACE,
3170             ("xpt_action_default: func= %#x %s status %#x\n",
3171                 start_ccb->ccb_h.func_code,
3172                 xpt_action_name(start_ccb->ccb_h.func_code),
3173                 start_ccb->ccb_h.status));
3174 }
3175
3176 /*
3177  * Call the sim poll routine to allow the sim to complete
3178  * any inflight requests, then call camisr_runqueue to run the
3179  * completion callbacks for any CCBs the poll finished.
3180  */
3181 void
3182 xpt_sim_poll(struct cam_sim *sim)
3183 {
3184         struct mtx *mtx;
3185
3186         KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3187         mtx = sim->mtx;
3188         if (mtx)
3189                 mtx_lock(mtx);
3190         (*(sim->sim_poll))(sim);
3191         if (mtx)
3192                 mtx_unlock(mtx);
3193         camisr_runqueue();
3194 }
3195
3196 uint32_t
3197 xpt_poll_setup(union ccb *start_ccb)
3198 {
3199         u_int32_t timeout;
3200         struct    cam_sim *sim;
3201         struct    cam_devq *devq;
3202         struct    cam_ed *dev;
3203
3204         timeout = start_ccb->ccb_h.timeout * 10;
3205         sim = start_ccb->ccb_h.path->bus->sim;
3206         devq = sim->devq;
3207         dev = start_ccb->ccb_h.path->device;
3208
3209         KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3210
3211         /*
3212          * Steal an opening so that no other queued requests
3213          * can get it before us while we simulate interrupts.
3214          */
3215         mtx_lock(&devq->send_mtx);
3216         dev->ccbq.dev_openings--;
3217         while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3218             (--timeout > 0)) {
3219                 mtx_unlock(&devq->send_mtx);
3220                 DELAY(100);
3221                 xpt_sim_poll(sim);
3222                 mtx_lock(&devq->send_mtx);
3223         }
3224         dev->ccbq.dev_openings++;
3225         mtx_unlock(&devq->send_mtx);
3226
3227         return (timeout);
3228 }
3229
3230 void
3231 xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
3232 {
3233
3234         KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim),
3235             ("%s: non-pollable sim", __func__));
3236         while (--timeout > 0) {
3237                 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim);
3238                 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3239                     != CAM_REQ_INPROG)
3240                         break;
3241                 DELAY(100);
3242         }
3243
3244         if (timeout == 0) {
3245                 /*
3246                  * XXX Is it worth adding a sim_timeout entry
3247                  * point so we can attempt recovery?  If
3248                  * this is only used for dumps, I don't think
3249                  * it is.
3250                  */
3251                 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3252         }
3253 }
3254
3255 void
3256 xpt_polled_action(union ccb *start_ccb)
3257 {
3258         uint32_t        timeout;
3259         struct cam_ed   *dev;
3260
3261         timeout = start_ccb->ccb_h.timeout * 10;
3262         dev = start_ccb->ccb_h.path->device;
3263
3264         mtx_unlock(&dev->device_mtx);
3265
3266         timeout = xpt_poll_setup(start_ccb);
3267         if (timeout > 0) {
3268                 xpt_action(start_ccb);
3269                 xpt_pollwait(start_ccb, timeout);
3270         } else {
3271                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3272         }
3273
3274         mtx_lock(&dev->device_mtx);
3275 }
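/*
 * Example (a sketch; "periph" is hypothetical and locking is elided):
 * during a kernel dump a peripheral driver issues its I/O through
 * xpt_polled_action() instead of waiting on interrupt-driven completion:
 *
 *	struct ccb_scsiio csio;
 *
 *	xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	scsi_synchronize_cache(&csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *	    0, 0, SSD_FULL_SIZE, 60000);
 *	xpt_polled_action((union ccb *)&csio);
 */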
3276
3277 /*
3278  * Schedule a peripheral driver to receive a ccb when its
3279  * target device has space for more transactions.
3280  */
3281 void
3282 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3283 {
3284
3285         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3286         cam_periph_assert(periph, MA_OWNED);
3287         if (new_priority < periph->scheduled_priority) {
3288                 periph->scheduled_priority = new_priority;
3289                 xpt_run_allocq(periph, 0);
3290         }
3291 }
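/*
 * Example (a sketch; "softc" and its bio queue are hypothetical): with the
 * periph lock held, a driver's strategy routine queues new work and then
 * asks to be called back:
 *
 *	bioq_disksort(&softc->bio_queue, bp);
 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *
 * Once an opening is available, xpt_run_allocq() hands a CCB at the
 * scheduled priority to periph->periph_start().
 */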
3292
3293 /*
3294  * Schedule a device to run on a given queue.
3295  * If the device was inserted as a new entry on the queue,
3296  * return 1 meaning the device queue should be run. If we
3297  * were already queued, implying someone else has already
3298  * started the queue, return 0 so the caller doesn't attempt
3299  * to run the queue.
3300  */
3301 static int
3302 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3303                  u_int32_t new_priority)
3304 {
3305         int retval;
3306         u_int32_t old_priority;
3307
3308         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3309
3310         old_priority = pinfo->priority;
3311
3312         /*
3313          * Are we already queued?
3314          */
3315         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3316                 /* Simply reorder based on new priority */
3317                 if (new_priority < old_priority) {
3318                         camq_change_priority(queue, pinfo->index,
3319                                              new_priority);
3320                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3321                                         ("changed priority to %d\n",
3322                                          new_priority));
3323                         retval = 1;
3324                 } else
3325                         retval = 0;
3326         } else {
3327                 /* New entry on the queue */
3328                 if (new_priority < old_priority)
3329                         pinfo->priority = new_priority;
3330
3331                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3332                                 ("Inserting onto queue\n"));
3333                 pinfo->generation = ++queue->generation;
3334                 camq_insert(queue, pinfo);
3335                 retval = 1;
3336         }
3337         return (retval);
3338 }
3339
3340 static void
3341 xpt_run_allocq_task(void *context, int pending)
3342 {
3343         struct cam_periph *periph = context;
3344
3345         cam_periph_lock(periph);
3346         periph->flags &= ~CAM_PERIPH_RUN_TASK;
3347         xpt_run_allocq(periph, 1);
3348         cam_periph_unlock(periph);
3349         cam_periph_release(periph);
3350 }
3351
3352 static void
3353 xpt_run_allocq(struct cam_periph *periph, int sleep)
3354 {
3355         struct cam_ed   *device;
3356         union ccb       *ccb;
3357         uint32_t         prio;
3358
3359         cam_periph_assert(periph, MA_OWNED);
3360         if (periph->periph_allocating)
3361                 return;
3362         cam_periph_doacquire(periph);
3363         periph->periph_allocating = 1;
3364         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3365         device = periph->path->device;
3366         ccb = NULL;
3367 restart:
3368         while ((prio = min(periph->scheduled_priority,
3369             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3370             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3371              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3372                 if (ccb == NULL &&
3373                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3374                         if (sleep) {
3375                                 ccb = xpt_get_ccb(periph);
3376                                 goto restart;
3377                         }
3378                         if (periph->flags & CAM_PERIPH_RUN_TASK)
3379                                 break;
3380                         cam_periph_doacquire(periph);
3381                         periph->flags |= CAM_PERIPH_RUN_TASK;
3382                         taskqueue_enqueue(xsoftc.xpt_taskq,
3383                             &periph->periph_run_task);
3384                         break;
3385                 }
3386                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3387                 if (prio == periph->immediate_priority) {
3388                         periph->immediate_priority = CAM_PRIORITY_NONE;
3389                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3390                                         ("waking cam_periph_getccb()\n"));
3391                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3392                                           periph_links.sle);
3393                         wakeup(&periph->ccb_list);
3394                 } else {
3395                         periph->scheduled_priority = CAM_PRIORITY_NONE;
3396                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3397                                         ("calling periph_start()\n"));
3398                         periph->periph_start(periph, ccb);
3399                 }
3400                 ccb = NULL;
3401         }
3402         if (ccb != NULL)
3403                 xpt_release_ccb(ccb);
3404         periph->periph_allocating = 0;
3405         cam_periph_release_locked(periph);
3406 }
3407
3408 static void
3409 xpt_run_devq(struct cam_devq *devq)
3410 {
3411         struct mtx *mtx;
3412
3413         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3414
3415         devq->send_queue.qfrozen_cnt++;
3416         while ((devq->send_queue.entries > 0)
3417             && (devq->send_openings > 0)
3418             && (devq->send_queue.qfrozen_cnt <= 1)) {
3419                 struct  cam_ed *device;
3420                 union ccb *work_ccb;
3421                 struct  cam_sim *sim;
3422                 struct xpt_proto *proto;
3423
3424                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
3425                                                            CAMQ_HEAD);
3426                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3427                                 ("running device %p\n", device));
3428
3429                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3430                 if (work_ccb == NULL) {
3431                         printf("device on run queue with no ccbs???\n");
3432                         continue;
3433                 }
3434
3435                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3436                         mtx_lock(&xsoftc.xpt_highpower_lock);
3437                         if (xsoftc.num_highpower <= 0) {
3438                                 /*
3439                                  * We got a high power command, but we
3440                                  * don't have any available slots.  Freeze
3441                                  * the device queue until we have a slot
3442                                  * available.
3443                                  */
3444                                 xpt_freeze_devq_device(device, 1);
3445                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3446                                                    highpowerq_entry);
3447
3448                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
3449                                 continue;
3450                         } else {
3451                                 /*
3452                                  * Consume a high power slot while
3453                                  * this ccb runs.
3454                                  */
3455                                 xsoftc.num_highpower--;
3456                         }
3457                         mtx_unlock(&xsoftc.xpt_highpower_lock);
3458                 }
3459                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3460                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3461                 devq->send_openings--;
3462                 devq->send_active++;
3463                 xpt_schedule_devq(devq, device);
3464                 mtx_unlock(&devq->send_mtx);
3465
3466                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3467                         /*
3468                          * The client wants to freeze the queue
3469                          * after this CCB is sent.
3470                          */
3471                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3472                 }
3473
3474                 /* In Target mode, the peripheral driver knows best... */
3475                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3476                         if ((device->inq_flags & SID_CmdQue) != 0
3477                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3478                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3479                         else
3480                                 /*
3481                                  * Clear this in case of a retried CCB that
3482                                  * failed due to a rejected tag.
3483                                  */
3484                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3485                 }
3486
3487                 KASSERT(device == work_ccb->ccb_h.path->device,
3488                     ("device (%p) / path->device (%p) mismatch",
3489                         device, work_ccb->ccb_h.path->device));
3490                 proto = xpt_proto_find(device->protocol);
3491                 if (proto && proto->ops->debug_out)
3492                         proto->ops->debug_out(work_ccb);
3493
3494                 /*
3495                  * Device queues can be shared among multiple SIM instances
3496                  * that reside on different buses.  Use the SIM from the
3497                  * queued device, rather than the one from the calling bus.
3498                  */
3499                 sim = device->sim;
3500                 mtx = sim->mtx;
3501                 if (mtx && !mtx_owned(mtx))
3502                         mtx_lock(mtx);
3503                 else
3504                         mtx = NULL;
3505                 work_ccb->ccb_h.qos.periph_data = cam_iosched_now();
3506                 (*(sim->sim_action))(sim, work_ccb);
3507                 if (mtx)
3508                         mtx_unlock(mtx);
3509                 mtx_lock(&devq->send_mtx);
3510         }
3511         devq->send_queue.qfrozen_cnt--;
3512 }
3513
3514 /*
3515  * This function merges the request fields and payload from the src ccb into
3516  * the dst ccb, while keeping important header fields in the dst ccb constant.
3517  */
3518 void
3519 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb)
3520 {
3521
3522         /*
3523          * Pull fields that are valid for peripheral drivers to set
3524          * into the dst CCB along with the CCB "payload".
3525          */
3526         dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count;
3527         dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code;
3528         dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout;
3529         dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags;
3530         bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1],
3531               sizeof(union ccb) - sizeof(struct ccb_hdr));
3532 }
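/*
 * Example (a sketch; "path" and "request_ccb" are hypothetical): a driver
 * can replay a request staged in one CCB through a freshly set up CCB
 * without disturbing the destination's header linkage:
 *
 *	union ccb *new_ccb = xpt_alloc_ccb();
 *
 *	xpt_setup_ccb(&new_ccb->ccb_h, path, CAM_PRIORITY_NORMAL);
 *	xpt_merge_ccb(new_ccb, request_ccb);
 */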
3533
3534 void
3535 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3536                     u_int32_t priority, u_int32_t flags)
3537 {
3538
3539         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3540         ccb_h->pinfo.priority = priority;
3541         ccb_h->path = path;
3542         ccb_h->path_id = path->bus->path_id;
3543         if (path->target)
3544                 ccb_h->target_id = path->target->target_id;
3545         else
3546                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3547         if (path->device) {
3548                 ccb_h->target_lun = path->device->lun_id;
3549                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3550         } else {
3551                 ccb_h->target_lun = CAM_LUN_WILDCARD;
3552         }
3553         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3554         ccb_h->flags = flags;
3555         ccb_h->xflags = 0;
3556 }
3557
3558 void
3559 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3560 {
3561         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3562 }
3563
3564 /* Path manipulation functions */
3565 cam_status
3566 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3567                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3568 {
3569         struct     cam_path *path;
3570         cam_status status;
3571
3572         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3573
3574         if (path == NULL) {
3575                 status = CAM_RESRC_UNAVAIL;
3576                 return(status);
3577         }
3578         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3579         if (status != CAM_REQ_CMP) {
3580                 free(path, M_CAMPATH);
3581                 path = NULL;
3582         }
3583         *new_path_ptr = path;
3584         return (status);
3585 }
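/*
 * Example (a sketch; the id variables are hypothetical): callers pair a
 * successful xpt_create_path() with xpt_free_path() when they are done:
 *
 *	struct cam_path *path;
 *	struct ccb_pathinq cpi;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) ==
 *	    CAM_REQ_CMP) {
 *		xpt_path_inq(&cpi, path);
 *		xpt_free_path(path);
 *	}
 */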
3586
3587 cam_status
3588 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3589                          struct cam_periph *periph, path_id_t path_id,
3590                          target_id_t target_id, lun_id_t lun_id)
3591 {
3592
3593         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3594             lun_id));
3595 }
3596
3597 cam_status
3598 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3599                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3600 {
3601         struct       cam_eb *bus;
3602         struct       cam_et *target;
3603         struct       cam_ed *device;
3604         cam_status   status;
3605
3606         status = CAM_REQ_CMP;   /* Completed without error */
3607         target = NULL;          /* Wildcarded */
3608         device = NULL;          /* Wildcarded */
3609
3610         /*
3611          * We will potentially modify the EDT, so serialize with
3612          * anyone else who may attempt to create cam paths.
3613          */
3614         bus = xpt_find_bus(path_id);
3615         if (bus == NULL) {
3616                 status = CAM_PATH_INVALID;
3617         } else {
3618                 xpt_lock_buses();
3619                 mtx_lock(&bus->eb_mtx);
3620                 target = xpt_find_target(bus, target_id);
3621                 if (target == NULL) {
3622                         /* Create one */
3623                         struct cam_et *new_target;
3624
3625                         new_target = xpt_alloc_target(bus, target_id);
3626                         if (new_target == NULL) {
3627                                 status = CAM_RESRC_UNAVAIL;
3628                         } else {
3629                                 target = new_target;
3630                         }
3631                 }
3632                 xpt_unlock_buses();
3633                 if (target != NULL) {
3634                         device = xpt_find_device(target, lun_id);
3635                         if (device == NULL) {
3636                                 /* Create one */
3637                                 struct cam_ed *new_device;
3638
3639                                 new_device =
3640                                     (*(bus->xport->ops->alloc_device))(bus,
3641                                                                        target,
3642                                                                        lun_id);
3643                                 if (new_device == NULL) {
3644                                         status = CAM_RESRC_UNAVAIL;
3645                                 } else {
3646                                         device = new_device;
3647                                 }
3648                         }
3649                 }
3650                 mtx_unlock(&bus->eb_mtx);
3651         }
3652
3653         /*
3654          * Only touch the user's data if we are successful.
3655          */
3656         if (status == CAM_REQ_CMP) {
3657                 new_path->periph = perph;
3658                 new_path->bus = bus;
3659                 new_path->target = target;
3660                 new_path->device = device;
3661                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3662         } else {
3663                 if (device != NULL)
3664                         xpt_release_device(device);
3665                 if (target != NULL)
3666                         xpt_release_target(target);
3667                 if (bus != NULL)
3668                         xpt_release_bus(bus);
3669         }
3670         return (status);
3671 }
3672
3673 cam_status
3674 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3675 {
3676         struct     cam_path *new_path;
3677
3678         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3679         if (new_path == NULL)
3680                 return(CAM_RESRC_UNAVAIL);
3681         *new_path = *path;
3682         if (path->bus != NULL)
3683                 xpt_acquire_bus(path->bus);
3684         if (path->target != NULL)
3685                 xpt_acquire_target(path->target);
3686         if (path->device != NULL)
3687                 xpt_acquire_device(path->device);
3688         *new_path_ptr = new_path;
3689         return (CAM_REQ_CMP);
3690 }
3691
3692 void
3693 xpt_release_path(struct cam_path *path)
3694 {
3695         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3696         if (path->device != NULL) {
3697                 xpt_release_device(path->device);
3698                 path->device = NULL;
3699         }
3700         if (path->target != NULL) {
3701                 xpt_release_target(path->target);
3702                 path->target = NULL;
3703         }
3704         if (path->bus != NULL) {
3705                 xpt_release_bus(path->bus);
3706                 path->bus = NULL;
3707         }
3708 }
3709
3710 void
3711 xpt_free_path(struct cam_path *path)
3712 {
3713
3714         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3715         xpt_release_path(path);
3716         free(path, M_CAMPATH);
3717 }
3718
3719 void
3720 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3721     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3722 {
3723
3724         xpt_lock_buses();
3725         if (bus_ref) {
3726                 if (path->bus)
3727                         *bus_ref = path->bus->refcount;
3728                 else
3729                         *bus_ref = 0;
3730         }
3731         if (periph_ref) {
3732                 if (path->periph)
3733                         *periph_ref = path->periph->refcount;
3734                 else
3735                         *periph_ref = 0;
3736         }
3737         xpt_unlock_buses();
3738         if (target_ref) {
3739                 if (path->target)
3740                         *target_ref = path->target->refcount;
3741                 else
3742                         *target_ref = 0;
3743         }
3744         if (device_ref) {
3745                 if (path->device)
3746                         *device_ref = path->device->refcount;
3747                 else
3748                         *device_ref = 0;
3749         }
3750 }
3751
3752 /*
3753  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3754  * in path1, 2 for match with wildcards in path2.
3755  */
3756 int
3757 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3758 {
3759         int retval = 0;
3760
3761         if (path1->bus != path2->bus) {
3762                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3763                         retval = 1;
3764                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3765                         retval = 2;
3766                 else
3767                         return (-1);
3768         }
3769         if (path1->target != path2->target) {
3770                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3771                         if (retval == 0)
3772                                 retval = 1;
3773                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3774                         retval = 2;
3775                 else
3776                         return (-1);
3777         }
3778         if (path1->device != path2->device) {
3779                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3780                         if (retval == 0)
3781                                 retval = 1;
3782                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3783                         retval = 2;
3784                 else
3785                         return (-1);
3786         }
3787         return (retval);
3788 }
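/*
 * For example, comparing a path that wildcards target and lun against a
 * fully specified path on the same bus returns 1; swapping the arguments
 * returns 2; two fully specified paths on different buses return -1.
 */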
3789
3790 int
3791 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3792 {
3793         int retval = 0;
3794
3795         if (path->bus != dev->target->bus) {
3796                 if (path->bus->path_id == CAM_BUS_WILDCARD)
3797                         retval = 1;
3798                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3799                         retval = 2;
3800                 else
3801                         return (-1);
3802         }
3803         if (path->target != dev->target) {
3804                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
3805                         if (retval == 0)
3806                                 retval = 1;
3807                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3808                         retval = 2;
3809                 else
3810                         return (-1);
3811         }
3812         if (path->device != dev) {
3813                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
3814                         if (retval == 0)
3815                                 retval = 1;
3816                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
3817                         retval = 2;
3818                 else
3819                         return (-1);
3820         }
3821         return (retval);
3822 }
3823
3824 void
3825 xpt_print_path(struct cam_path *path)
3826 {
3827         struct sbuf sb;
3828         char buffer[XPT_PRINT_LEN];
3829
3830         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3831         xpt_path_sbuf(path, &sb);
3832         sbuf_finish(&sb);
3833         printf("%s", sbuf_data(&sb));
3834         sbuf_delete(&sb);
3835 }
3836
3837 void
3838 xpt_print_device(struct cam_ed *device)
3839 {
3840
3841         if (device == NULL)
3842                 printf("(nopath): ");
3843         else {
3844                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3845                        device->sim->unit_number,
3846                        device->sim->bus_id,
3847                        device->target->target_id,
3848                        (uintmax_t)device->lun_id);
3849         }
3850 }
3851
3852 void
3853 xpt_print(struct cam_path *path, const char *fmt, ...)
3854 {
3855         va_list ap;
3856         struct sbuf sb;
3857         char buffer[XPT_PRINT_LEN];
3858
3859         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3860
3861         xpt_path_sbuf(path, &sb);
3862         va_start(ap, fmt);
3863         sbuf_vprintf(&sb, fmt, ap);
3864         va_end(ap);
3865
3866         sbuf_finish(&sb);
3867         printf("%s", sbuf_data(&sb));
3868         sbuf_delete(&sb);
3869 }
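/*
 * Example: xpt_print(path, "retrying command\n") emits the message behind
 * the usual path prefix, e.g. "(da0:mpt0:0:1:0): retrying command".
 */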
3870
3871 int
3872 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3873 {
3874         struct sbuf sb;
3875         int len;
3876
3877         sbuf_new(&sb, str, str_len, 0);
3878         len = xpt_path_sbuf(path, &sb);
3879         sbuf_finish(&sb);
3880         return (len);
3881 }
3882
3883 int
3884 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3885 {
3886
3887         if (path == NULL)
3888                 sbuf_printf(sb, "(nopath): ");
3889         else {
3890                 if (path->periph != NULL)
3891                         sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3892                                     path->periph->unit_number);
3893                 else
3894                         sbuf_printf(sb, "(noperiph:");
3895
3896                 if (path->bus != NULL)
3897                         sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3898                                     path->bus->sim->unit_number,
3899                                     path->bus->sim->bus_id);
3900                 else
3901                         sbuf_printf(sb, "nobus:");
3902
3903                 if (path->target != NULL)
3904                         sbuf_printf(sb, "%d:", path->target->target_id);
3905                 else
3906                         sbuf_printf(sb, "X:");
3907
3908                 if (path->device != NULL)
3909                         sbuf_printf(sb, "%jx): ",
3910                             (uintmax_t)path->device->lun_id);
3911                 else
3912                         sbuf_printf(sb, "X): ");
3913         }
3914
3915         return(sbuf_len(sb));
3916 }
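/*
 * Example: a fully specified path renders as "(da0:ahc1:0:2:0): ", while
 * missing components fall back to "noperiph", "nobus", or "X" as above.
 */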
3917
3918 path_id_t
3919 xpt_path_path_id(struct cam_path *path)
3920 {
3921         return(path->bus->path_id);
3922 }
3923
3924 target_id_t
3925 xpt_path_target_id(struct cam_path *path)
3926 {
3927         if (path->target != NULL)
3928                 return (path->target->target_id);
3929         else
3930                 return (CAM_TARGET_WILDCARD);
3931 }
3932
3933 lun_id_t
3934 xpt_path_lun_id(struct cam_path *path)
3935 {
3936         if (path->device != NULL)
3937                 return (path->device->lun_id);
3938         else
3939                 return (CAM_LUN_WILDCARD);
3940 }
3941
3942 struct cam_sim *
3943 xpt_path_sim(struct cam_path *path)
3944 {
3945
3946         return (path->bus->sim);
3947 }
3948
3949 struct cam_periph*
3950 xpt_path_periph(struct cam_path *path)
3951 {
3952
3953         return (path->periph);
3954 }
3955
3956 /*
3957  * Release a CAM control block for the caller.  Remit the cost of the structure
3958  * to the device referenced by the path.  If this device had no 'credits'
3959  * and peripheral drivers have registered async callbacks for this notification,
3960  * call them now.
3961  */
3962 void
3963 xpt_release_ccb(union ccb *free_ccb)
3964 {
3965         struct   cam_ed *device;
3966         struct   cam_periph *periph;
3967
3968         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3969         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3970         device = free_ccb->ccb_h.path->device;
3971         periph = free_ccb->ccb_h.path->periph;
3972
3973         xpt_free_ccb(free_ccb);
3974         periph->periph_allocated--;
3975         cam_ccbq_release_opening(&device->ccbq);
3976         xpt_run_allocq(periph, 0);
3977 }
3978
3979 /* Functions accessed by SIM drivers */
3980
3981 static struct xpt_xport_ops xport_default_ops = {
3982         .alloc_device = xpt_alloc_device_default,
3983         .action = xpt_action_default,
3984         .async = xpt_dev_async_default,
3985 };
3986 static struct xpt_xport xport_default = {
3987         .xport = XPORT_UNKNOWN,
3988         .name = "unknown",
3989         .ops = &xport_default_ops,
3990 };
3991
3992 CAM_XPT_XPORT(xport_default);
3993
3994 /*
3995  * A sim structure, listing the SIM entry points and instance
3996  * identification info, is passed to xpt_bus_register to hook the SIM
3997  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3998  * for this new bus and places it in the array of buses and assigns
3999  * it a path_id.  The path_id may be influenced by "hard wiring"
4000  * information specified by the user.  Once interrupt services are
4001  * available, the bus will be probed.
4002  */
4003 int32_t
4004 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
4005 {
4006         struct cam_eb *new_bus;
4007         struct cam_eb *old_bus;
4008         struct ccb_pathinq cpi;
4009         struct cam_path *path;
4010         cam_status status;
4011
4012         sim->bus_id = bus;
4013         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4014                                           M_CAMXPT, M_NOWAIT|M_ZERO);
4015         if (new_bus == NULL) {
4016                 /* Couldn't satisfy request */
4017                 return (CAM_RESRC_UNAVAIL);
4018         }
4019
4020         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
4021         TAILQ_INIT(&new_bus->et_entries);
4022         cam_sim_hold(sim);
4023         new_bus->sim = sim;
4024         timevalclear(&new_bus->last_reset);
4025         new_bus->flags = 0;
4026         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4027         new_bus->generation = 0;
4028
4029         xpt_lock_buses();
4030         sim->path_id = new_bus->path_id =
4031             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4032         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4033         while (old_bus != NULL
4034             && old_bus->path_id < new_bus->path_id)
4035                 old_bus = TAILQ_NEXT(old_bus, links);
4036         if (old_bus != NULL)
4037                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4038         else
4039                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4040         xsoftc.bus_generation++;
4041         xpt_unlock_buses();
4042
4043         /*
4044          * Set a default transport so that a PATH_INQ can be issued to
4045          * the SIM.  This will then allow for probing and attaching of
4046          * a more appropriate transport.
4047          */
4048         new_bus->xport = &xport_default;
4049
4050         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
4051                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4052         if (status != CAM_REQ_CMP) {
4053                 xpt_release_bus(new_bus);
4054                 return (CAM_RESRC_UNAVAIL);
4055         }
4056
4057         xpt_path_inq(&cpi, path);
4058
4059         if (cpi.ccb_h.status == CAM_REQ_CMP) {
4060                 struct xpt_xport **xpt;
4061
4062                 SET_FOREACH(xpt, cam_xpt_xport_set) {
4063                         if ((*xpt)->xport == cpi.transport) {
4064                                 new_bus->xport = *xpt;
4065                                 break;
4066                         }
4067                 }
4068                 if (new_bus->xport == NULL) {
4069                         xpt_print(path,
4070                             "No transport found for %d\n", cpi.transport);
4071                         xpt_release_bus(new_bus);
4072                         xpt_free_path(path);
4073                         return (CAM_RESRC_UNAVAIL);
4074                 }
4075         }
4076
4077         /* Notify interested parties */
4078         if (sim->path_id != CAM_XPT_PATH_ID) {
4079                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
4080                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
4081                         union   ccb *scan_ccb;
4082
4083                         /* Initiate bus rescan. */
4084                         scan_ccb = xpt_alloc_ccb_nowait();
4085                         if (scan_ccb != NULL) {
4086                                 scan_ccb->ccb_h.path = path;
4087                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
4088                                 scan_ccb->crcn.flags = 0;
4089                                 xpt_rescan(scan_ccb);
4090                         } else {
4091                                 xpt_print(path,
4092                                           "Can't allocate CCB to scan bus\n");
4093                                 xpt_free_path(path);
4094                         }
4095                 } else
4096                         xpt_free_path(path);
4097         } else
4098                 xpt_free_path(path);
4099         return (CAM_SUCCESS);
4100 }
4101
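/*
 * Example (an illustrative sketch, not built): a typical SIM driver
 * attach path.  "my_softc", "my_action" and "my_poll" are hypothetical
 * names, and the softc mutex is assumed to be initialized already; the
 * CAM calls mirror how real SIMs hook themselves into the XPT.
 */
#if 0
static int
my_sim_attach(device_t dev, struct my_softc *sc)
{
        struct cam_devq *devq;

        /* Queue space for requests routed from the XPT to this SIM. */
        devq = cam_simq_alloc(/*max_sim_transactions*/32);
        if (devq == NULL)
                return (ENOMEM);

        sc->sim = cam_sim_alloc(my_action, my_poll, "mysim", sc,
            device_get_unit(dev), &sc->mtx, /*max_dev_transactions*/1,
            /*max_tagged_dev_transactions*/32, devq);
        if (sc->sim == NULL) {
                cam_simq_free(devq);
                return (ENOMEM);
        }

        /* xpt_bus_register() is conventionally called with the SIM lock held. */
        mtx_lock(&sc->mtx);
        if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS) {
                cam_sim_free(sc->sim, /*free_devq*/TRUE);
                mtx_unlock(&sc->mtx);
                return (ENXIO);
        }
        mtx_unlock(&sc->mtx);
        return (0);
}
#endif
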
4102 int32_t
4103 xpt_bus_deregister(path_id_t pathid)
4104 {
4105         struct cam_path bus_path;
4106         cam_status status;
4107
4108         status = xpt_compile_path(&bus_path, NULL, pathid,
4109                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4110         if (status != CAM_REQ_CMP)
4111                 return (status);
4112
4113         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4114         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4115
4116         /* Release the reference count held while registered. */
4117         xpt_release_bus(bus_path.bus);
4118         xpt_release_path(&bus_path);
4119
4120         return (CAM_REQ_CMP);
4121 }
4122
4123 static path_id_t
4124 xptnextfreepathid(void)
4125 {
4126         struct cam_eb *bus;
4127         path_id_t pathid;
4128         const char *strval;
4129
4130         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4131         pathid = 0;
4132         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4133 retry:
4134         /* Find an unoccupied pathid */
4135         while (bus != NULL && bus->path_id <= pathid) {
4136                 if (bus->path_id == pathid)
4137                         pathid++;
4138                 bus = TAILQ_NEXT(bus, links);
4139         }
4140
4141         /*
4142          * Ensure that this pathid is not reserved for
4143          * a bus that may be registered in the future.
4144          */
4145         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4146                 ++pathid;
4147                 /* Start the search over */
4148                 goto retry;
4149         }
4150         return (pathid);
4151 }
4152
4153 static path_id_t
4154 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4155 {
4156         path_id_t pathid;
4157         int i, dunit, val;
4158         char buf[32];
4159         const char *dname;
4160
4161         pathid = CAM_XPT_PATH_ID;
4162         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4163         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4164                 return (pathid);
4165         i = 0;
4166         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4167                 if (strcmp(dname, "scbus")) {
4168                         /* Avoid a bit of foot shooting. */
4169                         continue;
4170                 }
4171                 if (dunit < 0)          /* unwired?! */
4172                         continue;
4173                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4174                         if (sim_bus == val) {
4175                                 pathid = dunit;
4176                                 break;
4177                         }
4178                 } else if (sim_bus == 0) {
4179                         /* Unspecified matches bus 0 */
4180                         pathid = dunit;
4181                         break;
4182                 } else {
4183                         printf("Ambiguous scbus configuration for %s%d "
4184                                "bus %d, cannot wire down.  The kernel "
4185                                "config entry for scbus%d should "
4186                                "specify a controller bus.\n"
4187                                "Scbus will be assigned dynamically.\n",
4188                                sim_name, sim_unit, sim_bus, dunit);
4189                         break;
4190                 }
4191         }
4192
4193         if (pathid == CAM_XPT_PATH_ID)
4194                 pathid = xptnextfreepathid();
4195         return (pathid);
4196 }
4197
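/*
 * Wiring example (illustrative): the "scbus" resource hints consulted
 * above let the administrator pin a bus to a fixed path_id, e.g. in
 * /boot/device.hints:
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * With these, xptpathid() returns 0 for ahc0's first bus, and
 * xptnextfreepathid() skips any path_id that has been wired this way.
 */
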
4198 static const char *
4199 xpt_async_string(u_int32_t async_code)
4200 {
4201
4202         switch (async_code) {
4203         case AC_BUS_RESET: return ("AC_BUS_RESET");
4204         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4205         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4206         case AC_SENT_BDR: return ("AC_SENT_BDR");
4207         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4208         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4209         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4210         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4211         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4212         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4213         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4214         case AC_CONTRACT: return ("AC_CONTRACT");
4215         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4216         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4217         }
4218         return ("AC_UNKNOWN");
4219 }
4220
4221 static int
4222 xpt_async_size(u_int32_t async_code)
4223 {
4224
4225         switch (async_code) {
4226         case AC_BUS_RESET: return (0);
4227         case AC_UNSOL_RESEL: return (0);
4228         case AC_SCSI_AEN: return (0);
4229         case AC_SENT_BDR: return (0);
4230         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4231         case AC_PATH_DEREGISTERED: return (0);
4232         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4233         case AC_LOST_DEVICE: return (0);
4234         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4235         case AC_INQ_CHANGED: return (0);
4236         case AC_GETDEV_CHANGED: return (0);
4237         case AC_CONTRACT: return (sizeof(struct ac_contract));
4238         case AC_ADVINFO_CHANGED: return (-1);
4239         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4240         }
4241         return (0);
4242 }
4243
4244 static int
4245 xpt_async_process_dev(struct cam_ed *device, void *arg)
4246 {
4247         union ccb *ccb = arg;
4248         struct cam_path *path = ccb->ccb_h.path;
4249         void *async_arg = ccb->casync.async_arg_ptr;
4250         u_int32_t async_code = ccb->casync.async_code;
4251         int relock;
4252
4253         if (path->device != device
4254          && path->device->lun_id != CAM_LUN_WILDCARD
4255          && device->lun_id != CAM_LUN_WILDCARD)
4256                 return (1);
4257
4258         /*
4259          * The async callback could free the device.
4260          * If it is a broadcast async, it doesn't hold
4261          * device reference, so take our own reference.
4262          */
4263         xpt_acquire_device(device);
4264
4265         /*
4266          * If async for specific device is to be delivered to
4267          * the wildcard client, take the specific device lock.
4268          * XXX: We may need a way for client to specify it.
4269          */
4270         if ((device->lun_id == CAM_LUN_WILDCARD &&
4271              path->device->lun_id != CAM_LUN_WILDCARD) ||
4272             (device->target->target_id == CAM_TARGET_WILDCARD &&
4273              path->target->target_id != CAM_TARGET_WILDCARD) ||
4274             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4275              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4276                 mtx_unlock(&device->device_mtx);
4277                 xpt_path_lock(path);
4278                 relock = 1;
4279         } else
4280                 relock = 0;
4281
4282         (*(device->target->bus->xport->ops->async))(async_code,
4283             device->target->bus, device->target, device, async_arg);
4284         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4285
4286         if (relock) {
4287                 xpt_path_unlock(path);
4288                 mtx_lock(&device->device_mtx);
4289         }
4290         xpt_release_device(device);
4291         return (1);
4292 }
4293
4294 static int
4295 xpt_async_process_tgt(struct cam_et *target, void *arg)
4296 {
4297         union ccb *ccb = arg;
4298         struct cam_path *path = ccb->ccb_h.path;
4299
4300         if (path->target != target
4301          && path->target->target_id != CAM_TARGET_WILDCARD
4302          && target->target_id != CAM_TARGET_WILDCARD)
4303                 return (1);
4304
4305         if (ccb->casync.async_code == AC_SENT_BDR) {
4306                 /* Update our notion of when the last reset occurred */
4307                 microtime(&target->last_reset);
4308         }
4309
4310         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4311 }
4312
4313 static void
4314 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4315 {
4316         struct cam_eb *bus;
4317         struct cam_path *path;
4318         void *async_arg;
4319         u_int32_t async_code;
4320
4321         path = ccb->ccb_h.path;
4322         async_code = ccb->casync.async_code;
4323         async_arg = ccb->casync.async_arg_ptr;
4324         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4325             ("xpt_async(%s)\n", xpt_async_string(async_code)));
4326         bus = path->bus;
4327
4328         if (async_code == AC_BUS_RESET) {
4329                 /* Update our notion of when the last reset occurred */
4330                 microtime(&bus->last_reset);
4331         }
4332
4333         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4334
4335         /*
4336          * If this wasn't a fully wildcarded async, tell all
4337          * clients that want all async events.
4338          */
4339         if (bus != xpt_periph->path->bus) {
4340                 xpt_path_lock(xpt_periph->path);
4341                 xpt_async_process_dev(xpt_periph->path->device, ccb);
4342                 xpt_path_unlock(xpt_periph->path);
4343         }
4344
4345         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4346                 xpt_release_devq(path, 1, TRUE);
4347         else
4348                 xpt_release_simq(path->bus->sim, TRUE);
4349         if (ccb->casync.async_arg_size > 0)
4350                 free(async_arg, M_CAMXPT);
4351         xpt_free_path(path);
4352         xpt_free_ccb(ccb);
4353 }
4354
4355 static void
4356 xpt_async_bcast(struct async_list *async_head,
4357                 u_int32_t async_code,
4358                 struct cam_path *path, void *async_arg)
4359 {
4360         struct async_node *cur_entry;
4361         struct mtx *mtx;
4362
4363         cur_entry = SLIST_FIRST(async_head);
4364         while (cur_entry != NULL) {
4365                 struct async_node *next_entry;
4366                 /*
4367                  * Grab the next list entry before we call the current
4368                  * entry's callback.  This is because the callback function
4369                  * can delete its async callback entry.
4370                  */
4371                 next_entry = SLIST_NEXT(cur_entry, links);
4372                 if ((cur_entry->event_enable & async_code) != 0) {
4373                         mtx = cur_entry->event_lock ?
4374                             path->device->sim->mtx : NULL;
4375                         if (mtx)
4376                                 mtx_lock(mtx);
4377                         cur_entry->callback(cur_entry->callback_arg,
4378                                             async_code, path,
4379                                             async_arg);
4380                         if (mtx)
4381                                 mtx_unlock(mtx);
4382                 }
4383                 cur_entry = next_entry;
4384         }
4385 }
4386
4387 void
4388 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4389 {
4390         union ccb *ccb;
4391         int size;
4392
4393         ccb = xpt_alloc_ccb_nowait();
4394         if (ccb == NULL) {
4395                 xpt_print(path, "Can't allocate CCB to send %s\n",
4396                     xpt_async_string(async_code));
4397                 return;
4398         }
4399
4400         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4401                 xpt_print(path, "Can't allocate path to send %s\n",
4402                     xpt_async_string(async_code));
4403                 xpt_free_ccb(ccb);
4404                 return;
4405         }
4406         ccb->ccb_h.path->periph = NULL;
4407         ccb->ccb_h.func_code = XPT_ASYNC;
4408         ccb->ccb_h.cbfcnp = xpt_async_process;
4409         ccb->ccb_h.flags |= CAM_UNLOCKED;
4410         ccb->casync.async_code = async_code;
4411         ccb->casync.async_arg_size = 0;
4412         size = xpt_async_size(async_code);
4413         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4414             ("xpt_async: func %#x %s async_code %d %s\n",
4415                 ccb->ccb_h.func_code,
4416                 xpt_action_name(ccb->ccb_h.func_code),
4417                 async_code,
4418                 xpt_async_string(async_code)));
4419         if (size > 0 && async_arg != NULL) {
4420                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4421                 if (ccb->casync.async_arg_ptr == NULL) {
4422                         xpt_print(path, "Can't allocate argument to send %s\n",
4423                             xpt_async_string(async_code));
4424                         xpt_free_path(ccb->ccb_h.path);
4425                         xpt_free_ccb(ccb);
4426                         return;
4427                 }
4428                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4429                 ccb->casync.async_arg_size = size;
4430         } else if (size < 0) {
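                /*
                 * A negative size (e.g. for AC_ADVINFO_CHANGED) means the
                 * argument is passed through by reference rather than
                 * copied, and must not be freed on completion.
                 */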
4431                 ccb->casync.async_arg_ptr = async_arg;
4432                 ccb->casync.async_arg_size = size;
4433         }
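        /*
         * Freeze the relevant queue so that new I/O cannot overtake this
         * event; the matching release happens in xpt_async_process() once
         * the notification has been delivered.
         */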
4434         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4435                 xpt_freeze_devq(path, 1);
4436         else
4437                 xpt_freeze_simq(path->bus->sim, 1);
4438         xpt_action(ccb);
4439 }
4440
4441 static void
4442 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4443                       struct cam_et *target, struct cam_ed *device,
4444                       void *async_arg)
4445 {
4446
4447         /*
4448          * We only need to handle events for real devices.
4449          */
4450         if (target->target_id == CAM_TARGET_WILDCARD
4451          || device->lun_id == CAM_LUN_WILDCARD)
4452                 return;
4453
4454         printf("%s called\n", __func__);
4455 }
4456
4457 static uint32_t
4458 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4459 {
4460         struct cam_devq *devq;
4461         uint32_t freeze;
4462
4463         devq = dev->sim->devq;
4464         mtx_assert(&devq->send_mtx, MA_OWNED);
4465         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4466             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4467             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4468         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4469         /* Remove frozen device from sendq. */
4470         if (device_is_queued(dev))
4471                 camq_remove(&devq->send_queue, dev->devq_entry.index);
4472         return (freeze);
4473 }
4474
4475 u_int32_t
4476 xpt_freeze_devq(struct cam_path *path, u_int count)
4477 {
4478         struct cam_ed   *dev = path->device;
4479         struct cam_devq *devq;
4480         uint32_t         freeze;
4481
4482         devq = dev->sim->devq;
4483         mtx_lock(&devq->send_mtx);
4484         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4485         freeze = xpt_freeze_devq_device(dev, count);
4486         mtx_unlock(&devq->send_mtx);
4487         return (freeze);
4488 }
4489
4490 u_int32_t
4491 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4492 {
4493         struct cam_devq *devq;
4494         uint32_t         freeze;
4495
4496         devq = sim->devq;
4497         mtx_lock(&devq->send_mtx);
4498         freeze = (devq->send_queue.qfrozen_cnt += count);
4499         mtx_unlock(&devq->send_mtx);
4500         return (freeze);
4501 }
4502
4503 static void
4504 xpt_release_devq_timeout(void *arg)
4505 {
4506         struct cam_ed *dev;
4507         struct cam_devq *devq;
4508
4509         dev = (struct cam_ed *)arg;
4510         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4511         devq = dev->sim->devq;
4512         mtx_assert(&devq->send_mtx, MA_OWNED);
4513         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4514                 xpt_run_devq(devq);
4515 }
4516
4517 void
4518 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4519 {
4520         struct cam_ed *dev;
4521         struct cam_devq *devq;
4522
4523         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4524             count, run_queue));
4525         dev = path->device;
4526         devq = dev->sim->devq;
4527         mtx_lock(&devq->send_mtx);
4528         if (xpt_release_devq_device(dev, count, run_queue))
4529                 xpt_run_devq(dev->sim->devq);
4530         mtx_unlock(&devq->send_mtx);
4531 }
4532
4533 static int
4534 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4535 {
4536
4537         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4538         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4539             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4540             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4541         if (count > dev->ccbq.queue.qfrozen_cnt) {
4542 #ifdef INVARIANTS
4543                 printf("xpt_release_devq(): requested %u > present %u\n",
4544                     count, dev->ccbq.queue.qfrozen_cnt);
4545 #endif
4546                 count = dev->ccbq.queue.qfrozen_cnt;
4547         }
4548         dev->ccbq.queue.qfrozen_cnt -= count;
4549         if (dev->ccbq.queue.qfrozen_cnt == 0) {
4550                 /*
4551                  * No longer need to wait for a successful
4552                  * command completion.
4553                  */
4554                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4555                 /*
4556                  * Remove any timeouts that might be scheduled
4557                  * to release this queue.
4558                  */
4559                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4560                         callout_stop(&dev->callout);
4561                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4562                 }
4563                 /*
4564                  * Now that we are unfrozen schedule the
4565                  * device so any pending transactions are
4566                  * run.
4567                  */
4568                 xpt_schedule_devq(dev->sim->devq, dev);
4569         } else
4570                 run_queue = 0;
4571         return (run_queue);
4572 }
4573
4574 void
4575 xpt_release_simq(struct cam_sim *sim, int run_queue)
4576 {
4577         struct cam_devq *devq;
4578
4579         devq = sim->devq;
4580         mtx_lock(&devq->send_mtx);
4581         if (devq->send_queue.qfrozen_cnt <= 0) {
4582 #ifdef INVARIANTS
4583                 printf("xpt_release_simq: requested 1 > present %u\n",
4584                     devq->send_queue.qfrozen_cnt);
4585 #endif
4586         } else
4587                 devq->send_queue.qfrozen_cnt--;
4588         if (devq->send_queue.qfrozen_cnt == 0) {
4589                 /*
4590                  * If there is a timeout scheduled to release this
4591                  * sim queue, remove it.  The queue frozen count is
4592                  * already at 0.
4593                  */
4594                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4595                         callout_stop(&sim->callout);
4596                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4597                 }
4598                 if (run_queue) {
4599                         /*
4600                          * Now that we are unfrozen run the send queue.
4601                          */
4602                         xpt_run_devq(sim->devq);
4603                 }
4604         }
4605         mtx_unlock(&devq->send_mtx);
4606 }
4607
4608 void
4609 xpt_done(union ccb *done_ccb)
4610 {
4611         struct cam_doneq *queue;
4612         int     run, hash;
4613
4614 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4615         if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
4616             done_ccb->csio.bio != NULL)
4617                 biotrack(done_ccb->csio.bio, __func__);
4618 #endif
4619
4620         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4621             ("xpt_done: func= %#x %s status %#x\n",
4622                 done_ccb->ccb_h.func_code,
4623                 xpt_action_name(done_ccb->ccb_h.func_code),
4624                 done_ccb->ccb_h.status));
4625         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4626                 return;
4627
4628         /* Store the time the ccb was in the sim */
4629         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
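        /*
         * Hash on the path so that all completions for a given device are
         * funneled through the same done queue.
         */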
4630         hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4631             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4632         queue = &cam_doneqs[hash];
4633         mtx_lock(&queue->cam_doneq_mtx);
4634         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4635         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4636         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4637         mtx_unlock(&queue->cam_doneq_mtx);
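        /*
         * If the kernel is dumping, completions are reaped by polling and
         * the doneq threads may not be able to run, so skip the wakeup.
         */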
4638         if (run && !dumping)
4639                 wakeup(&queue->cam_doneq);
4640 }
4641
4642 void
4643 xpt_done_direct(union ccb *done_ccb)
4644 {
4645
4646         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4647             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4648         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4649                 return;
4650
4651         /* Store the time the ccb was in the sim */
4652         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4653         done_ccb->ccb_h.status |= CAM_QOS_VALID;
4654         xpt_done_process(&done_ccb->ccb_h);
4655 }
4656
4657 union ccb *
4658 xpt_alloc_ccb(void)
4659 {
4660         union ccb *new_ccb;
4661
4662         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4663         return (new_ccb);
4664 }
4665
4666 union ccb *
4667 xpt_alloc_ccb_nowait(void)
4668 {
4669         union ccb *new_ccb;
4670
4671         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4672         return (new_ccb);
4673 }
4674
4675 void
4676 xpt_free_ccb(union ccb *free_ccb)
4677 {
4678         free(free_ccb, M_CAMCCB);
4679 }
4680
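/*
 * Example (an illustrative sketch, not built): immediate, non-queued
 * requests such as XPT_PATH_INQ can use an on-stack CCB; CCBs obtained
 * from xpt_alloc_ccb() follow the same setup/action pattern and must be
 * returned with xpt_free_ccb() when done.
 */
#if 0
static void
example_path_inq(struct cam_path *path)
{
        struct ccb_pathinq cpi;

        xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
        cpi.ccb_h.func_code = XPT_PATH_INQ;
        xpt_action((union ccb *)&cpi);
        if (cpi.ccb_h.status == CAM_REQ_CMP)
                printf("bus transport is %d\n", cpi.transport);
}
#endif
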
4681 /* Private XPT functions */
4682
4683 /*
4684  * Get a CAM control block for the caller. Charge the structure to the device
4685  * referenced by the path.  If we don't have sufficient resources to allocate
4686  * more ccbs, we return NULL.
4687  */
4688 static union ccb *
4689 xpt_get_ccb_nowait(struct cam_periph *periph)
4690 {
4691         union ccb *new_ccb;
4692
4693         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4694         if (new_ccb == NULL)
4695                 return (NULL);
4696         periph->periph_allocated++;
4697         cam_ccbq_take_opening(&periph->path->device->ccbq);
4698         return (new_ccb);
4699 }
4700
4701 static union ccb *
4702 xpt_get_ccb(struct cam_periph *periph)
4703 {
4704         union ccb *new_ccb;
4705
4706         cam_periph_unlock(periph);
4707         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4708         cam_periph_lock(periph);
4709         periph->periph_allocated++;
4710         cam_ccbq_take_opening(&periph->path->device->ccbq);
4711         return (new_ccb);
4712 }
4713
4714 union ccb *
4715 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4716 {
4717         struct ccb_hdr *ccb_h;
4718
4719         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4720         cam_periph_assert(periph, MA_OWNED);
4721         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4722             ccb_h->pinfo.priority != priority) {
4723                 if (priority < periph->immediate_priority) {
4724                         periph->immediate_priority = priority;
4725                         xpt_run_allocq(periph, 0);
4726                 } else
4727                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4728                             "cgticb", 0);
4729         }
4730         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4731         return ((union ccb *)ccb_h);
4732 }
4733
4734 static void
4735 xpt_acquire_bus(struct cam_eb *bus)
4736 {
4737
4738         xpt_lock_buses();
4739         bus->refcount++;
4740         xpt_unlock_buses();
4741 }
4742
4743 static void
4744 xpt_release_bus(struct cam_eb *bus)
4745 {
4746
4747         xpt_lock_buses();
4748         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4749         if (--bus->refcount > 0) {
4750                 xpt_unlock_buses();
4751                 return;
4752         }
4753         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4754         xsoftc.bus_generation++;
4755         xpt_unlock_buses();
4756         KASSERT(TAILQ_EMPTY(&bus->et_entries),
4757             ("destroying bus, but target list is not empty"));
4758         cam_sim_release(bus->sim);
4759         mtx_destroy(&bus->eb_mtx);
4760         free(bus, M_CAMXPT);
4761 }
4762
4763 static struct cam_et *
4764 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4765 {
4766         struct cam_et *cur_target, *target;
4767
4768         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4769         mtx_assert(&bus->eb_mtx, MA_OWNED);
4770         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4771                                          M_NOWAIT|M_ZERO);
4772         if (target == NULL)
4773                 return (NULL);
4774
4775         TAILQ_INIT(&target->ed_entries);
4776         target->bus = bus;
4777         target->target_id = target_id;
4778         target->refcount = 1;
4779         target->generation = 0;
4780         target->luns = NULL;
4781         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4782         timevalclear(&target->last_reset);
4783         /*
4784          * Hold a reference to our parent bus so it
4785          * will not go away before we do.
4786          */
4787         bus->refcount++;
4788
4789         /* Insertion sort into our bus's target list */
4790         cur_target = TAILQ_FIRST(&bus->et_entries);
4791         while (cur_target != NULL && cur_target->target_id < target_id)
4792                 cur_target = TAILQ_NEXT(cur_target, links);
4793         if (cur_target != NULL) {
4794                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4795         } else {
4796                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4797         }
4798         bus->generation++;
4799         return (target);
4800 }
4801
4802 static void
4803 xpt_acquire_target(struct cam_et *target)
4804 {
4805         struct cam_eb *bus = target->bus;
4806
4807         mtx_lock(&bus->eb_mtx);
4808         target->refcount++;
4809         mtx_unlock(&bus->eb_mtx);
4810 }
4811
4812 static void
4813 xpt_release_target(struct cam_et *target)
4814 {
4815         struct cam_eb *bus = target->bus;
4816
4817         mtx_lock(&bus->eb_mtx);
4818         if (--target->refcount > 0) {
4819                 mtx_unlock(&bus->eb_mtx);
4820                 return;
4821         }
4822         TAILQ_REMOVE(&bus->et_entries, target, links);
4823         bus->generation++;
4824         mtx_unlock(&bus->eb_mtx);
4825         KASSERT(TAILQ_EMPTY(&target->ed_entries),
4826             ("destroying target, but device list is not empty"));
4827         xpt_release_bus(bus);
4828         mtx_destroy(&target->luns_mtx);
4829         if (target->luns)
4830                 free(target->luns, M_CAMXPT);
4831         free(target, M_CAMXPT);
4832 }
4833
4834 static struct cam_ed *
4835 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4836                          lun_id_t lun_id)
4837 {
4838         struct cam_ed *device;
4839
4840         device = xpt_alloc_device(bus, target, lun_id);
4841         if (device == NULL)
4842                 return (NULL);
4843
4844         device->mintags = 1;
4845         device->maxtags = 1;
4846         return (device);
4847 }
4848
4849 static void
4850 xpt_destroy_device(void *context, int pending)
4851 {
4852         struct cam_ed   *device = context;
4853
4854         mtx_lock(&device->device_mtx);
4855         mtx_destroy(&device->device_mtx);
4856         free(device, M_CAMDEV);
4857 }
4858
4859 struct cam_ed *
4860 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4861 {
4862         struct cam_ed   *cur_device, *device;
4863         struct cam_devq *devq;
4864         cam_status status;
4865
4866         mtx_assert(&bus->eb_mtx, MA_OWNED);
4867         /* Make space for us in the device queue on our bus */
4868         devq = bus->sim->devq;
4869         mtx_lock(&devq->send_mtx);
4870         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4871         mtx_unlock(&devq->send_mtx);
4872         if (status != CAM_REQ_CMP)
4873                 return (NULL);
4874
4875         device = (struct cam_ed *)malloc(sizeof(*device),
4876                                          M_CAMDEV, M_NOWAIT|M_ZERO);
4877         if (device == NULL)
4878                 return (NULL);
4879
4880         cam_init_pinfo(&device->devq_entry);
4881         device->target = target;
4882         device->lun_id = lun_id;
4883         device->sim = bus->sim;
4884         if (cam_ccbq_init(&device->ccbq,
4885                           bus->sim->max_dev_openings) != 0) {
4886                 free(device, M_CAMDEV);
4887                 return (NULL);
4888         }
4889         SLIST_INIT(&device->asyncs);
4890         SLIST_INIT(&device->periphs);
4891         device->generation = 0;
4892         device->flags = CAM_DEV_UNCONFIGURED;
4893         device->tag_delay_count = 0;
4894         device->tag_saved_openings = 0;
4895         device->refcount = 1;
4896         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4897         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4898         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4899         /*
4900          * Hold a reference to our parent target so it
4901          * will not go away before we do.
4902          */
4903         target->refcount++;
4904
4905         cur_device = TAILQ_FIRST(&target->ed_entries);
4906         while (cur_device != NULL && cur_device->lun_id < lun_id)
4907                 cur_device = TAILQ_NEXT(cur_device, links);
4908         if (cur_device != NULL)
4909                 TAILQ_INSERT_BEFORE(cur_device, device, links);
4910         else
4911                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4912         target->generation++;
4913         return (device);
4914 }
4915
4916 void
4917 xpt_acquire_device(struct cam_ed *device)
4918 {
4919         struct cam_eb *bus = device->target->bus;
4920
4921         mtx_lock(&bus->eb_mtx);
4922         device->refcount++;
4923         mtx_unlock(&bus->eb_mtx);
4924 }
4925
4926 void
4927 xpt_release_device(struct cam_ed *device)
4928 {
4929         struct cam_eb *bus = device->target->bus;
4930         struct cam_devq *devq;
4931
4932         mtx_lock(&bus->eb_mtx);
4933         if (--device->refcount > 0) {
4934                 mtx_unlock(&bus->eb_mtx);
4935                 return;
4936         }
4937
4938         TAILQ_REMOVE(&device->target->ed_entries, device, links);
4939         device->target->generation++;
4940         mtx_unlock(&bus->eb_mtx);
4941
4942         /* Release our slot in the devq */
4943         devq = bus->sim->devq;
4944         mtx_lock(&devq->send_mtx);
4945         cam_devq_resize(devq, devq->send_queue.array_size - 1);
4946
4947         KASSERT(SLIST_EMPTY(&device->periphs),
4948             ("destroying device, but periphs list is not empty"));
4949         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4950             ("destroying device while still queued for ccbs"));
4951
4952         /* The send_mtx must be held when accessing the callout */
4953         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4954                 callout_stop(&device->callout);
4955
4956         mtx_unlock(&devq->send_mtx);
4957
4958         xpt_release_target(device->target);
4959
4960         cam_ccbq_fini(&device->ccbq);
4961         /*
4962          * Free allocated memory.  free(9) does nothing if the
4963          * supplied pointer is NULL, so it is safe to call without
4964          * checking.
4965          */
4966         free(device->supported_vpds, M_CAMXPT);
4967         free(device->device_id, M_CAMXPT);
4968         free(device->ext_inq, M_CAMXPT);
4969         free(device->physpath, M_CAMXPT);
4970         free(device->rcap_buf, M_CAMXPT);
4971         free(device->serial_num, M_CAMXPT);
4972         free(device->nvme_data, M_CAMXPT);
4973         free(device->nvme_cdata, M_CAMXPT);
4974         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4975 }
4976
4977 u_int32_t
4978 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4979 {
4980         int     result;
4981         struct  cam_ed *dev;
4982
4983         dev = path->device;
4984         mtx_lock(&dev->sim->devq->send_mtx);
4985         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4986         mtx_unlock(&dev->sim->devq->send_mtx);
4987         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4988          || (dev->inq_flags & SID_CmdQue) != 0)
4989                 dev->tag_saved_openings = newopenings;
4990         return (result);
4991 }
4992
4993 static struct cam_eb *
4994 xpt_find_bus(path_id_t path_id)
4995 {
4996         struct cam_eb *bus;
4997
4998         xpt_lock_buses();
4999         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
5000              bus != NULL;
5001              bus = TAILQ_NEXT(bus, links)) {
5002                 if (bus->path_id == path_id) {
5003                         bus->refcount++;
5004                         break;
5005                 }
5006         }
5007         xpt_unlock_buses();
5008         return (bus);
5009 }
5010
5011 static struct cam_et *
5012 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5013 {
5014         struct cam_et *target;
5015
5016         mtx_assert(&bus->eb_mtx, MA_OWNED);
5017         for (target = TAILQ_FIRST(&bus->et_entries);
5018              target != NULL;
5019              target = TAILQ_NEXT(target, links)) {
5020                 if (target->target_id == target_id) {
5021                         target->refcount++;
5022                         break;
5023                 }
5024         }
5025         return (target);
5026 }
5027
5028 static struct cam_ed *
5029 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5030 {
5031         struct cam_ed *device;
5032
5033         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
5034         for (device = TAILQ_FIRST(&target->ed_entries);
5035              device != NULL;
5036              device = TAILQ_NEXT(device, links)) {
5037                 if (device->lun_id == lun_id) {
5038                         device->refcount++;
5039                         break;
5040                 }
5041         }
5042         return (device);
5043 }
5044
5045 void
5046 xpt_start_tags(struct cam_path *path)
5047 {
5048         struct ccb_relsim crs;
5049         struct cam_ed *device;
5050         struct cam_sim *sim;
5051         int    newopenings;
5052
5053         device = path->device;
5054         sim = path->bus->sim;
5055         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5056         xpt_freeze_devq(path, /*count*/1);
5057         device->inq_flags |= SID_CmdQue;
5058         if (device->tag_saved_openings != 0)
5059                 newopenings = device->tag_saved_openings;
5060         else
5061                 newopenings = min(device->maxtags,
5062                                   sim->max_tagged_dev_openings);
5063         xpt_dev_ccbq_resize(path, newopenings);
5064         xpt_async(AC_GETDEV_CHANGED, path, NULL);
5065         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5066         crs.ccb_h.func_code = XPT_REL_SIMQ;
5067         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5068         crs.openings
5069             = crs.release_timeout
5070             = crs.qfrozen_cnt
5071             = 0;
5072         xpt_action((union ccb *)&crs);
5073 }
5074
5075 void
5076 xpt_stop_tags(struct cam_path *path)
5077 {
5078         struct ccb_relsim crs;
5079         struct cam_ed *device;
5080         struct cam_sim *sim;
5081
5082         device = path->device;
5083         sim = path->bus->sim;
5084         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5085         device->tag_delay_count = 0;
5086         xpt_freeze_devq(path, /*count*/1);
5087         device->inq_flags &= ~SID_CmdQue;
5088         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
5089         xpt_async(AC_GETDEV_CHANGED, path, NULL);
5090         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5091         crs.ccb_h.func_code = XPT_REL_SIMQ;
5092         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5093         crs.openings
5094             = crs.release_timeout
5095             = crs.qfrozen_cnt
5096             = 0;
5097         xpt_action((union ccb *)&crs);
5098 }
5099
5100 /*
5101  * Assume that all possible buses have been detected by this time, so
5102  * allow boot as soon as they have all been scanned.
5103  */
5104 static void
5105 xpt_boot_delay(void *arg)
5106 {
5107
5108         xpt_release_boot();
5109 }
5110
5111 /*
5112  * Now that all config hooks have completed, start boot_delay timer,
5113  * waiting for possibly still undetected buses (USB) to appear.
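 * The delay length is taken from the kern.cam.boot_delay tunable (in ms).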
5114  */
5115 static void
5116 xpt_ch_done(void *arg)
5117 {
5118
5119         callout_init(&xsoftc.boot_callout, 1);
5120         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
5121             xpt_boot_delay, NULL, 0);
5122 }
5123 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5124
5125 /*
5126  * Now that interrupts are enabled, go find our devices
5127  */
5128 static void
5129 xpt_config(void *arg)
5130 {
5131         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5132                 printf("xpt_config: failed to create taskqueue thread.\n");
5133
5134         /* Setup debugging path */
5135         if (cam_dflags != CAM_DEBUG_NONE) {
5136                 if (xpt_create_path(&cam_dpath, NULL,
5137                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5138                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5139                         printf("xpt_config: xpt_create_path() failed for debug"
5140                                " target %d:%d:%d, debugging disabled\n",
5141                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5142                         cam_dflags = CAM_DEBUG_NONE;
5143                 }
5144         } else
5145                 cam_dpath = NULL;
5146
5147         periphdriver_init(1);
5148         xpt_hold_boot();
5149
5150         /* Fire up rescan thread. */
5151         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5152             "cam", "scanner")) {
5153                 printf("xpt_config: failed to create rescan thread.\n");
5154         }
5155 }
5156
5157 void
5158 xpt_hold_boot_locked(void)
5159 {
5160
5161         if (xsoftc.buses_to_config++ == 0)
5162                 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5163 }
5164
5165 void
5166 xpt_hold_boot(void)
5167 {
5168
5169         xpt_lock_buses();
5170         xpt_hold_boot_locked();
5171         xpt_unlock_buses();
5172 }
5173
5174 void
5175 xpt_release_boot(void)
5176 {
5177
5178         xpt_lock_buses();
5179         if (--xsoftc.buses_to_config == 0) {
5180                 if (xsoftc.buses_config_done == 0) {
5181                         xsoftc.buses_config_done = 1;
5182                         xsoftc.buses_to_config++;
5183                         TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5184                             NULL);
5185                         taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5186                 } else
5187                         root_mount_rel(&xsoftc.xpt_rootmount);
5188         }
5189         xpt_unlock_buses();
5190 }
5191
5192 /*
5193  * If the given device only has one peripheral attached to it, and if that
5194  * peripheral is the passthrough driver, announce it.  This ensures that the
5195  * user sees some sort of announcement for every peripheral in their system.
5196  */
5197 static int
5198 xptpassannouncefunc(struct cam_ed *device, void *arg)
5199 {
5200         struct cam_periph *periph;
5201         int i;
5202
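        /* Count the peripherals attached to this device. */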
5203         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5204              periph = SLIST_NEXT(periph, periph_links), i++);
5205
5206         periph = SLIST_FIRST(&device->periphs);
5207         if ((i == 1)
5208          && (strncmp(periph->periph_name, "pass", 4) == 0))
5209                 xpt_announce_periph(periph, NULL);
5210
5211         return(1);
5212 }
5213
5214 static void
5215 xpt_finishconfig_task(void *context, int pending)
5216 {
5217
5218         periphdriver_init(2);
5219         /*
5220          * Check for devices with no "standard" peripheral driver
5221          * attached.  For any devices like that, announce the
5222          * passthrough driver so the user will see something.
5223          */
5224         if (!bootverbose)
5225                 xpt_for_all_devices(xptpassannouncefunc, NULL);
5226
5227         xpt_release_boot();
5228 }
5229
5230 cam_status
5231 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5232                    struct cam_path *path)
5233 {
5234         struct ccb_setasync csa;
5235         cam_status status;
5236         int xptpath = 0;
5237
5238         if (path == NULL) {
5239                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5240                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5241                 if (status != CAM_REQ_CMP)
5242                         return (status);
5243                 xpt_path_lock(path);
5244                 xptpath = 1;
5245         }
5246
5247         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5248         csa.ccb_h.func_code = XPT_SASYNC_CB;
5249         csa.event_enable = event;
5250         csa.callback = cbfunc;
5251         csa.callback_arg = cbarg;
5252         xpt_action((union ccb *)&csa);
5253         status = csa.ccb_h.status;
5254
5255         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5256             ("xpt_register_async: func %p\n", cbfunc));
5257
5258         if (xptpath) {
5259                 xpt_path_unlock(path);
5260                 xpt_free_path(path);
5261         }
5262
5263         if ((status == CAM_REQ_CMP) &&
5264             (csa.event_enable & AC_FOUND_DEVICE)) {
5265                 /*
5266                  * Get this peripheral up to date with all
5267                  * the currently existing devices.
5268                  */
5269                 xpt_for_all_devices(xptsetasyncfunc, &csa);
5270         }
5271         if ((status == CAM_REQ_CMP) &&
5272             (csa.event_enable & AC_PATH_REGISTERED)) {
5273                 /*
5274                  * Get this peripheral up to date with all
5275                  * the currently existing buses.
5276                  */
5277                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5278         }
5279
5280         return (status);
5281 }
5282
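/*
 * Example (an illustrative sketch, not built): registering an async
 * callback.  "mydriver_async" and "mydriver_init" are hypothetical.
 * Passing a NULL path registers against the wildcard XPT path, and, as
 * implemented above, an AC_FOUND_DEVICE registration is immediately
 * replayed for every device that already exists.
 */
#if 0
static void
mydriver_async(void *callback_arg, u_int32_t code, struct cam_path *path,
    void *arg)
{
        if (code == AC_FOUND_DEVICE) {
                /* For AC_FOUND_DEVICE, arg points to a struct ccb_getdev. */
        }
}

static void
mydriver_init(void)
{
        (void)xpt_register_async(AC_FOUND_DEVICE, mydriver_async,
            /*cbarg*/NULL, /*path*/NULL);
}
#endif
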
5283 static void
5284 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5285 {
5286         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5287
5288         switch (work_ccb->ccb_h.func_code) {
5289         /* Common cases first */
5290         case XPT_PATH_INQ:              /* Path routing inquiry */
5291         {
5292                 struct ccb_pathinq *cpi;
5293
5294                 cpi = &work_ccb->cpi;
5295                 cpi->version_num = 1; /* XXX??? */
5296                 cpi->hba_inquiry = 0;
5297                 cpi->target_sprt = 0;
5298                 cpi->hba_misc = 0;
5299                 cpi->hba_eng_cnt = 0;
5300                 cpi->max_target = 0;
5301                 cpi->max_lun = 0;
5302                 cpi->initiator_id = 0;
5303                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5304                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5305                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5306                 cpi->unit_number = sim->unit_number;
5307                 cpi->bus_id = sim->bus_id;
5308                 cpi->base_transfer_speed = 0;
5309                 cpi->protocol = PROTO_UNSPECIFIED;
5310                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5311                 cpi->transport = XPORT_UNSPECIFIED;
5312                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5313                 cpi->ccb_h.status = CAM_REQ_CMP;
5314                 break;
5315         }
5316         default:
5317                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5318                 break;
5319         }
5320         xpt_done(work_ccb);
5321 }
5322
5323 /*
5324  * The xpt as a "controller" has no interrupt sources, so polling
5325  * is a no-op.
5326  */
5327 static void
5328 xptpoll(struct cam_sim *sim)
5329 {
5330 }
5331
5332 void
5333 xpt_lock_buses(void)
5334 {
5335         mtx_lock(&xsoftc.xpt_topo_lock);
5336 }
5337
5338 void
5339 xpt_unlock_buses(void)
5340 {
5341         mtx_unlock(&xsoftc.xpt_topo_lock);
5342 }
5343
5344 struct mtx *
5345 xpt_path_mtx(struct cam_path *path)
5346 {
5347
5348         return (&path->device->device_mtx);
5349 }
5350
5351 static void
5352 xpt_done_process(struct ccb_hdr *ccb_h)
5353 {
5354         struct cam_sim *sim = NULL;
5355         struct cam_devq *devq = NULL;
5356         struct mtx *mtx = NULL;
5357
5358 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5359         struct ccb_scsiio *csio;
5360
5361         if (ccb_h->func_code == XPT_SCSI_IO) {
5362                 csio = &((union ccb *)ccb_h)->csio;
5363                 if (csio->bio != NULL)
5364                         biotrack(csio->bio, __func__);
5365         }
5366 #endif
5367
5368         if (ccb_h->flags & CAM_HIGH_POWER) {
5369                 struct highpowerlist    *hphead;
5370                 struct cam_ed           *device;
5371
5372                 mtx_lock(&xsoftc.xpt_highpower_lock);
5373                 hphead = &xsoftc.highpowerq;
5374
5375                 device = STAILQ_FIRST(hphead);
5376
5377                 /*
5378                  * Increment the count since this command is done.
5379                  */
5380                 xsoftc.num_highpower++;
5381
5382                 /*
5383                  * Any high powered commands queued up?
5384                  */
5385                 if (device != NULL) {
5386                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5387                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5388
5389                         mtx_lock(&device->sim->devq->send_mtx);
5390                         xpt_release_devq_device(device,
5391                                          /*count*/1, /*runqueue*/TRUE);
5392                         mtx_unlock(&device->sim->devq->send_mtx);
5393                 } else
5394                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5395         }
5396
5397         /*
5398          * Insulate against a race where the periph is destroyed but CCBs are
5399          * still not all processed. This shouldn't happen, but it allows for
5400          * better bug diagnostics when it does.
5401          */
5402         if (ccb_h->path->bus)
5403                 sim = ccb_h->path->bus->sim;
5404
5405         if (ccb_h->status & CAM_RELEASE_SIMQ) {
5406                 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request"));
5407                 xpt_release_simq(sim, /*run_queue*/FALSE);
5408                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
5409         }
5410
5411         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5412          && (ccb_h->status & CAM_DEV_QFRZN)) {
5413                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5414                 ccb_h->status &= ~CAM_DEV_QFRZN;
5415         }
5416
5417         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5418                 struct cam_ed *dev = ccb_h->path->device;
5419
5420                 if (sim)
5421                         devq = sim->devq;
5422                 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.",
5423                         ccb_h, xpt_action_name(ccb_h->func_code)));
5424
5425                 mtx_lock(&devq->send_mtx);
5426                 devq->send_active--;
5427                 devq->send_openings++;
5428                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5429
5430                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5431                   && (dev->ccbq.dev_active == 0))) {
5432                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5433                         xpt_release_devq_device(dev, /*count*/1,
5434                                          /*run_queue*/FALSE);
5435                 }
5436
5437                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5438                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5439                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5440                         xpt_release_devq_device(dev, /*count*/1,
5441                                          /*run_queue*/FALSE);
5442                 }
5443
5444                 if (!device_is_queued(dev))
5445                         (void)xpt_schedule_devq(devq, dev);
5446                 xpt_run_devq(devq);
5447                 mtx_unlock(&devq->send_mtx);
5448
5449                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5450                         mtx = xpt_path_mtx(ccb_h->path);
5451                         mtx_lock(mtx);
5452
5453                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5454                          && (--dev->tag_delay_count == 0))
5455                                 xpt_start_tags(ccb_h->path);
5456                 }
5457         }
5458
5459         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5460                 if (mtx == NULL) {
5461                         mtx = xpt_path_mtx(ccb_h->path);
5462                         mtx_lock(mtx);
5463                 }
5464         } else {
5465                 if (mtx != NULL) {
5466                         mtx_unlock(mtx);
5467                         mtx = NULL;
5468                 }
5469         }
5470
5471         /* Call the peripheral driver's callback */
5472         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5473         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5474         if (mtx != NULL)
5475                 mtx_unlock(mtx);
5476 }
5477
5478 void
5479 xpt_done_td(void *arg)
5480 {
5481         struct cam_doneq *queue = arg;
5482         struct ccb_hdr *ccb_h;
5483         STAILQ_HEAD(, ccb_hdr)  doneq;
5484
5485         STAILQ_INIT(&doneq);
5486         mtx_lock(&queue->cam_doneq_mtx);
5487         while (1) {
5488                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
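                        /*
                         * Setting cam_doneq_sleep lets xpt_done() know
                         * that a wakeup is needed when the next CCB is
                         * queued.
                         */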
5489                         queue->cam_doneq_sleep = 1;
5490                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5491                             PRIBIO, "-", 0);
5492                         queue->cam_doneq_sleep = 0;
5493                 }
5494                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5495                 mtx_unlock(&queue->cam_doneq_mtx);
5496
5497                 THREAD_NO_SLEEPING();
5498                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5499                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5500                         xpt_done_process(ccb_h);
5501                 }
5502                 THREAD_SLEEPING_OK();
5503
5504                 mtx_lock(&queue->cam_doneq_mtx);
5505         }
5506 }
5507
5508 static void
5509 camisr_runqueue(void)
5510 {
5511         struct  ccb_hdr *ccb_h;
5512         struct cam_doneq *queue;
5513         int i;
5514
5515         /* Process global queues. */
5516         for (i = 0; i < cam_num_doneqs; i++) {
5517                 queue = &cam_doneqs[i];
5518                 mtx_lock(&queue->cam_doneq_mtx);
5519                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5520                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5521                         mtx_unlock(&queue->cam_doneq_mtx);
5522                         xpt_done_process(ccb_h);
5523                         mtx_lock(&queue->cam_doneq_mtx);
5524                 }
5525                 mtx_unlock(&queue->cam_doneq_mtx);
5526         }
5527 }
5528
5529 struct kv {
5531         uint32_t v;
5532         const char *name;
5533 };
5534
5535 static struct kv map[] = {
5536         { XPT_NOOP, "XPT_NOOP" },
5537         { XPT_SCSI_IO, "XPT_SCSI_IO" },
5538         { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5539         { XPT_GDEVLIST, "XPT_GDEVLIST" },
5540         { XPT_PATH_INQ, "XPT_PATH_INQ" },
5541         { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5542         { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5543         { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5544         { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5545         { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5546         { XPT_DEBUG, "XPT_DEBUG" },
5547         { XPT_PATH_STATS, "XPT_PATH_STATS" },
5548         { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5549         { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5550         { XPT_ASYNC, "XPT_ASYNC" },
5551         { XPT_ABORT, "XPT_ABORT" },
5552         { XPT_RESET_BUS, "XPT_RESET_BUS" },
5553         { XPT_RESET_DEV, "XPT_RESET_DEV" },
5554         { XPT_TERM_IO, "XPT_TERM_IO" },
5555         { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5556         { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5557         { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5558         { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5559         { XPT_ATA_IO, "XPT_ATA_IO" },
5560         { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5561         { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5562         { XPT_NVME_IO, "XPT_NVME_IO" },
5563         { XPT_MMC_IO, "XPT_MMC_IO" },
5564         { XPT_SMP_IO, "XPT_SMP_IO" },
5565         { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5566         { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5567         { XPT_ENG_INQ, "XPT_ENG_INQ" },
5568         { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5569         { XPT_EN_LUN, "XPT_EN_LUN" },
5570         { XPT_TARGET_IO, "XPT_TARGET_IO" },
5571         { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5572         { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5573         { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5574         { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5575         { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5576         { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5577         { 0, 0 }
5578 };
5579
5580 const char *
5581 xpt_action_name(uint32_t action) 
5582 {
5583         static char buffer[32]; /* Only for unknown messages -- racy */
5584         struct kv *walker = map;
5585
5586         while (walker->name != NULL) {
5587                 if (walker->v == action)
5588                         return (walker->name);
5589                 walker++;
5590         }
5591
5592         snprintf(buffer, sizeof(buffer), "%#x", action);
5593         return (buffer);
5594 }