]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/cam/cam_xpt.c
MFV r320905: Import upstream fix for CVE-2017-11103.
[FreeBSD/FreeBSD.git] / sys / cam / cam_xpt.c
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 #include "opt_printf.h"
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/bio.h>
37 #include <sys/bus.h>
38 #include <sys/systm.h>
39 #include <sys/types.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h>
42 #include <sys/time.h>
43 #include <sys/conf.h>
44 #include <sys/fcntl.h>
45 #include <sys/interrupt.h>
46 #include <sys/proc.h>
47 #include <sys/sbuf.h>
48 #include <sys/smp.h>
49 #include <sys/taskqueue.h>
50
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/sysctl.h>
54 #include <sys/kthread.h>
55
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_queue.h>
60 #include <cam/cam_sim.h>
61 #include <cam/cam_xpt.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_xpt_internal.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_compat.h>
67
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 #include <cam/scsi/scsi_pass.h>
71
72 #include <machine/md_var.h>     /* geometry translation */
73 #include <machine/stdarg.h>     /* for xpt_print below */
74
75 #include "opt_cam.h"
76
77 /* Wild guess based on not wanting to grow the stack too much */
78 #define XPT_PRINT_MAXLEN        512
79 #ifdef PRINTF_BUFR_SIZE
80 #define XPT_PRINT_LEN   PRINTF_BUFR_SIZE
81 #else
82 #define XPT_PRINT_LEN   128
83 #endif
84 _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");
85
86 /*
87  * This is the maximum number of high powered commands (e.g. start unit)
88  * that can be outstanding at a particular time.
89  */
90 #ifndef CAM_MAX_HIGHPOWER
91 #define CAM_MAX_HIGHPOWER  4
92 #endif
93
94 /* Datastructures internal to the xpt layer */
95 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
96 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
97 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
98 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
99
100 /* Object for defering XPT actions to a taskqueue */
101 struct xpt_task {
102         struct task     task;
103         void            *data1;
104         uintptr_t       data2;
105 };
106
107 struct xpt_softc {
108         uint32_t                xpt_generation;
109
110         /* number of high powered commands that can go through right now */
111         struct mtx              xpt_highpower_lock;
112         STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
113         int                     num_highpower;
114
115         /* queue for handling async rescan requests. */
116         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
117         int buses_to_config;
118         int buses_config_done;
119         int announce_nosbuf;
120
121         /*
122          * Registered buses
123          *
124          * N.B., "busses" is an archaic spelling of "buses".  In new code
125          * "buses" is preferred.
126          */
127         TAILQ_HEAD(,cam_eb)     xpt_busses;
128         u_int                   bus_generation;
129
130         struct intr_config_hook *xpt_config_hook;
131
132         int                     boot_delay;
133         struct callout          boot_callout;
134
135         struct mtx              xpt_topo_lock;
136         struct mtx              xpt_lock;
137         struct taskqueue        *xpt_taskq;
138 };
139
140 typedef enum {
141         DM_RET_COPY             = 0x01,
142         DM_RET_FLAG_MASK        = 0x0f,
143         DM_RET_NONE             = 0x00,
144         DM_RET_STOP             = 0x10,
145         DM_RET_DESCEND          = 0x20,
146         DM_RET_ERROR            = 0x30,
147         DM_RET_ACTION_MASK      = 0xf0
148 } dev_match_ret;
149
150 typedef enum {
151         XPT_DEPTH_BUS,
152         XPT_DEPTH_TARGET,
153         XPT_DEPTH_DEVICE,
154         XPT_DEPTH_PERIPH
155 } xpt_traverse_depth;
156
157 struct xpt_traverse_config {
158         xpt_traverse_depth      depth;
159         void                    *tr_func;
160         void                    *tr_arg;
161 };
162
163 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
164 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
165 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
166 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
167 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
168
169 /* Transport layer configuration information */
170 static struct xpt_softc xsoftc;
171
172 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
173
174 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
175            &xsoftc.boot_delay, 0, "Bus registration wait time");
176 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
177             &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
178 SYSCTL_INT(_kern_cam, OID_AUTO, announce_nosbuf, CTLFLAG_RWTUN,
179             &xsoftc.announce_nosbuf, 0, "Don't use sbuf for announcements");
180
181 struct cam_doneq {
182         struct mtx_padalign     cam_doneq_mtx;
183         STAILQ_HEAD(, ccb_hdr)  cam_doneq;
184         int                     cam_doneq_sleep;
185 };
186
187 static struct cam_doneq cam_doneqs[MAXCPU];
188 static int cam_num_doneqs;
189 static struct proc *cam_proc;
190
191 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
192            &cam_num_doneqs, 0, "Number of completion queues/threads");
193
194 struct cam_periph *xpt_periph;
195
196 static periph_init_t xpt_periph_init;
197
198 static struct periph_driver xpt_driver =
199 {
200         xpt_periph_init, "xpt",
201         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
202         CAM_PERIPH_DRV_EARLY
203 };
204
205 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
206
207 static d_open_t xptopen;
208 static d_close_t xptclose;
209 static d_ioctl_t xptioctl;
210 static d_ioctl_t xptdoioctl;
211
212 static struct cdevsw xpt_cdevsw = {
213         .d_version =    D_VERSION,
214         .d_flags =      0,
215         .d_open =       xptopen,
216         .d_close =      xptclose,
217         .d_ioctl =      xptioctl,
218         .d_name =       "xpt",
219 };
220
221 /* Storage for debugging datastructures */
222 struct cam_path *cam_dpath;
223 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
224 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
225         &cam_dflags, 0, "Enabled debug flags");
226 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
227 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
228         &cam_debug_delay, 0, "Delay in us after each debug message");
229
230 /* Our boot-time initialization hook */
231 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
232
233 static moduledata_t cam_moduledata = {
234         "cam",
235         cam_module_event_handler,
236         NULL
237 };
238
239 static int      xpt_init(void *);
240
241 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
242 MODULE_VERSION(cam, 1);
243
244
245 static void             xpt_async_bcast(struct async_list *async_head,
246                                         u_int32_t async_code,
247                                         struct cam_path *path,
248                                         void *async_arg);
249 static path_id_t xptnextfreepathid(void);
250 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
251 static union ccb *xpt_get_ccb(struct cam_periph *periph);
252 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
253 static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
254 static void      xpt_run_allocq_task(void *context, int pending);
255 static void      xpt_run_devq(struct cam_devq *devq);
256 static timeout_t xpt_release_devq_timeout;
257 static void      xpt_release_simq_timeout(void *arg) __unused;
258 static void      xpt_acquire_bus(struct cam_eb *bus);
259 static void      xpt_release_bus(struct cam_eb *bus);
260 static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
261 static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
262                     int run_queue);
263 static struct cam_et*
264                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
265 static void      xpt_acquire_target(struct cam_et *target);
266 static void      xpt_release_target(struct cam_et *target);
267 static struct cam_eb*
268                  xpt_find_bus(path_id_t path_id);
269 static struct cam_et*
270                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
271 static struct cam_ed*
272                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
273 static void      xpt_config(void *arg);
274 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
275                                  u_int32_t new_priority);
276 static xpt_devicefunc_t xptpassannouncefunc;
277 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
278 static void      xptpoll(struct cam_sim *sim);
279 static void      camisr_runqueue(void);
280 static void      xpt_done_process(struct ccb_hdr *ccb_h);
281 static void      xpt_done_td(void *);
282 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
283                                     u_int num_patterns, struct cam_eb *bus);
284 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
285                                        u_int num_patterns,
286                                        struct cam_ed *device);
287 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
288                                        u_int num_patterns,
289                                        struct cam_periph *periph);
290 static xpt_busfunc_t    xptedtbusfunc;
291 static xpt_targetfunc_t xptedttargetfunc;
292 static xpt_devicefunc_t xptedtdevicefunc;
293 static xpt_periphfunc_t xptedtperiphfunc;
294 static xpt_pdrvfunc_t   xptplistpdrvfunc;
295 static xpt_periphfunc_t xptplistperiphfunc;
296 static int              xptedtmatch(struct ccb_dev_match *cdm);
297 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
298 static int              xptbustraverse(struct cam_eb *start_bus,
299                                        xpt_busfunc_t *tr_func, void *arg);
300 static int              xpttargettraverse(struct cam_eb *bus,
301                                           struct cam_et *start_target,
302                                           xpt_targetfunc_t *tr_func, void *arg);
303 static int              xptdevicetraverse(struct cam_et *target,
304                                           struct cam_ed *start_device,
305                                           xpt_devicefunc_t *tr_func, void *arg);
306 static int              xptperiphtraverse(struct cam_ed *device,
307                                           struct cam_periph *start_periph,
308                                           xpt_periphfunc_t *tr_func, void *arg);
309 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
310                                         xpt_pdrvfunc_t *tr_func, void *arg);
311 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
312                                             struct cam_periph *start_periph,
313                                             xpt_periphfunc_t *tr_func,
314                                             void *arg);
315 static xpt_busfunc_t    xptdefbusfunc;
316 static xpt_targetfunc_t xptdeftargetfunc;
317 static xpt_devicefunc_t xptdefdevicefunc;
318 static xpt_periphfunc_t xptdefperiphfunc;
319 static void             xpt_finishconfig_task(void *context, int pending);
320 static void             xpt_dev_async_default(u_int32_t async_code,
321                                               struct cam_eb *bus,
322                                               struct cam_et *target,
323                                               struct cam_ed *device,
324                                               void *async_arg);
325 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
326                                                  struct cam_et *target,
327                                                  lun_id_t lun_id);
328 static xpt_devicefunc_t xptsetasyncfunc;
329 static xpt_busfunc_t    xptsetasyncbusfunc;
330 static cam_status       xptregister(struct cam_periph *periph,
331                                     void *arg);
332 static __inline int device_is_queued(struct cam_ed *device);
333
334 static __inline int
335 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
336 {
337         int     retval;
338
339         mtx_assert(&devq->send_mtx, MA_OWNED);
340         if ((dev->ccbq.queue.entries > 0) &&
341             (dev->ccbq.dev_openings > 0) &&
342             (dev->ccbq.queue.qfrozen_cnt == 0)) {
343                 /*
344                  * The priority of a device waiting for controller
345                  * resources is that of the highest priority CCB
346                  * enqueued.
347                  */
348                 retval =
349                     xpt_schedule_dev(&devq->send_queue,
350                                      &dev->devq_entry,
351                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
352         } else {
353                 retval = 0;
354         }
355         return (retval);
356 }
357
358 static __inline int
359 device_is_queued(struct cam_ed *device)
360 {
361         return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
362 }
363
364 static void
365 xpt_periph_init()
366 {
367         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
368 }
369
370 static int
371 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
372 {
373
374         /*
375          * Only allow read-write access.
376          */
377         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
378                 return(EPERM);
379
380         /*
381          * We don't allow nonblocking access.
382          */
383         if ((flags & O_NONBLOCK) != 0) {
384                 printf("%s: can't do nonblocking access\n", devtoname(dev));
385                 return(ENODEV);
386         }
387
388         return(0);
389 }
390
391 static int
392 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
393 {
394
395         return(0);
396 }
397
398 /*
399  * Don't automatically grab the xpt softc lock here even though this is going
400  * through the xpt device.  The xpt device is really just a back door for
401  * accessing other devices and SIMs, so the right thing to do is to grab
402  * the appropriate SIM lock once the bus/SIM is located.
403  */
404 static int
405 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
406 {
407         int error;
408
409         if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
410                 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
411         }
412         return (error);
413 }
414
415 static int
416 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
417 {
418         int error;
419
420         error = 0;
421
422         switch(cmd) {
423         /*
424          * For the transport layer CAMIOCOMMAND ioctl, we really only want
425          * to accept CCB types that don't quite make sense to send through a
426          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
427          * in the CAM spec.
428          */
429         case CAMIOCOMMAND: {
430                 union ccb *ccb;
431                 union ccb *inccb;
432                 struct cam_eb *bus;
433
434                 inccb = (union ccb *)addr;
435 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
436                 if (inccb->ccb_h.func_code == XPT_SCSI_IO)
437                         inccb->csio.bio = NULL;
438 #endif
439
440                 if (inccb->ccb_h.flags & CAM_UNLOCKED)
441                         return (EINVAL);
442
443                 bus = xpt_find_bus(inccb->ccb_h.path_id);
444                 if (bus == NULL)
445                         return (EINVAL);
446
447                 switch (inccb->ccb_h.func_code) {
448                 case XPT_SCAN_BUS:
449                 case XPT_RESET_BUS:
450                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
451                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
452                                 xpt_release_bus(bus);
453                                 return (EINVAL);
454                         }
455                         break;
456                 case XPT_SCAN_TGT:
457                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
458                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
459                                 xpt_release_bus(bus);
460                                 return (EINVAL);
461                         }
462                         break;
463                 default:
464                         break;
465                 }
466
467                 switch(inccb->ccb_h.func_code) {
468                 case XPT_SCAN_BUS:
469                 case XPT_RESET_BUS:
470                 case XPT_PATH_INQ:
471                 case XPT_ENG_INQ:
472                 case XPT_SCAN_LUN:
473                 case XPT_SCAN_TGT:
474
475                         ccb = xpt_alloc_ccb();
476
477                         /*
478                          * Create a path using the bus, target, and lun the
479                          * user passed in.
480                          */
481                         if (xpt_create_path(&ccb->ccb_h.path, NULL,
482                                             inccb->ccb_h.path_id,
483                                             inccb->ccb_h.target_id,
484                                             inccb->ccb_h.target_lun) !=
485                                             CAM_REQ_CMP){
486                                 error = EINVAL;
487                                 xpt_free_ccb(ccb);
488                                 break;
489                         }
490                         /* Ensure all of our fields are correct */
491                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
492                                       inccb->ccb_h.pinfo.priority);
493                         xpt_merge_ccb(ccb, inccb);
494                         xpt_path_lock(ccb->ccb_h.path);
495                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
496                         xpt_path_unlock(ccb->ccb_h.path);
497                         bcopy(ccb, inccb, sizeof(union ccb));
498                         xpt_free_path(ccb->ccb_h.path);
499                         xpt_free_ccb(ccb);
500                         break;
501
502                 case XPT_DEBUG: {
503                         union ccb ccb;
504
505                         /*
506                          * This is an immediate CCB, so it's okay to
507                          * allocate it on the stack.
508                          */
509
510                         /*
511                          * Create a path using the bus, target, and lun the
512                          * user passed in.
513                          */
514                         if (xpt_create_path(&ccb.ccb_h.path, NULL,
515                                             inccb->ccb_h.path_id,
516                                             inccb->ccb_h.target_id,
517                                             inccb->ccb_h.target_lun) !=
518                                             CAM_REQ_CMP){
519                                 error = EINVAL;
520                                 break;
521                         }
522                         /* Ensure all of our fields are correct */
523                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
524                                       inccb->ccb_h.pinfo.priority);
525                         xpt_merge_ccb(&ccb, inccb);
526                         xpt_action(&ccb);
527                         bcopy(&ccb, inccb, sizeof(union ccb));
528                         xpt_free_path(ccb.ccb_h.path);
529                         break;
530
531                 }
532                 case XPT_DEV_MATCH: {
533                         struct cam_periph_map_info mapinfo;
534                         struct cam_path *old_path;
535
536                         /*
537                          * We can't deal with physical addresses for this
538                          * type of transaction.
539                          */
540                         if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
541                             CAM_DATA_VADDR) {
542                                 error = EINVAL;
543                                 break;
544                         }
545
546                         /*
547                          * Save this in case the caller had it set to
548                          * something in particular.
549                          */
550                         old_path = inccb->ccb_h.path;
551
552                         /*
553                          * We really don't need a path for the matching
554                          * code.  The path is needed because of the
555                          * debugging statements in xpt_action().  They
556                          * assume that the CCB has a valid path.
557                          */
558                         inccb->ccb_h.path = xpt_periph->path;
559
560                         bzero(&mapinfo, sizeof(mapinfo));
561
562                         /*
563                          * Map the pattern and match buffers into kernel
564                          * virtual address space.
565                          */
566                         error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
567
568                         if (error) {
569                                 inccb->ccb_h.path = old_path;
570                                 break;
571                         }
572
573                         /*
574                          * This is an immediate CCB, we can send it on directly.
575                          */
576                         xpt_action(inccb);
577
578                         /*
579                          * Map the buffers back into user space.
580                          */
581                         cam_periph_unmapmem(inccb, &mapinfo);
582
583                         inccb->ccb_h.path = old_path;
584
585                         error = 0;
586                         break;
587                 }
588                 default:
589                         error = ENOTSUP;
590                         break;
591                 }
592                 xpt_release_bus(bus);
593                 break;
594         }
595         /*
596          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
597          * with the periphal driver name and unit name filled in.  The other
598          * fields don't really matter as input.  The passthrough driver name
599          * ("pass"), and unit number are passed back in the ccb.  The current
600          * device generation number, and the index into the device peripheral
601          * driver list, and the status are also passed back.  Note that
602          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
603          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
604          * (or rather should be) impossible for the device peripheral driver
605          * list to change since we look at the whole thing in one pass, and
606          * we do it with lock protection.
607          *
608          */
609         case CAMGETPASSTHRU: {
610                 union ccb *ccb;
611                 struct cam_periph *periph;
612                 struct periph_driver **p_drv;
613                 char   *name;
614                 u_int unit;
615                 int base_periph_found;
616
617                 ccb = (union ccb *)addr;
618                 unit = ccb->cgdl.unit_number;
619                 name = ccb->cgdl.periph_name;
620                 base_periph_found = 0;
621 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
622                 if (ccb->ccb_h.func_code == XPT_SCSI_IO)
623                         ccb->csio.bio = NULL;
624 #endif
625
626                 /*
627                  * Sanity check -- make sure we don't get a null peripheral
628                  * driver name.
629                  */
630                 if (*ccb->cgdl.periph_name == '\0') {
631                         error = EINVAL;
632                         break;
633                 }
634
635                 /* Keep the list from changing while we traverse it */
636                 xpt_lock_buses();
637
638                 /* first find our driver in the list of drivers */
639                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
640                         if (strcmp((*p_drv)->driver_name, name) == 0)
641                                 break;
642
643                 if (*p_drv == NULL) {
644                         xpt_unlock_buses();
645                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
646                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
647                         *ccb->cgdl.periph_name = '\0';
648                         ccb->cgdl.unit_number = 0;
649                         error = ENOENT;
650                         break;
651                 }
652
653                 /*
654                  * Run through every peripheral instance of this driver
655                  * and check to see whether it matches the unit passed
656                  * in by the user.  If it does, get out of the loops and
657                  * find the passthrough driver associated with that
658                  * peripheral driver.
659                  */
660                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
661                      periph = TAILQ_NEXT(periph, unit_links)) {
662
663                         if (periph->unit_number == unit)
664                                 break;
665                 }
666                 /*
667                  * If we found the peripheral driver that the user passed
668                  * in, go through all of the peripheral drivers for that
669                  * particular device and look for a passthrough driver.
670                  */
671                 if (periph != NULL) {
672                         struct cam_ed *device;
673                         int i;
674
675                         base_periph_found = 1;
676                         device = periph->path->device;
677                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
678                              periph != NULL;
679                              periph = SLIST_NEXT(periph, periph_links), i++) {
680                                 /*
681                                  * Check to see whether we have a
682                                  * passthrough device or not.
683                                  */
684                                 if (strcmp(periph->periph_name, "pass") == 0) {
685                                         /*
686                                          * Fill in the getdevlist fields.
687                                          */
688                                         strcpy(ccb->cgdl.periph_name,
689                                                periph->periph_name);
690                                         ccb->cgdl.unit_number =
691                                                 periph->unit_number;
692                                         if (SLIST_NEXT(periph, periph_links))
693                                                 ccb->cgdl.status =
694                                                         CAM_GDEVLIST_MORE_DEVS;
695                                         else
696                                                 ccb->cgdl.status =
697                                                        CAM_GDEVLIST_LAST_DEVICE;
698                                         ccb->cgdl.generation =
699                                                 device->generation;
700                                         ccb->cgdl.index = i;
701                                         /*
702                                          * Fill in some CCB header fields
703                                          * that the user may want.
704                                          */
705                                         ccb->ccb_h.path_id =
706                                                 periph->path->bus->path_id;
707                                         ccb->ccb_h.target_id =
708                                                 periph->path->target->target_id;
709                                         ccb->ccb_h.target_lun =
710                                                 periph->path->device->lun_id;
711                                         ccb->ccb_h.status = CAM_REQ_CMP;
712                                         break;
713                                 }
714                         }
715                 }
716
717                 /*
718                  * If the periph is null here, one of two things has
719                  * happened.  The first possibility is that we couldn't
720                  * find the unit number of the particular peripheral driver
721                  * that the user is asking about.  e.g. the user asks for
722                  * the passthrough driver for "da11".  We find the list of
723                  * "da" peripherals all right, but there is no unit 11.
724                  * The other possibility is that we went through the list
725                  * of peripheral drivers attached to the device structure,
726                  * but didn't find one with the name "pass".  Either way,
727                  * we return ENOENT, since we couldn't find something.
728                  */
729                 if (periph == NULL) {
730                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
731                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
732                         *ccb->cgdl.periph_name = '\0';
733                         ccb->cgdl.unit_number = 0;
734                         error = ENOENT;
735                         /*
736                          * It is unfortunate that this is even necessary,
737                          * but there are many, many clueless users out there.
738                          * If this is true, the user is looking for the
739                          * passthrough driver, but doesn't have one in his
740                          * kernel.
741                          */
742                         if (base_periph_found == 1) {
743                                 printf("xptioctl: pass driver is not in the "
744                                        "kernel\n");
745                                 printf("xptioctl: put \"device pass\" in "
746                                        "your kernel config file\n");
747                         }
748                 }
749                 xpt_unlock_buses();
750                 break;
751                 }
752         default:
753                 error = ENOTTY;
754                 break;
755         }
756
757         return(error);
758 }
759
760 static int
761 cam_module_event_handler(module_t mod, int what, void *arg)
762 {
763         int error;
764
765         switch (what) {
766         case MOD_LOAD:
767                 if ((error = xpt_init(NULL)) != 0)
768                         return (error);
769                 break;
770         case MOD_UNLOAD:
771                 return EBUSY;
772         default:
773                 return EOPNOTSUPP;
774         }
775
776         return 0;
777 }
778
779 static struct xpt_proto *
780 xpt_proto_find(cam_proto proto)
781 {
782         struct xpt_proto **pp;
783
784         SET_FOREACH(pp, cam_xpt_proto_set) {
785                 if ((*pp)->proto == proto)
786                         return *pp;
787         }
788
789         return NULL;
790 }
791
792 static void
793 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
794 {
795
796         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
797                 xpt_free_path(done_ccb->ccb_h.path);
798                 xpt_free_ccb(done_ccb);
799         } else {
800                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
801                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
802         }
803         xpt_release_boot();
804 }
805
806 /* thread to handle bus rescans */
807 static void
808 xpt_scanner_thread(void *dummy)
809 {
810         union ccb       *ccb;
811         struct cam_path  path;
812
813         xpt_lock_buses();
814         for (;;) {
815                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
816                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
817                                "-", 0);
818                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
819                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
820                         xpt_unlock_buses();
821
822                         /*
823                          * Since lock can be dropped inside and path freed
824                          * by completion callback even before return here,
825                          * take our own path copy for reference.
826                          */
827                         xpt_copy_path(&path, ccb->ccb_h.path);
828                         xpt_path_lock(&path);
829                         xpt_action(ccb);
830                         xpt_path_unlock(&path);
831                         xpt_release_path(&path);
832
833                         xpt_lock_buses();
834                 }
835         }
836 }
837
838 void
839 xpt_rescan(union ccb *ccb)
840 {
841         struct ccb_hdr *hdr;
842
843         /* Prepare request */
844         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
845             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
846                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
847         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
848             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
849                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
850         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
851             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
852                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
853         else {
854                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
855                 xpt_free_path(ccb->ccb_h.path);
856                 xpt_free_ccb(ccb);
857                 return;
858         }
859         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
860             ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
861                 xpt_action_name(ccb->ccb_h.func_code)));
862
863         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
864         ccb->ccb_h.cbfcnp = xpt_rescan_done;
865         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
866         /* Don't make duplicate entries for the same paths. */
867         xpt_lock_buses();
868         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
869                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
870                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
871                                 wakeup(&xsoftc.ccb_scanq);
872                                 xpt_unlock_buses();
873                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
874                                 xpt_free_path(ccb->ccb_h.path);
875                                 xpt_free_ccb(ccb);
876                                 return;
877                         }
878                 }
879         }
880         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
881         xsoftc.buses_to_config++;
882         wakeup(&xsoftc.ccb_scanq);
883         xpt_unlock_buses();
884 }
885
886 /* Functions accessed by the peripheral drivers */
887 static int
888 xpt_init(void *dummy)
889 {
890         struct cam_sim *xpt_sim;
891         struct cam_path *path;
892         struct cam_devq *devq;
893         cam_status status;
894         int error, i;
895
896         TAILQ_INIT(&xsoftc.xpt_busses);
897         TAILQ_INIT(&xsoftc.ccb_scanq);
898         STAILQ_INIT(&xsoftc.highpowerq);
899         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
900
901         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
902         mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
903         xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
904             taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
905
906 #ifdef CAM_BOOT_DELAY
907         /*
908          * Override this value at compile time to assist our users
909          * who don't use loader to boot a kernel.
910          */
911         xsoftc.boot_delay = CAM_BOOT_DELAY;
912 #endif
913         /*
914          * The xpt layer is, itself, the equivalent of a SIM.
915          * Allow 16 ccbs in the ccb pool for it.  This should
916          * give decent parallelism when we probe buses and
917          * perform other XPT functions.
918          */
919         devq = cam_simq_alloc(16);
920         xpt_sim = cam_sim_alloc(xptaction,
921                                 xptpoll,
922                                 "xpt",
923                                 /*softc*/NULL,
924                                 /*unit*/0,
925                                 /*mtx*/&xsoftc.xpt_lock,
926                                 /*max_dev_transactions*/0,
927                                 /*max_tagged_dev_transactions*/0,
928                                 devq);
929         if (xpt_sim == NULL)
930                 return (ENOMEM);
931
932         mtx_lock(&xsoftc.xpt_lock);
933         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
934                 mtx_unlock(&xsoftc.xpt_lock);
935                 printf("xpt_init: xpt_bus_register failed with status %#x,"
936                        " failing attach\n", status);
937                 return (EINVAL);
938         }
939         mtx_unlock(&xsoftc.xpt_lock);
940
941         /*
942          * Looking at the XPT from the SIM layer, the XPT is
943          * the equivalent of a peripheral driver.  Allocate
944          * a peripheral driver entry for us.
945          */
946         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
947                                       CAM_TARGET_WILDCARD,
948                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
949                 printf("xpt_init: xpt_create_path failed with status %#x,"
950                        " failing attach\n", status);
951                 return (EINVAL);
952         }
953         xpt_path_lock(path);
954         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
955                          path, NULL, 0, xpt_sim);
956         xpt_path_unlock(path);
957         xpt_free_path(path);
958
959         if (cam_num_doneqs < 1)
960                 cam_num_doneqs = 1 + mp_ncpus / 6;
961         else if (cam_num_doneqs > MAXCPU)
962                 cam_num_doneqs = MAXCPU;
963         for (i = 0; i < cam_num_doneqs; i++) {
964                 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
965                     MTX_DEF);
966                 STAILQ_INIT(&cam_doneqs[i].cam_doneq);
967                 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
968                     &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
969                 if (error != 0) {
970                         cam_num_doneqs = i;
971                         break;
972                 }
973         }
974         if (cam_num_doneqs < 1) {
975                 printf("xpt_init: Cannot init completion queues "
976                        "- failing attach\n");
977                 return (ENOMEM);
978         }
979         /*
980          * Register a callback for when interrupts are enabled.
981          */
982         xsoftc.xpt_config_hook =
983             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
984                                               M_CAMXPT, M_NOWAIT | M_ZERO);
985         if (xsoftc.xpt_config_hook == NULL) {
986                 printf("xpt_init: Cannot malloc config hook "
987                        "- failing attach\n");
988                 return (ENOMEM);
989         }
990         xsoftc.xpt_config_hook->ich_func = xpt_config;
991         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
992                 free (xsoftc.xpt_config_hook, M_CAMXPT);
993                 printf("xpt_init: config_intrhook_establish failed "
994                        "- failing attach\n");
995         }
996
997         return (0);
998 }
999
1000 static cam_status
1001 xptregister(struct cam_periph *periph, void *arg)
1002 {
1003         struct cam_sim *xpt_sim;
1004
1005         if (periph == NULL) {
1006                 printf("xptregister: periph was NULL!!\n");
1007                 return(CAM_REQ_CMP_ERR);
1008         }
1009
1010         xpt_sim = (struct cam_sim *)arg;
1011         xpt_sim->softc = periph;
1012         xpt_periph = periph;
1013         periph->softc = NULL;
1014
1015         return(CAM_REQ_CMP);
1016 }
1017
1018 int32_t
1019 xpt_add_periph(struct cam_periph *periph)
1020 {
1021         struct cam_ed *device;
1022         int32_t  status;
1023
1024         TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
1025         device = periph->path->device;
1026         status = CAM_REQ_CMP;
1027         if (device != NULL) {
1028                 mtx_lock(&device->target->bus->eb_mtx);
1029                 device->generation++;
1030                 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1031                 mtx_unlock(&device->target->bus->eb_mtx);
1032                 atomic_add_32(&xsoftc.xpt_generation, 1);
1033         }
1034
1035         return (status);
1036 }
1037
1038 void
1039 xpt_remove_periph(struct cam_periph *periph)
1040 {
1041         struct cam_ed *device;
1042
1043         device = periph->path->device;
1044         if (device != NULL) {
1045                 mtx_lock(&device->target->bus->eb_mtx);
1046                 device->generation++;
1047                 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1048                 mtx_unlock(&device->target->bus->eb_mtx);
1049                 atomic_add_32(&xsoftc.xpt_generation, 1);
1050         }
1051 }
1052
1053
1054 void
1055 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1056 {
1057         struct  cam_path *path = periph->path;
1058         struct  xpt_proto *proto;
1059
1060         cam_periph_assert(periph, MA_OWNED);
1061         periph->flags |= CAM_PERIPH_ANNOUNCED;
1062
1063         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1064                periph->periph_name, periph->unit_number,
1065                path->bus->sim->sim_name,
1066                path->bus->sim->unit_number,
1067                path->bus->sim->bus_id,
1068                path->bus->path_id,
1069                path->target->target_id,
1070                (uintmax_t)path->device->lun_id);
1071         printf("%s%d: ", periph->periph_name, periph->unit_number);
1072         proto = xpt_proto_find(path->device->protocol);
1073         if (proto)
1074                 proto->ops->announce(path->device);
1075         else
1076                 printf("%s%d: Unknown protocol device %d\n",
1077                     periph->periph_name, periph->unit_number,
1078                     path->device->protocol);
1079         if (path->device->serial_num_len > 0) {
1080                 /* Don't wrap the screen  - print only the first 60 chars */
1081                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1082                        periph->unit_number, path->device->serial_num);
1083         }
1084         /* Announce transport details. */
1085         path->bus->xport->ops->announce(periph);
1086         /* Announce command queueing. */
1087         if (path->device->inq_flags & SID_CmdQue
1088          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1089                 printf("%s%d: Command Queueing enabled\n",
1090                        periph->periph_name, periph->unit_number);
1091         }
1092         /* Announce caller's details if they've passed in. */
1093         if (announce_string != NULL)
1094                 printf("%s%d: %s\n", periph->periph_name,
1095                        periph->unit_number, announce_string);
1096 }
1097
1098 void
1099 xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
1100     char *announce_string)
1101 {
1102         struct  cam_path *path = periph->path;
1103         struct  xpt_proto *proto;
1104
1105         cam_periph_assert(periph, MA_OWNED);
1106         periph->flags |= CAM_PERIPH_ANNOUNCED;
1107
1108         /* Fall back to the non-sbuf method if necessary */
1109         if (xsoftc.announce_nosbuf != 0) {
1110                 xpt_announce_periph(periph, announce_string);
1111                 return;
1112         }
1113         proto = xpt_proto_find(path->device->protocol);
1114         if (((proto != NULL) && (proto->ops->announce_sbuf == NULL)) ||
1115             (path->bus->xport->ops->announce_sbuf == NULL)) {
1116                 xpt_announce_periph(periph, announce_string);
1117                 return;
1118         }
1119
1120         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1121             periph->periph_name, periph->unit_number,
1122             path->bus->sim->sim_name,
1123             path->bus->sim->unit_number,
1124             path->bus->sim->bus_id,
1125             path->bus->path_id,
1126             path->target->target_id,
1127             (uintmax_t)path->device->lun_id);
1128         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1129
1130         if (proto)
1131                 proto->ops->announce_sbuf(path->device, sb);
1132         else
1133                 sbuf_printf(sb, "%s%d: Unknown protocol device %d\n",
1134                     periph->periph_name, periph->unit_number,
1135                     path->device->protocol);
1136         if (path->device->serial_num_len > 0) {
1137                 /* Don't wrap the screen  - print only the first 60 chars */
1138                 sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
1139                     periph->periph_name, periph->unit_number,
1140                     path->device->serial_num);
1141         }
1142         /* Announce transport details. */
1143         path->bus->xport->ops->announce_sbuf(periph, sb);
1144         /* Announce command queueing. */
1145         if (path->device->inq_flags & SID_CmdQue
1146          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1147                 sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
1148                     periph->periph_name, periph->unit_number);
1149         }
1150         /* Announce caller's details if they've passed in. */
1151         if (announce_string != NULL)
1152                 sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
1153                     periph->unit_number, announce_string);
1154 }
1155
1156 void
1157 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1158 {
1159         if (quirks != 0) {
1160                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
1161                     periph->unit_number, quirks, bit_string);
1162         }
1163 }
1164
1165 void
1166 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
1167                          int quirks, char *bit_string)
1168 {
1169         if (xsoftc.announce_nosbuf != 0) {
1170                 xpt_announce_quirks(periph, quirks, bit_string);
1171                 return;
1172         }
1173
1174         if (quirks != 0) {
1175                 sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
1176                     periph->unit_number, quirks, bit_string);
1177         }
1178 }
1179
1180 void
1181 xpt_denounce_periph(struct cam_periph *periph)
1182 {
1183         struct  cam_path *path = periph->path;
1184         struct  xpt_proto *proto;
1185
1186         cam_periph_assert(periph, MA_OWNED);
1187         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1188                periph->periph_name, periph->unit_number,
1189                path->bus->sim->sim_name,
1190                path->bus->sim->unit_number,
1191                path->bus->sim->bus_id,
1192                path->bus->path_id,
1193                path->target->target_id,
1194                (uintmax_t)path->device->lun_id);
1195         printf("%s%d: ", periph->periph_name, periph->unit_number);
1196         proto = xpt_proto_find(path->device->protocol);
1197         if (proto)
1198                 proto->ops->denounce(path->device);
1199         else
1200                 printf("%s%d: Unknown protocol device %d\n",
1201                     periph->periph_name, periph->unit_number,
1202                     path->device->protocol);
1203         if (path->device->serial_num_len > 0)
1204                 printf(" s/n %.60s", path->device->serial_num);
1205         printf(" detached\n");
1206 }
1207
1208 void
1209 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
1210 {
1211         struct cam_path *path = periph->path;
1212         struct xpt_proto *proto;
1213
1214         cam_periph_assert(periph, MA_OWNED);
1215
1216         /* Fall back to the non-sbuf method if necessary */
1217         if (xsoftc.announce_nosbuf != 0) {
1218                 xpt_denounce_periph(periph);
1219                 return;
1220         }
1221         proto = xpt_proto_find(path->device->protocol);
1222         if ((proto != NULL) && (proto->ops->denounce_sbuf == NULL)) {
1223                 xpt_denounce_periph(periph);
1224                 return;
1225         }
1226
1227         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1228             periph->periph_name, periph->unit_number,
1229             path->bus->sim->sim_name,
1230             path->bus->sim->unit_number,
1231             path->bus->sim->bus_id,
1232             path->bus->path_id,
1233             path->target->target_id,
1234             (uintmax_t)path->device->lun_id);
1235         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1236
1237         if (proto)
1238                 proto->ops->denounce_sbuf(path->device, sb);
1239         else
1240                 sbuf_printf(sb, "%s%d: Unknown protocol device %d\n",
1241                     periph->periph_name, periph->unit_number,
1242                     path->device->protocol);
1243         if (path->device->serial_num_len > 0)
1244                 sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
1245         sbuf_printf(sb, " detached\n");
1246 }
1247
1248 int
1249 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1250 {
1251         int ret = -1, l, o;
1252         struct ccb_dev_advinfo cdai;
1253         struct scsi_vpd_id_descriptor *idd;
1254
1255         xpt_path_assert(path, MA_OWNED);
1256
1257         memset(&cdai, 0, sizeof(cdai));
1258         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1259         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1260         cdai.flags = CDAI_FLAG_NONE;
1261         cdai.bufsiz = len;
1262
1263         if (!strcmp(attr, "GEOM::ident"))
1264                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1265         else if (!strcmp(attr, "GEOM::physpath"))
1266                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
1267         else if (strcmp(attr, "GEOM::lunid") == 0 ||
1268                  strcmp(attr, "GEOM::lunname") == 0) {
1269                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1270                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1271         } else
1272                 goto out;
1273
1274         cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
1275         if (cdai.buf == NULL) {
1276                 ret = ENOMEM;
1277                 goto out;
1278         }
1279         xpt_action((union ccb *)&cdai); /* can only be synchronous */
1280         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1281                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1282         if (cdai.provsiz == 0)
1283                 goto out;
1284         if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
1285                 if (strcmp(attr, "GEOM::lunid") == 0) {
1286                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1287                             cdai.provsiz, scsi_devid_is_lun_naa);
1288                         if (idd == NULL)
1289                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1290                                     cdai.provsiz, scsi_devid_is_lun_eui64);
1291                         if (idd == NULL)
1292                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1293                                     cdai.provsiz, scsi_devid_is_lun_uuid);
1294                         if (idd == NULL)
1295                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1296                                     cdai.provsiz, scsi_devid_is_lun_md5);
1297                 } else
1298                         idd = NULL;
1299                 if (idd == NULL)
1300                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1301                             cdai.provsiz, scsi_devid_is_lun_t10);
1302                 if (idd == NULL)
1303                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1304                             cdai.provsiz, scsi_devid_is_lun_name);
1305                 if (idd == NULL)
1306                         goto out;
1307                 ret = 0;
1308                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
1309                         if (idd->length < len) {
1310                                 for (l = 0; l < idd->length; l++)
1311                                         buf[l] = idd->identifier[l] ?
1312                                             idd->identifier[l] : ' ';
1313                                 buf[l] = 0;
1314                         } else
1315                                 ret = EFAULT;
1316                 } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
1317                         l = strnlen(idd->identifier, idd->length);
1318                         if (l < len) {
1319                                 bcopy(idd->identifier, buf, l);
1320                                 buf[l] = 0;
1321                         } else
1322                                 ret = EFAULT;
1323                 } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID
1324                     && idd->identifier[0] == 0x10) {
1325                         if ((idd->length - 2) * 2 + 4 < len) {
1326                                 for (l = 2, o = 0; l < idd->length; l++) {
1327                                         if (l == 6 || l == 8 || l == 10 || l == 12)
1328                                             o += sprintf(buf + o, "-");
1329                                         o += sprintf(buf + o, "%02x",
1330                                             idd->identifier[l]);
1331                                 }
1332                         } else
1333                                 ret = EFAULT;
1334                 } else {
1335                         if (idd->length * 2 < len) {
1336                                 for (l = 0; l < idd->length; l++)
1337                                         sprintf(buf + l * 2, "%02x",
1338                                             idd->identifier[l]);
1339                         } else
1340                                 ret = EFAULT;
1341                 }
1342         } else {
1343                 ret = 0;
1344                 if (strlcpy(buf, cdai.buf, len) >= len)
1345                         ret = EFAULT;
1346         }
1347
1348 out:
1349         if (cdai.buf != NULL)
1350                 free(cdai.buf, M_CAMXPT);
1351         return ret;
1352 }
1353
1354 static dev_match_ret
1355 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1356             struct cam_eb *bus)
1357 {
1358         dev_match_ret retval;
1359         u_int i;
1360
1361         retval = DM_RET_NONE;
1362
1363         /*
1364          * If we aren't given something to match against, that's an error.
1365          */
1366         if (bus == NULL)
1367                 return(DM_RET_ERROR);
1368
1369         /*
1370          * If there are no match entries, then this bus matches no
1371          * matter what.
1372          */
1373         if ((patterns == NULL) || (num_patterns == 0))
1374                 return(DM_RET_DESCEND | DM_RET_COPY);
1375
1376         for (i = 0; i < num_patterns; i++) {
1377                 struct bus_match_pattern *cur_pattern;
1378
1379                 /*
1380                  * If the pattern in question isn't for a bus node, we
1381                  * aren't interested.  However, we do indicate to the
1382                  * calling routine that we should continue descending the
1383                  * tree, since the user wants to match against lower-level
1384                  * EDT elements.
1385                  */
1386                 if (patterns[i].type != DEV_MATCH_BUS) {
1387                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1388                                 retval |= DM_RET_DESCEND;
1389                         continue;
1390                 }
1391
1392                 cur_pattern = &patterns[i].pattern.bus_pattern;
1393
1394                 /*
1395                  * If they want to match any bus node, we give them any
1396                  * device node.
1397                  */
1398                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1399                         /* set the copy flag */
1400                         retval |= DM_RET_COPY;
1401
1402                         /*
1403                          * If we've already decided on an action, go ahead
1404                          * and return.
1405                          */
1406                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1407                                 return(retval);
1408                 }
1409
1410                 /*
1411                  * Not sure why someone would do this...
1412                  */
1413                 if (cur_pattern->flags == BUS_MATCH_NONE)
1414                         continue;
1415
1416                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1417                  && (cur_pattern->path_id != bus->path_id))
1418                         continue;
1419
1420                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1421                  && (cur_pattern->bus_id != bus->sim->bus_id))
1422                         continue;
1423
1424                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1425                  && (cur_pattern->unit_number != bus->sim->unit_number))
1426                         continue;
1427
1428                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1429                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1430                              DEV_IDLEN) != 0))
1431                         continue;
1432
1433                 /*
1434                  * If we get to this point, the user definitely wants
1435                  * information on this bus.  So tell the caller to copy the
1436                  * data out.
1437                  */
1438                 retval |= DM_RET_COPY;
1439
1440                 /*
1441                  * If the return action has been set to descend, then we
1442                  * know that we've already seen a non-bus matching
1443                  * expression, therefore we need to further descend the tree.
1444                  * This won't change by continuing around the loop, so we
1445                  * go ahead and return.  If we haven't seen a non-bus
1446                  * matching expression, we keep going around the loop until
1447                  * we exhaust the matching expressions.  We'll set the stop
1448                  * flag once we fall out of the loop.
1449                  */
1450                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1451                         return(retval);
1452         }
1453
1454         /*
1455          * If the return action hasn't been set to descend yet, that means
1456          * we haven't seen anything other than bus matching patterns.  So
1457          * tell the caller to stop descending the tree -- the user doesn't
1458          * want to match against lower level tree elements.
1459          */
1460         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1461                 retval |= DM_RET_STOP;
1462
1463         return(retval);
1464 }
1465
1466 static dev_match_ret
1467 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1468                struct cam_ed *device)
1469 {
1470         dev_match_ret retval;
1471         u_int i;
1472
1473         retval = DM_RET_NONE;
1474
1475         /*
1476          * If we aren't given something to match against, that's an error.
1477          */
1478         if (device == NULL)
1479                 return(DM_RET_ERROR);
1480
1481         /*
1482          * If there are no match entries, then this device matches no
1483          * matter what.
1484          */
1485         if ((patterns == NULL) || (num_patterns == 0))
1486                 return(DM_RET_DESCEND | DM_RET_COPY);
1487
1488         for (i = 0; i < num_patterns; i++) {
1489                 struct device_match_pattern *cur_pattern;
1490                 struct scsi_vpd_device_id *device_id_page;
1491
1492                 /*
1493                  * If the pattern in question isn't for a device node, we
1494                  * aren't interested.
1495                  */
1496                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1497                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1498                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1499                                 retval |= DM_RET_DESCEND;
1500                         continue;
1501                 }
1502
1503                 cur_pattern = &patterns[i].pattern.device_pattern;
1504
1505                 /* Error out if mutually exclusive options are specified. */
1506                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1507                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1508                         return(DM_RET_ERROR);
1509
1510                 /*
1511                  * If they want to match any device node, we give them any
1512                  * device node.
1513                  */
1514                 if (cur_pattern->flags == DEV_MATCH_ANY)
1515                         goto copy_dev_node;
1516
1517                 /*
1518                  * Not sure why someone would do this...
1519                  */
1520                 if (cur_pattern->flags == DEV_MATCH_NONE)
1521                         continue;
1522
1523                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1524                  && (cur_pattern->path_id != device->target->bus->path_id))
1525                         continue;
1526
1527                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1528                  && (cur_pattern->target_id != device->target->target_id))
1529                         continue;
1530
1531                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1532                  && (cur_pattern->target_lun != device->lun_id))
1533                         continue;
1534
1535                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1536                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1537                                     (caddr_t)&cur_pattern->data.inq_pat,
1538                                     1, sizeof(cur_pattern->data.inq_pat),
1539                                     scsi_static_inquiry_match) == NULL))
1540                         continue;
1541
1542                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1543                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1544                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1545                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1546                                       device->device_id_len
1547                                     - SVPD_DEVICE_ID_HDR_LEN,
1548                                       cur_pattern->data.devid_pat.id,
1549                                       cur_pattern->data.devid_pat.id_len) != 0))
1550                         continue;
1551
1552 copy_dev_node:
1553                 /*
1554                  * If we get to this point, the user definitely wants
1555                  * information on this device.  So tell the caller to copy
1556                  * the data out.
1557                  */
1558                 retval |= DM_RET_COPY;
1559
1560                 /*
1561                  * If the return action has been set to descend, then we
1562                  * know that we've already seen a peripheral matching
1563                  * expression, therefore we need to further descend the tree.
1564                  * This won't change by continuing around the loop, so we
1565                  * go ahead and return.  If we haven't seen a peripheral
1566                  * matching expression, we keep going around the loop until
1567                  * we exhaust the matching expressions.  We'll set the stop
1568                  * flag once we fall out of the loop.
1569                  */
1570                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1571                         return(retval);
1572         }
1573
1574         /*
1575          * If the return action hasn't been set to descend yet, that means
1576          * we haven't seen any peripheral matching patterns.  So tell the
1577          * caller to stop descending the tree -- the user doesn't want to
1578          * match against lower level tree elements.
1579          */
1580         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1581                 retval |= DM_RET_STOP;
1582
1583         return(retval);
1584 }
1585
1586 /*
1587  * Match a single peripheral against any number of match patterns.
1588  */
1589 static dev_match_ret
1590 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1591                struct cam_periph *periph)
1592 {
1593         dev_match_ret retval;
1594         u_int i;
1595
1596         /*
1597          * If we aren't given something to match against, that's an error.
1598          */
1599         if (periph == NULL)
1600                 return(DM_RET_ERROR);
1601
1602         /*
1603          * If there are no match entries, then this peripheral matches no
1604          * matter what.
1605          */
1606         if ((patterns == NULL) || (num_patterns == 0))
1607                 return(DM_RET_STOP | DM_RET_COPY);
1608
1609         /*
1610          * There aren't any nodes below a peripheral node, so there's no
1611          * reason to descend the tree any further.
1612          */
1613         retval = DM_RET_STOP;
1614
1615         for (i = 0; i < num_patterns; i++) {
1616                 struct periph_match_pattern *cur_pattern;
1617
1618                 /*
1619                  * If the pattern in question isn't for a peripheral, we
1620                  * aren't interested.
1621                  */
1622                 if (patterns[i].type != DEV_MATCH_PERIPH)
1623                         continue;
1624
1625                 cur_pattern = &patterns[i].pattern.periph_pattern;
1626
1627                 /*
1628                  * If they want to match on anything, then we will do so.
1629                  */
1630                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1631                         /* set the copy flag */
1632                         retval |= DM_RET_COPY;
1633
1634                         /*
1635                          * We've already set the return action to stop,
1636                          * since there are no nodes below peripherals in
1637                          * the tree.
1638                          */
1639                         return(retval);
1640                 }
1641
1642                 /*
1643                  * Not sure why someone would do this...
1644                  */
1645                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1646                         continue;
1647
1648                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1649                  && (cur_pattern->path_id != periph->path->bus->path_id))
1650                         continue;
1651
1652                 /*
1653                  * For the target and lun id's, we have to make sure the
1654                  * target and lun pointers aren't NULL.  The xpt peripheral
1655                  * has a wildcard target and device.
1656                  */
1657                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1658                  && ((periph->path->target == NULL)
1659                  ||(cur_pattern->target_id != periph->path->target->target_id)))
1660                         continue;
1661
1662                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1663                  && ((periph->path->device == NULL)
1664                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1665                         continue;
1666
1667                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1668                  && (cur_pattern->unit_number != periph->unit_number))
1669                         continue;
1670
1671                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1672                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1673                              DEV_IDLEN) != 0))
1674                         continue;
1675
1676                 /*
1677                  * If we get to this point, the user definitely wants
1678                  * information on this peripheral.  So tell the caller to
1679                  * copy the data out.
1680                  */
1681                 retval |= DM_RET_COPY;
1682
1683                 /*
1684                  * The return action has already been set to stop, since
1685                  * peripherals don't have any nodes below them in the EDT.
1686                  */
1687                 return(retval);
1688         }
1689
1690         /*
1691          * If we get to this point, the peripheral that was passed in
1692          * doesn't match any of the patterns.
1693          */
1694         return(retval);
1695 }
1696
1697 static int
1698 xptedtbusfunc(struct cam_eb *bus, void *arg)
1699 {
1700         struct ccb_dev_match *cdm;
1701         struct cam_et *target;
1702         dev_match_ret retval;
1703
1704         cdm = (struct ccb_dev_match *)arg;
1705
1706         /*
1707          * If our position is for something deeper in the tree, that means
1708          * that we've already seen this node.  So, we keep going down.
1709          */
1710         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1711          && (cdm->pos.cookie.bus == bus)
1712          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1713          && (cdm->pos.cookie.target != NULL))
1714                 retval = DM_RET_DESCEND;
1715         else
1716                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1717
1718         /*
1719          * If we got an error, bail out of the search.
1720          */
1721         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1722                 cdm->status = CAM_DEV_MATCH_ERROR;
1723                 return(0);
1724         }
1725
1726         /*
1727          * If the copy flag is set, copy this bus out.
1728          */
1729         if (retval & DM_RET_COPY) {
1730                 int spaceleft, j;
1731
1732                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1733                         sizeof(struct dev_match_result));
1734
1735                 /*
1736                  * If we don't have enough space to put in another
1737                  * match result, save our position and tell the
1738                  * user there are more devices to check.
1739                  */
1740                 if (spaceleft < sizeof(struct dev_match_result)) {
1741                         bzero(&cdm->pos, sizeof(cdm->pos));
1742                         cdm->pos.position_type =
1743                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1744
1745                         cdm->pos.cookie.bus = bus;
1746                         cdm->pos.generations[CAM_BUS_GENERATION]=
1747                                 xsoftc.bus_generation;
1748                         cdm->status = CAM_DEV_MATCH_MORE;
1749                         return(0);
1750                 }
1751                 j = cdm->num_matches;
1752                 cdm->num_matches++;
1753                 cdm->matches[j].type = DEV_MATCH_BUS;
1754                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1755                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1756                 cdm->matches[j].result.bus_result.unit_number =
1757                         bus->sim->unit_number;
1758                 strncpy(cdm->matches[j].result.bus_result.dev_name,
1759                         bus->sim->sim_name, DEV_IDLEN);
1760         }
1761
1762         /*
1763          * If the user is only interested in buses, there's no
1764          * reason to descend to the next level in the tree.
1765          */
1766         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1767                 return(1);
1768
1769         /*
1770          * If there is a target generation recorded, check it to
1771          * make sure the target list hasn't changed.
1772          */
1773         mtx_lock(&bus->eb_mtx);
1774         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1775          && (cdm->pos.cookie.bus == bus)
1776          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1777          && (cdm->pos.cookie.target != NULL)) {
1778                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1779                     bus->generation)) {
1780                         mtx_unlock(&bus->eb_mtx);
1781                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1782                         return (0);
1783                 }
1784                 target = (struct cam_et *)cdm->pos.cookie.target;
1785                 target->refcount++;
1786         } else
1787                 target = NULL;
1788         mtx_unlock(&bus->eb_mtx);
1789
1790         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1791 }
1792
1793 static int
1794 xptedttargetfunc(struct cam_et *target, void *arg)
1795 {
1796         struct ccb_dev_match *cdm;
1797         struct cam_eb *bus;
1798         struct cam_ed *device;
1799
1800         cdm = (struct ccb_dev_match *)arg;
1801         bus = target->bus;
1802
1803         /*
1804          * If there is a device list generation recorded, check it to
1805          * make sure the device list hasn't changed.
1806          */
1807         mtx_lock(&bus->eb_mtx);
1808         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1809          && (cdm->pos.cookie.bus == bus)
1810          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1811          && (cdm->pos.cookie.target == target)
1812          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1813          && (cdm->pos.cookie.device != NULL)) {
1814                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1815                     target->generation) {
1816                         mtx_unlock(&bus->eb_mtx);
1817                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1818                         return(0);
1819                 }
1820                 device = (struct cam_ed *)cdm->pos.cookie.device;
1821                 device->refcount++;
1822         } else
1823                 device = NULL;
1824         mtx_unlock(&bus->eb_mtx);
1825
1826         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1827 }
1828
1829 static int
1830 xptedtdevicefunc(struct cam_ed *device, void *arg)
1831 {
1832         struct cam_eb *bus;
1833         struct cam_periph *periph;
1834         struct ccb_dev_match *cdm;
1835         dev_match_ret retval;
1836
1837         cdm = (struct ccb_dev_match *)arg;
1838         bus = device->target->bus;
1839
1840         /*
1841          * If our position is for something deeper in the tree, that means
1842          * that we've already seen this node.  So, we keep going down.
1843          */
1844         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1845          && (cdm->pos.cookie.device == device)
1846          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1847          && (cdm->pos.cookie.periph != NULL))
1848                 retval = DM_RET_DESCEND;
1849         else
1850                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1851                                         device);
1852
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1854                 cdm->status = CAM_DEV_MATCH_ERROR;
1855                 return(0);
1856         }
1857
1858         /*
1859          * If the copy flag is set, copy this device out.
1860          */
1861         if (retval & DM_RET_COPY) {
1862                 int spaceleft, j;
1863
1864                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1865                         sizeof(struct dev_match_result));
1866
1867                 /*
1868                  * If we don't have enough space to put in another
1869                  * match result, save our position and tell the
1870                  * user there are more devices to check.
1871                  */
1872                 if (spaceleft < sizeof(struct dev_match_result)) {
1873                         bzero(&cdm->pos, sizeof(cdm->pos));
1874                         cdm->pos.position_type =
1875                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1876                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1877
1878                         cdm->pos.cookie.bus = device->target->bus;
1879                         cdm->pos.generations[CAM_BUS_GENERATION]=
1880                                 xsoftc.bus_generation;
1881                         cdm->pos.cookie.target = device->target;
1882                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1883                                 device->target->bus->generation;
1884                         cdm->pos.cookie.device = device;
1885                         cdm->pos.generations[CAM_DEV_GENERATION] =
1886                                 device->target->generation;
1887                         cdm->status = CAM_DEV_MATCH_MORE;
1888                         return(0);
1889                 }
1890                 j = cdm->num_matches;
1891                 cdm->num_matches++;
1892                 cdm->matches[j].type = DEV_MATCH_DEVICE;
1893                 cdm->matches[j].result.device_result.path_id =
1894                         device->target->bus->path_id;
1895                 cdm->matches[j].result.device_result.target_id =
1896                         device->target->target_id;
1897                 cdm->matches[j].result.device_result.target_lun =
1898                         device->lun_id;
1899                 cdm->matches[j].result.device_result.protocol =
1900                         device->protocol;
1901                 bcopy(&device->inq_data,
1902                       &cdm->matches[j].result.device_result.inq_data,
1903                       sizeof(struct scsi_inquiry_data));
1904                 bcopy(&device->ident_data,
1905                       &cdm->matches[j].result.device_result.ident_data,
1906                       sizeof(struct ata_params));
1907                 bcopy(&device->mmc_ident_data,
1908                       &cdm->matches[j].result.device_result.mmc_ident_data,
1909                       sizeof(struct mmc_params));
1910
1911                 /* Let the user know whether this device is unconfigured */
1912                 if (device->flags & CAM_DEV_UNCONFIGURED)
1913                         cdm->matches[j].result.device_result.flags =
1914                                 DEV_RESULT_UNCONFIGURED;
1915                 else
1916                         cdm->matches[j].result.device_result.flags =
1917                                 DEV_RESULT_NOFLAG;
1918         }
1919
1920         /*
1921          * If the user isn't interested in peripherals, don't descend
1922          * the tree any further.
1923          */
1924         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1925                 return(1);
1926
1927         /*
1928          * If there is a peripheral list generation recorded, make sure
1929          * it hasn't changed.
1930          */
1931         xpt_lock_buses();
1932         mtx_lock(&bus->eb_mtx);
1933         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1934          && (cdm->pos.cookie.bus == bus)
1935          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1936          && (cdm->pos.cookie.target == device->target)
1937          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1938          && (cdm->pos.cookie.device == device)
1939          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1940          && (cdm->pos.cookie.periph != NULL)) {
1941                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1942                     device->generation) {
1943                         mtx_unlock(&bus->eb_mtx);
1944                         xpt_unlock_buses();
1945                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1946                         return(0);
1947                 }
1948                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1949                 periph->refcount++;
1950         } else
1951                 periph = NULL;
1952         mtx_unlock(&bus->eb_mtx);
1953         xpt_unlock_buses();
1954
1955         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1956 }
1957
1958 static int
1959 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1960 {
1961         struct ccb_dev_match *cdm;
1962         dev_match_ret retval;
1963
1964         cdm = (struct ccb_dev_match *)arg;
1965
1966         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1967
1968         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1969                 cdm->status = CAM_DEV_MATCH_ERROR;
1970                 return(0);
1971         }
1972
1973         /*
1974          * If the copy flag is set, copy this peripheral out.
1975          */
1976         if (retval & DM_RET_COPY) {
1977                 int spaceleft, j;
1978
1979                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1980                         sizeof(struct dev_match_result));
1981
1982                 /*
1983                  * If we don't have enough space to put in another
1984                  * match result, save our position and tell the
1985                  * user there are more devices to check.
1986                  */
1987                 if (spaceleft < sizeof(struct dev_match_result)) {
1988                         bzero(&cdm->pos, sizeof(cdm->pos));
1989                         cdm->pos.position_type =
1990                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1991                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1992                                 CAM_DEV_POS_PERIPH;
1993
1994                         cdm->pos.cookie.bus = periph->path->bus;
1995                         cdm->pos.generations[CAM_BUS_GENERATION]=
1996                                 xsoftc.bus_generation;
1997                         cdm->pos.cookie.target = periph->path->target;
1998                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1999                                 periph->path->bus->generation;
2000                         cdm->pos.cookie.device = periph->path->device;
2001                         cdm->pos.generations[CAM_DEV_GENERATION] =
2002                                 periph->path->target->generation;
2003                         cdm->pos.cookie.periph = periph;
2004                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2005                                 periph->path->device->generation;
2006                         cdm->status = CAM_DEV_MATCH_MORE;
2007                         return(0);
2008                 }
2009
2010                 j = cdm->num_matches;
2011                 cdm->num_matches++;
2012                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2013                 cdm->matches[j].result.periph_result.path_id =
2014                         periph->path->bus->path_id;
2015                 cdm->matches[j].result.periph_result.target_id =
2016                         periph->path->target->target_id;
2017                 cdm->matches[j].result.periph_result.target_lun =
2018                         periph->path->device->lun_id;
2019                 cdm->matches[j].result.periph_result.unit_number =
2020                         periph->unit_number;
2021                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2022                         periph->periph_name, DEV_IDLEN);
2023         }
2024
2025         return(1);
2026 }
2027
2028 static int
2029 xptedtmatch(struct ccb_dev_match *cdm)
2030 {
2031         struct cam_eb *bus;
2032         int ret;
2033
2034         cdm->num_matches = 0;
2035
2036         /*
2037          * Check the bus list generation.  If it has changed, the user
2038          * needs to reset everything and start over.
2039          */
2040         xpt_lock_buses();
2041         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2042          && (cdm->pos.cookie.bus != NULL)) {
2043                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
2044                     xsoftc.bus_generation) {
2045                         xpt_unlock_buses();
2046                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2047                         return(0);
2048                 }
2049                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
2050                 bus->refcount++;
2051         } else
2052                 bus = NULL;
2053         xpt_unlock_buses();
2054
2055         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
2056
2057         /*
2058          * If we get back 0, that means that we had to stop before fully
2059          * traversing the EDT.  It also means that one of the subroutines
2060          * has set the status field to the proper value.  If we get back 1,
2061          * we've fully traversed the EDT and copied out any matching entries.
2062          */
2063         if (ret == 1)
2064                 cdm->status = CAM_DEV_MATCH_LAST;
2065
2066         return(ret);
2067 }
2068
2069 static int
2070 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2071 {
2072         struct cam_periph *periph;
2073         struct ccb_dev_match *cdm;
2074
2075         cdm = (struct ccb_dev_match *)arg;
2076
2077         xpt_lock_buses();
2078         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2079          && (cdm->pos.cookie.pdrv == pdrv)
2080          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2081          && (cdm->pos.cookie.periph != NULL)) {
2082                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2083                     (*pdrv)->generation) {
2084                         xpt_unlock_buses();
2085                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2086                         return(0);
2087                 }
2088                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
2089                 periph->refcount++;
2090         } else
2091                 periph = NULL;
2092         xpt_unlock_buses();
2093
2094         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
2095 }
2096
2097 static int
2098 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2099 {
2100         struct ccb_dev_match *cdm;
2101         dev_match_ret retval;
2102
2103         cdm = (struct ccb_dev_match *)arg;
2104
2105         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2106
2107         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2108                 cdm->status = CAM_DEV_MATCH_ERROR;
2109                 return(0);
2110         }
2111
2112         /*
2113          * If the copy flag is set, copy this peripheral out.
2114          */
2115         if (retval & DM_RET_COPY) {
2116                 int spaceleft, j;
2117
2118                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2119                         sizeof(struct dev_match_result));
2120
2121                 /*
2122                  * If we don't have enough space to put in another
2123                  * match result, save our position and tell the
2124                  * user there are more devices to check.
2125                  */
2126                 if (spaceleft < sizeof(struct dev_match_result)) {
2127                         struct periph_driver **pdrv;
2128
2129                         pdrv = NULL;
2130                         bzero(&cdm->pos, sizeof(cdm->pos));
2131                         cdm->pos.position_type =
2132                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2133                                 CAM_DEV_POS_PERIPH;
2134
2135                         /*
2136                          * This may look a bit non-sensical, but it is
2137                          * actually quite logical.  There are very few
2138                          * peripheral drivers, and bloating every peripheral
2139                          * structure with a pointer back to its parent
2140                          * peripheral driver linker set entry would cost
2141                          * more in the long run than doing this quick lookup.
2142                          */
2143                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2144                                 if (strcmp((*pdrv)->driver_name,
2145                                     periph->periph_name) == 0)
2146                                         break;
2147                         }
2148
2149                         if (*pdrv == NULL) {
2150                                 cdm->status = CAM_DEV_MATCH_ERROR;
2151                                 return(0);
2152                         }
2153
2154                         cdm->pos.cookie.pdrv = pdrv;
2155                         /*
2156                          * The periph generation slot does double duty, as
2157                          * does the periph pointer slot.  They are used for
2158                          * both edt and pdrv lookups and positioning.
2159                          */
2160                         cdm->pos.cookie.periph = periph;
2161                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2162                                 (*pdrv)->generation;
2163                         cdm->status = CAM_DEV_MATCH_MORE;
2164                         return(0);
2165                 }
2166
2167                 j = cdm->num_matches;
2168                 cdm->num_matches++;
2169                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2170                 cdm->matches[j].result.periph_result.path_id =
2171                         periph->path->bus->path_id;
2172
2173                 /*
2174                  * The transport layer peripheral doesn't have a target or
2175                  * lun.
2176                  */
2177                 if (periph->path->target)
2178                         cdm->matches[j].result.periph_result.target_id =
2179                                 periph->path->target->target_id;
2180                 else
2181                         cdm->matches[j].result.periph_result.target_id =
2182                                 CAM_TARGET_WILDCARD;
2183
2184                 if (periph->path->device)
2185                         cdm->matches[j].result.periph_result.target_lun =
2186                                 periph->path->device->lun_id;
2187                 else
2188                         cdm->matches[j].result.periph_result.target_lun =
2189                                 CAM_LUN_WILDCARD;
2190
2191                 cdm->matches[j].result.periph_result.unit_number =
2192                         periph->unit_number;
2193                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2194                         periph->periph_name, DEV_IDLEN);
2195         }
2196
2197         return(1);
2198 }
2199
2200 static int
2201 xptperiphlistmatch(struct ccb_dev_match *cdm)
2202 {
2203         int ret;
2204
2205         cdm->num_matches = 0;
2206
2207         /*
2208          * At this point in the edt traversal function, we check the bus
2209          * list generation to make sure that no buses have been added or
2210          * removed since the user last sent a XPT_DEV_MATCH ccb through.
2211          * For the peripheral driver list traversal function, however, we
2212          * don't have to worry about new peripheral driver types coming or
2213          * going; they're in a linker set, and therefore can't change
2214          * without a recompile.
2215          */
2216
2217         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2218          && (cdm->pos.cookie.pdrv != NULL))
2219                 ret = xptpdrvtraverse(
2220                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2221                                 xptplistpdrvfunc, cdm);
2222         else
2223                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2224
2225         /*
2226          * If we get back 0, that means that we had to stop before fully
2227          * traversing the peripheral driver tree.  It also means that one of
2228          * the subroutines has set the status field to the proper value.  If
2229          * we get back 1, we've fully traversed the EDT and copied out any
2230          * matching entries.
2231          */
2232         if (ret == 1)
2233                 cdm->status = CAM_DEV_MATCH_LAST;
2234
2235         return(ret);
2236 }
2237
2238 static int
2239 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2240 {
2241         struct cam_eb *bus, *next_bus;
2242         int retval;
2243
2244         retval = 1;
2245         if (start_bus)
2246                 bus = start_bus;
2247         else {
2248                 xpt_lock_buses();
2249                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2250                 if (bus == NULL) {
2251                         xpt_unlock_buses();
2252                         return (retval);
2253                 }
2254                 bus->refcount++;
2255                 xpt_unlock_buses();
2256         }
2257         for (; bus != NULL; bus = next_bus) {
2258                 retval = tr_func(bus, arg);
2259                 if (retval == 0) {
2260                         xpt_release_bus(bus);
2261                         break;
2262                 }
2263                 xpt_lock_buses();
2264                 next_bus = TAILQ_NEXT(bus, links);
2265                 if (next_bus)
2266                         next_bus->refcount++;
2267                 xpt_unlock_buses();
2268                 xpt_release_bus(bus);
2269         }
2270         return(retval);
2271 }
2272
2273 static int
2274 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2275                   xpt_targetfunc_t *tr_func, void *arg)
2276 {
2277         struct cam_et *target, *next_target;
2278         int retval;
2279
2280         retval = 1;
2281         if (start_target)
2282                 target = start_target;
2283         else {
2284                 mtx_lock(&bus->eb_mtx);
2285                 target = TAILQ_FIRST(&bus->et_entries);
2286                 if (target == NULL) {
2287                         mtx_unlock(&bus->eb_mtx);
2288                         return (retval);
2289                 }
2290                 target->refcount++;
2291                 mtx_unlock(&bus->eb_mtx);
2292         }
2293         for (; target != NULL; target = next_target) {
2294                 retval = tr_func(target, arg);
2295                 if (retval == 0) {
2296                         xpt_release_target(target);
2297                         break;
2298                 }
2299                 mtx_lock(&bus->eb_mtx);
2300                 next_target = TAILQ_NEXT(target, links);
2301                 if (next_target)
2302                         next_target->refcount++;
2303                 mtx_unlock(&bus->eb_mtx);
2304                 xpt_release_target(target);
2305         }
2306         return(retval);
2307 }
2308
2309 static int
2310 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2311                   xpt_devicefunc_t *tr_func, void *arg)
2312 {
2313         struct cam_eb *bus;
2314         struct cam_ed *device, *next_device;
2315         int retval;
2316
2317         retval = 1;
2318         bus = target->bus;
2319         if (start_device)
2320                 device = start_device;
2321         else {
2322                 mtx_lock(&bus->eb_mtx);
2323                 device = TAILQ_FIRST(&target->ed_entries);
2324                 if (device == NULL) {
2325                         mtx_unlock(&bus->eb_mtx);
2326                         return (retval);
2327                 }
2328                 device->refcount++;
2329                 mtx_unlock(&bus->eb_mtx);
2330         }
2331         for (; device != NULL; device = next_device) {
2332                 mtx_lock(&device->device_mtx);
2333                 retval = tr_func(device, arg);
2334                 mtx_unlock(&device->device_mtx);
2335                 if (retval == 0) {
2336                         xpt_release_device(device);
2337                         break;
2338                 }
2339                 mtx_lock(&bus->eb_mtx);
2340                 next_device = TAILQ_NEXT(device, links);
2341                 if (next_device)
2342                         next_device->refcount++;
2343                 mtx_unlock(&bus->eb_mtx);
2344                 xpt_release_device(device);
2345         }
2346         return(retval);
2347 }
2348
2349 static int
2350 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2351                   xpt_periphfunc_t *tr_func, void *arg)
2352 {
2353         struct cam_eb *bus;
2354         struct cam_periph *periph, *next_periph;
2355         int retval;
2356
2357         retval = 1;
2358
2359         bus = device->target->bus;
2360         if (start_periph)
2361                 periph = start_periph;
2362         else {
2363                 xpt_lock_buses();
2364                 mtx_lock(&bus->eb_mtx);
2365                 periph = SLIST_FIRST(&device->periphs);
2366                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2367                         periph = SLIST_NEXT(periph, periph_links);
2368                 if (periph == NULL) {
2369                         mtx_unlock(&bus->eb_mtx);
2370                         xpt_unlock_buses();
2371                         return (retval);
2372                 }
2373                 periph->refcount++;
2374                 mtx_unlock(&bus->eb_mtx);
2375                 xpt_unlock_buses();
2376         }
2377         for (; periph != NULL; periph = next_periph) {
2378                 retval = tr_func(periph, arg);
2379                 if (retval == 0) {
2380                         cam_periph_release_locked(periph);
2381                         break;
2382                 }
2383                 xpt_lock_buses();
2384                 mtx_lock(&bus->eb_mtx);
2385                 next_periph = SLIST_NEXT(periph, periph_links);
2386                 while (next_periph != NULL &&
2387                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2388                         next_periph = SLIST_NEXT(next_periph, periph_links);
2389                 if (next_periph)
2390                         next_periph->refcount++;
2391                 mtx_unlock(&bus->eb_mtx);
2392                 xpt_unlock_buses();
2393                 cam_periph_release_locked(periph);
2394         }
2395         return(retval);
2396 }
2397
2398 static int
2399 xptpdrvtraverse(struct periph_driver **start_pdrv,
2400                 xpt_pdrvfunc_t *tr_func, void *arg)
2401 {
2402         struct periph_driver **pdrv;
2403         int retval;
2404
2405         retval = 1;
2406
2407         /*
2408          * We don't traverse the peripheral driver list like we do the
2409          * other lists, because it is a linker set, and therefore cannot be
2410          * changed during runtime.  If the peripheral driver list is ever
2411          * re-done to be something other than a linker set (i.e. it can
2412          * change while the system is running), the list traversal should
2413          * be modified to work like the other traversal functions.
2414          */
2415         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2416              *pdrv != NULL; pdrv++) {
2417                 retval = tr_func(pdrv, arg);
2418
2419                 if (retval == 0)
2420                         return(retval);
2421         }
2422
2423         return(retval);
2424 }
2425
2426 static int
2427 xptpdperiphtraverse(struct periph_driver **pdrv,
2428                     struct cam_periph *start_periph,
2429                     xpt_periphfunc_t *tr_func, void *arg)
2430 {
2431         struct cam_periph *periph, *next_periph;
2432         int retval;
2433
2434         retval = 1;
2435
2436         if (start_periph)
2437                 periph = start_periph;
2438         else {
2439                 xpt_lock_buses();
2440                 periph = TAILQ_FIRST(&(*pdrv)->units);
2441                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2442                         periph = TAILQ_NEXT(periph, unit_links);
2443                 if (periph == NULL) {
2444                         xpt_unlock_buses();
2445                         return (retval);
2446                 }
2447                 periph->refcount++;
2448                 xpt_unlock_buses();
2449         }
2450         for (; periph != NULL; periph = next_periph) {
2451                 cam_periph_lock(periph);
2452                 retval = tr_func(periph, arg);
2453                 cam_periph_unlock(periph);
2454                 if (retval == 0) {
2455                         cam_periph_release(periph);
2456                         break;
2457                 }
2458                 xpt_lock_buses();
2459                 next_periph = TAILQ_NEXT(periph, unit_links);
2460                 while (next_periph != NULL &&
2461                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2462                         next_periph = TAILQ_NEXT(next_periph, unit_links);
2463                 if (next_periph)
2464                         next_periph->refcount++;
2465                 xpt_unlock_buses();
2466                 cam_periph_release(periph);
2467         }
2468         return(retval);
2469 }
2470
2471 static int
2472 xptdefbusfunc(struct cam_eb *bus, void *arg)
2473 {
2474         struct xpt_traverse_config *tr_config;
2475
2476         tr_config = (struct xpt_traverse_config *)arg;
2477
2478         if (tr_config->depth == XPT_DEPTH_BUS) {
2479                 xpt_busfunc_t *tr_func;
2480
2481                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2482
2483                 return(tr_func(bus, tr_config->tr_arg));
2484         } else
2485                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2486 }
2487
2488 static int
2489 xptdeftargetfunc(struct cam_et *target, void *arg)
2490 {
2491         struct xpt_traverse_config *tr_config;
2492
2493         tr_config = (struct xpt_traverse_config *)arg;
2494
2495         if (tr_config->depth == XPT_DEPTH_TARGET) {
2496                 xpt_targetfunc_t *tr_func;
2497
2498                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2499
2500                 return(tr_func(target, tr_config->tr_arg));
2501         } else
2502                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2503 }
2504
2505 static int
2506 xptdefdevicefunc(struct cam_ed *device, void *arg)
2507 {
2508         struct xpt_traverse_config *tr_config;
2509
2510         tr_config = (struct xpt_traverse_config *)arg;
2511
2512         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2513                 xpt_devicefunc_t *tr_func;
2514
2515                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2516
2517                 return(tr_func(device, tr_config->tr_arg));
2518         } else
2519                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2520 }
2521
2522 static int
2523 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2524 {
2525         struct xpt_traverse_config *tr_config;
2526         xpt_periphfunc_t *tr_func;
2527
2528         tr_config = (struct xpt_traverse_config *)arg;
2529
2530         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2531
2532         /*
2533          * Unlike the other default functions, we don't check for depth
2534          * here.  The peripheral driver level is the last level in the EDT,
2535          * so if we're here, we should execute the function in question.
2536          */
2537         return(tr_func(periph, tr_config->tr_arg));
2538 }
2539
2540 /*
2541  * Execute the given function for every bus in the EDT.
2542  */
2543 static int
2544 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2545 {
2546         struct xpt_traverse_config tr_config;
2547
2548         tr_config.depth = XPT_DEPTH_BUS;
2549         tr_config.tr_func = tr_func;
2550         tr_config.tr_arg = arg;
2551
2552         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2553 }
2554
2555 /*
2556  * Execute the given function for every device in the EDT.
2557  */
2558 static int
2559 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2560 {
2561         struct xpt_traverse_config tr_config;
2562
2563         tr_config.depth = XPT_DEPTH_DEVICE;
2564         tr_config.tr_func = tr_func;
2565         tr_config.tr_arg = arg;
2566
2567         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2568 }
2569
2570 static int
2571 xptsetasyncfunc(struct cam_ed *device, void *arg)
2572 {
2573         struct cam_path path;
2574         struct ccb_getdev cgd;
2575         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2576
2577         /*
2578          * Don't report unconfigured devices (Wildcard devs,
2579          * devices only for target mode, device instances
2580          * that have been invalidated but are waiting for
2581          * their last reference count to be released).
2582          */
2583         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2584                 return (1);
2585
2586         xpt_compile_path(&path,
2587                          NULL,
2588                          device->target->bus->path_id,
2589                          device->target->target_id,
2590                          device->lun_id);
2591         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2592         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2593         xpt_action((union ccb *)&cgd);
2594         csa->callback(csa->callback_arg,
2595                             AC_FOUND_DEVICE,
2596                             &path, &cgd);
2597         xpt_release_path(&path);
2598
2599         return(1);
2600 }
2601
2602 static int
2603 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2604 {
2605         struct cam_path path;
2606         struct ccb_pathinq cpi;
2607         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2608
2609         xpt_compile_path(&path, /*periph*/NULL,
2610                          bus->path_id,
2611                          CAM_TARGET_WILDCARD,
2612                          CAM_LUN_WILDCARD);
2613         xpt_path_lock(&path);
2614         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2615         cpi.ccb_h.func_code = XPT_PATH_INQ;
2616         xpt_action((union ccb *)&cpi);
2617         csa->callback(csa->callback_arg,
2618                             AC_PATH_REGISTERED,
2619                             &path, &cpi);
2620         xpt_path_unlock(&path);
2621         xpt_release_path(&path);
2622
2623         return(1);
2624 }
2625
2626 void
2627 xpt_action(union ccb *start_ccb)
2628 {
2629
2630         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2631             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2632                 xpt_action_name(start_ccb->ccb_h.func_code)));
2633
2634         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2635         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2636 }
2637
2638 void
2639 xpt_action_default(union ccb *start_ccb)
2640 {
2641         struct cam_path *path;
2642         struct cam_sim *sim;
2643         struct mtx *mtx;
2644
2645         path = start_ccb->ccb_h.path;
2646         CAM_DEBUG(path, CAM_DEBUG_TRACE,
2647             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2648                 xpt_action_name(start_ccb->ccb_h.func_code)));
2649
2650         switch (start_ccb->ccb_h.func_code) {
2651         case XPT_SCSI_IO:
2652         {
2653                 struct cam_ed *device;
2654
2655                 /*
2656                  * For the sake of compatibility with SCSI-1
2657                  * devices that may not understand the identify
2658                  * message, we include lun information in the
2659                  * second byte of all commands.  SCSI-1 specifies
2660                  * that luns are a 3 bit value and reserves only 3
2661                  * bits for lun information in the CDB.  Later
2662                  * revisions of the SCSI spec allow for more than 8
2663                  * luns, but have deprecated lun information in the
2664                  * CDB.  So, if the lun won't fit, we must omit.
2665                  *
2666                  * Also be aware that during initial probing for devices,
2667                  * the inquiry information is unknown but initialized to 0.
2668                  * This means that this code will be exercised while probing
2669                  * devices with an ANSI revision greater than 2.
2670                  */
2671                 device = path->device;
2672                 if (device->protocol_version <= SCSI_REV_2
2673                  && start_ccb->ccb_h.target_lun < 8
2674                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2675
2676                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2677                             start_ccb->ccb_h.target_lun << 5;
2678                 }
2679                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2680         }
2681         /* FALLTHROUGH */
2682         case XPT_TARGET_IO:
2683         case XPT_CONT_TARGET_IO:
2684                 start_ccb->csio.sense_resid = 0;
2685                 start_ccb->csio.resid = 0;
2686                 /* FALLTHROUGH */
2687         case XPT_ATA_IO:
2688                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2689                         start_ccb->ataio.resid = 0;
2690                 /* FALLTHROUGH */
2691         case XPT_NVME_IO:
2692                 if (start_ccb->ccb_h.func_code == XPT_NVME_IO)
2693                         start_ccb->nvmeio.resid = 0;
2694                 /* FALLTHROUGH */
2695         case XPT_MMC_IO:
2696                 /* XXX just like nmve_io? */
2697         case XPT_RESET_DEV:
2698         case XPT_ENG_EXEC:
2699         case XPT_SMP_IO:
2700         {
2701                 struct cam_devq *devq;
2702
2703                 devq = path->bus->sim->devq;
2704                 mtx_lock(&devq->send_mtx);
2705                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2706                 if (xpt_schedule_devq(devq, path->device) != 0)
2707                         xpt_run_devq(devq);
2708                 mtx_unlock(&devq->send_mtx);
2709                 break;
2710         }
2711         case XPT_CALC_GEOMETRY:
2712                 /* Filter out garbage */
2713                 if (start_ccb->ccg.block_size == 0
2714                  || start_ccb->ccg.volume_size == 0) {
2715                         start_ccb->ccg.cylinders = 0;
2716                         start_ccb->ccg.heads = 0;
2717                         start_ccb->ccg.secs_per_track = 0;
2718                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2719                         break;
2720                 }
2721 #if defined(__sparc64__)
2722                 /*
2723                  * For sparc64, we may need adjust the geometry of large
2724                  * disks in order to fit the limitations of the 16-bit
2725                  * fields of the VTOC8 disk label.
2726                  */
2727                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2728                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2729                         break;
2730                 }
2731 #endif
2732                 goto call_sim;
2733         case XPT_ABORT:
2734         {
2735                 union ccb* abort_ccb;
2736
2737                 abort_ccb = start_ccb->cab.abort_ccb;
2738                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2739                         struct cam_ed *device;
2740                         struct cam_devq *devq;
2741
2742                         device = abort_ccb->ccb_h.path->device;
2743                         devq = device->sim->devq;
2744
2745                         mtx_lock(&devq->send_mtx);
2746                         if (abort_ccb->ccb_h.pinfo.index > 0) {
2747                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2748                                 abort_ccb->ccb_h.status =
2749                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2750                                 xpt_freeze_devq_device(device, 1);
2751                                 mtx_unlock(&devq->send_mtx);
2752                                 xpt_done(abort_ccb);
2753                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2754                                 break;
2755                         }
2756                         mtx_unlock(&devq->send_mtx);
2757
2758                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2759                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2760                                 /*
2761                                  * We've caught this ccb en route to
2762                                  * the SIM.  Flag it for abort and the
2763                                  * SIM will do so just before starting
2764                                  * real work on the CCB.
2765                                  */
2766                                 abort_ccb->ccb_h.status =
2767                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2768                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2769                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2770                                 break;
2771                         }
2772                 }
2773                 if (XPT_FC_IS_QUEUED(abort_ccb)
2774                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2775                         /*
2776                          * It's already completed but waiting
2777                          * for our SWI to get to it.
2778                          */
2779                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2780                         break;
2781                 }
2782                 /*
2783                  * If we weren't able to take care of the abort request
2784                  * in the XPT, pass the request down to the SIM for processing.
2785                  */
2786         }
2787         /* FALLTHROUGH */
2788         case XPT_ACCEPT_TARGET_IO:
2789         case XPT_EN_LUN:
2790         case XPT_IMMED_NOTIFY:
2791         case XPT_NOTIFY_ACK:
2792         case XPT_RESET_BUS:
2793         case XPT_IMMEDIATE_NOTIFY:
2794         case XPT_NOTIFY_ACKNOWLEDGE:
2795         case XPT_GET_SIM_KNOB_OLD:
2796         case XPT_GET_SIM_KNOB:
2797         case XPT_SET_SIM_KNOB:
2798         case XPT_GET_TRAN_SETTINGS:
2799         case XPT_SET_TRAN_SETTINGS:
2800         case XPT_PATH_INQ:
2801 call_sim:
2802                 sim = path->bus->sim;
2803                 mtx = sim->mtx;
2804                 if (mtx && !mtx_owned(mtx))
2805                         mtx_lock(mtx);
2806                 else
2807                         mtx = NULL;
2808
2809                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2810                     ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2811                 (*(sim->sim_action))(sim, start_ccb);
2812                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2813                     ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2814                 if (mtx)
2815                         mtx_unlock(mtx);
2816                 break;
2817         case XPT_PATH_STATS:
2818                 start_ccb->cpis.last_reset = path->bus->last_reset;
2819                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2820                 break;
2821         case XPT_GDEV_TYPE:
2822         {
2823                 struct cam_ed *dev;
2824
2825                 dev = path->device;
2826                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2827                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2828                 } else {
2829                         struct ccb_getdev *cgd;
2830
2831                         cgd = &start_ccb->cgd;
2832                         cgd->protocol = dev->protocol;
2833                         cgd->inq_data = dev->inq_data;
2834                         cgd->ident_data = dev->ident_data;
2835                         cgd->inq_flags = dev->inq_flags;
2836                         cgd->nvme_data = dev->nvme_data;
2837                         cgd->nvme_cdata = dev->nvme_cdata;
2838                         cgd->ccb_h.status = CAM_REQ_CMP;
2839                         cgd->serial_num_len = dev->serial_num_len;
2840                         if ((dev->serial_num_len > 0)
2841                          && (dev->serial_num != NULL))
2842                                 bcopy(dev->serial_num, cgd->serial_num,
2843                                       dev->serial_num_len);
2844                 }
2845                 break;
2846         }
2847         case XPT_GDEV_STATS:
2848         {
2849                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
2850                 struct cam_ed *dev = path->device;
2851                 struct cam_eb *bus = path->bus;
2852                 struct cam_et *tar = path->target;
2853                 struct cam_devq *devq = bus->sim->devq;
2854
2855                 mtx_lock(&devq->send_mtx);
2856                 cgds->dev_openings = dev->ccbq.dev_openings;
2857                 cgds->dev_active = dev->ccbq.dev_active;
2858                 cgds->allocated = dev->ccbq.allocated;
2859                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2860                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2861                 cgds->last_reset = tar->last_reset;
2862                 cgds->maxtags = dev->maxtags;
2863                 cgds->mintags = dev->mintags;
2864                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2865                         cgds->last_reset = bus->last_reset;
2866                 mtx_unlock(&devq->send_mtx);
2867                 cgds->ccb_h.status = CAM_REQ_CMP;
2868                 break;
2869         }
2870         case XPT_GDEVLIST:
2871         {
2872                 struct cam_periph       *nperiph;
2873                 struct periph_list      *periph_head;
2874                 struct ccb_getdevlist   *cgdl;
2875                 u_int                   i;
2876                 struct cam_ed           *device;
2877                 int                     found;
2878
2879
2880                 found = 0;
2881
2882                 /*
2883                  * Don't want anyone mucking with our data.
2884                  */
2885                 device = path->device;
2886                 periph_head = &device->periphs;
2887                 cgdl = &start_ccb->cgdl;
2888
2889                 /*
2890                  * Check and see if the list has changed since the user
2891                  * last requested a list member.  If so, tell them that the
2892                  * list has changed, and therefore they need to start over
2893                  * from the beginning.
2894                  */
2895                 if ((cgdl->index != 0) &&
2896                     (cgdl->generation != device->generation)) {
2897                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2898                         break;
2899                 }
2900
2901                 /*
2902                  * Traverse the list of peripherals and attempt to find
2903                  * the requested peripheral.
2904                  */
2905                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
2906                      (nperiph != NULL) && (i <= cgdl->index);
2907                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2908                         if (i == cgdl->index) {
2909                                 strncpy(cgdl->periph_name,
2910                                         nperiph->periph_name,
2911                                         DEV_IDLEN);
2912                                 cgdl->unit_number = nperiph->unit_number;
2913                                 found = 1;
2914                         }
2915                 }
2916                 if (found == 0) {
2917                         cgdl->status = CAM_GDEVLIST_ERROR;
2918                         break;
2919                 }
2920
2921                 if (nperiph == NULL)
2922                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2923                 else
2924                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2925
2926                 cgdl->index++;
2927                 cgdl->generation = device->generation;
2928
2929                 cgdl->ccb_h.status = CAM_REQ_CMP;
2930                 break;
2931         }
2932         case XPT_DEV_MATCH:
2933         {
2934                 dev_pos_type position_type;
2935                 struct ccb_dev_match *cdm;
2936
2937                 cdm = &start_ccb->cdm;
2938
2939                 /*
2940                  * There are two ways of getting at information in the EDT.
2941                  * The first way is via the primary EDT tree.  It starts
2942                  * with a list of buses, then a list of targets on a bus,
2943                  * then devices/luns on a target, and then peripherals on a
2944                  * device/lun.  The "other" way is by the peripheral driver
2945                  * lists.  The peripheral driver lists are organized by
2946                  * peripheral driver.  (obviously)  So it makes sense to
2947                  * use the peripheral driver list if the user is looking
2948                  * for something like "da1", or all "da" devices.  If the
2949                  * user is looking for something on a particular bus/target
2950                  * or lun, it's generally better to go through the EDT tree.
2951                  */
2952
2953                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2954                         position_type = cdm->pos.position_type;
2955                 else {
2956                         u_int i;
2957
2958                         position_type = CAM_DEV_POS_NONE;
2959
2960                         for (i = 0; i < cdm->num_patterns; i++) {
2961                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2962                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2963                                         position_type = CAM_DEV_POS_EDT;
2964                                         break;
2965                                 }
2966                         }
2967
2968                         if (cdm->num_patterns == 0)
2969                                 position_type = CAM_DEV_POS_EDT;
2970                         else if (position_type == CAM_DEV_POS_NONE)
2971                                 position_type = CAM_DEV_POS_PDRV;
2972                 }
2973
2974                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
2975                 case CAM_DEV_POS_EDT:
2976                         xptedtmatch(cdm);
2977                         break;
2978                 case CAM_DEV_POS_PDRV:
2979                         xptperiphlistmatch(cdm);
2980                         break;
2981                 default:
2982                         cdm->status = CAM_DEV_MATCH_ERROR;
2983                         break;
2984                 }
2985
2986                 if (cdm->status == CAM_DEV_MATCH_ERROR)
2987                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2988                 else
2989                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2990
2991                 break;
2992         }
2993         case XPT_SASYNC_CB:
2994         {
2995                 struct ccb_setasync *csa;
2996                 struct async_node *cur_entry;
2997                 struct async_list *async_head;
2998                 u_int32_t added;
2999
3000                 csa = &start_ccb->csa;
3001                 added = csa->event_enable;
3002                 async_head = &path->device->asyncs;
3003
3004                 /*
3005                  * If there is already an entry for us, simply
3006                  * update it.
3007                  */
3008                 cur_entry = SLIST_FIRST(async_head);
3009                 while (cur_entry != NULL) {
3010                         if ((cur_entry->callback_arg == csa->callback_arg)
3011                          && (cur_entry->callback == csa->callback))
3012                                 break;
3013                         cur_entry = SLIST_NEXT(cur_entry, links);
3014                 }
3015
3016                 if (cur_entry != NULL) {
3017                         /*
3018                          * If the request has no flags set,
3019                          * remove the entry.
3020                          */
3021                         added &= ~cur_entry->event_enable;
3022                         if (csa->event_enable == 0) {
3023                                 SLIST_REMOVE(async_head, cur_entry,
3024                                              async_node, links);
3025                                 xpt_release_device(path->device);
3026                                 free(cur_entry, M_CAMXPT);
3027                         } else {
3028                                 cur_entry->event_enable = csa->event_enable;
3029                         }
3030                         csa->event_enable = added;
3031                 } else {
3032                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3033                                            M_NOWAIT);
3034                         if (cur_entry == NULL) {
3035                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3036                                 break;
3037                         }
3038                         cur_entry->event_enable = csa->event_enable;
3039                         cur_entry->event_lock = (path->bus->sim->mtx &&
3040                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
3041                         cur_entry->callback_arg = csa->callback_arg;
3042                         cur_entry->callback = csa->callback;
3043                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3044                         xpt_acquire_device(path->device);
3045                 }
3046                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3047                 break;
3048         }
3049         case XPT_REL_SIMQ:
3050         {
3051                 struct ccb_relsim *crs;
3052                 struct cam_ed *dev;
3053
3054                 crs = &start_ccb->crs;
3055                 dev = path->device;
3056                 if (dev == NULL) {
3057
3058                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3059                         break;
3060                 }
3061
3062                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3063
3064                         /* Don't ever go below one opening */
3065                         if (crs->openings > 0) {
3066                                 xpt_dev_ccbq_resize(path, crs->openings);
3067                                 if (bootverbose) {
3068                                         xpt_print(path,
3069                                             "number of openings is now %d\n",
3070                                             crs->openings);
3071                                 }
3072                         }
3073                 }
3074
3075                 mtx_lock(&dev->sim->devq->send_mtx);
3076                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3077
3078                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3079
3080                                 /*
3081                                  * Just extend the old timeout and decrement
3082                                  * the freeze count so that a single timeout
3083                                  * is sufficient for releasing the queue.
3084                                  */
3085                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3086                                 callout_stop(&dev->callout);
3087                         } else {
3088
3089                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3090                         }
3091
3092                         callout_reset_sbt(&dev->callout,
3093                             SBT_1MS * crs->release_timeout, 0,
3094                             xpt_release_devq_timeout, dev, 0);
3095
3096                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3097
3098                 }
3099
3100                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3101
3102                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3103                                 /*
3104                                  * Decrement the freeze count so that a single
3105                                  * completion is still sufficient to unfreeze
3106                                  * the queue.
3107                                  */
3108                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3109                         } else {
3110
3111                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3112                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3113                         }
3114                 }
3115
3116                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3117
3118                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3119                          || (dev->ccbq.dev_active == 0)) {
3120
3121                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3122                         } else {
3123
3124                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3125                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3126                         }
3127                 }
3128                 mtx_unlock(&dev->sim->devq->send_mtx);
3129
3130                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
3131                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
3132                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
3133                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3134                 break;
3135         }
3136         case XPT_DEBUG: {
3137                 struct cam_path *oldpath;
3138
3139                 /* Check that all request bits are supported. */
3140                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3141                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3142                         break;
3143                 }
3144
3145                 cam_dflags = CAM_DEBUG_NONE;
3146                 if (cam_dpath != NULL) {
3147                         oldpath = cam_dpath;
3148                         cam_dpath = NULL;
3149                         xpt_free_path(oldpath);
3150                 }
3151                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3152                         if (xpt_create_path(&cam_dpath, NULL,
3153                                             start_ccb->ccb_h.path_id,
3154                                             start_ccb->ccb_h.target_id,
3155                                             start_ccb->ccb_h.target_lun) !=
3156                                             CAM_REQ_CMP) {
3157                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3158                         } else {
3159                                 cam_dflags = start_ccb->cdbg.flags;
3160                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3161                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3162                                     cam_dflags);
3163                         }
3164                 } else
3165                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3166                 break;
3167         }
3168         case XPT_NOOP:
3169                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3170                         xpt_freeze_devq(path, 1);
3171                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3172                 break;
3173         case XPT_REPROBE_LUN:
3174                 xpt_async(AC_INQ_CHANGED, path, NULL);
3175                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3176                 xpt_done(start_ccb);
3177                 break;
3178         default:
3179         case XPT_SDEV_TYPE:
3180         case XPT_TERM_IO:
3181         case XPT_ENG_INQ:
3182                 /* XXX Implement */
3183                 xpt_print(start_ccb->ccb_h.path,
3184                     "%s: CCB type %#x %s not supported\n", __func__,
3185                     start_ccb->ccb_h.func_code,
3186                     xpt_action_name(start_ccb->ccb_h.func_code));
3187                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3188                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3189                         xpt_done(start_ccb);
3190                 }
3191                 break;
3192         }
3193         CAM_DEBUG(path, CAM_DEBUG_TRACE,
3194             ("xpt_action_default: func= %#x %s status %#x\n",
3195                 start_ccb->ccb_h.func_code,
3196                 xpt_action_name(start_ccb->ccb_h.func_code),
3197                 start_ccb->ccb_h.status));
3198 }
3199
3200 void
3201 xpt_polled_action(union ccb *start_ccb)
3202 {
3203         u_int32_t timeout;
3204         struct    cam_sim *sim;
3205         struct    cam_devq *devq;
3206         struct    cam_ed *dev;
3207         struct mtx *mtx;
3208
3209         timeout = start_ccb->ccb_h.timeout * 10;
3210         sim = start_ccb->ccb_h.path->bus->sim;
3211         devq = sim->devq;
3212         mtx = sim->mtx;
3213         dev = start_ccb->ccb_h.path->device;
3214
3215         mtx_unlock(&dev->device_mtx);
3216
3217         /*
3218          * Steal an opening so that no other queued requests
3219          * can get it before us while we simulate interrupts.
3220          */
3221         mtx_lock(&devq->send_mtx);
3222         dev->ccbq.dev_openings--;
3223         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3224             (--timeout > 0)) {
3225                 mtx_unlock(&devq->send_mtx);
3226                 DELAY(100);
3227                 if (mtx)
3228                         mtx_lock(mtx);
3229                 (*(sim->sim_poll))(sim);
3230                 if (mtx)
3231                         mtx_unlock(mtx);
3232                 camisr_runqueue();
3233                 mtx_lock(&devq->send_mtx);
3234         }
3235         dev->ccbq.dev_openings++;
3236         mtx_unlock(&devq->send_mtx);
3237
3238         if (timeout != 0) {
3239                 xpt_action(start_ccb);
3240                 while(--timeout > 0) {
3241                         if (mtx)
3242                                 mtx_lock(mtx);
3243                         (*(sim->sim_poll))(sim);
3244                         if (mtx)
3245                                 mtx_unlock(mtx);
3246                         camisr_runqueue();
3247                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3248                             != CAM_REQ_INPROG)
3249                                 break;
3250                         DELAY(100);
3251                 }
3252                 if (timeout == 0) {
3253                         /*
3254                          * XXX Is it worth adding a sim_timeout entry
3255                          * point so we can attempt recovery?  If
3256                          * this is only used for dumps, I don't think
3257                          * it is.
3258                          */
3259                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3260                 }
3261         } else {
3262                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3263         }
3264
3265         mtx_lock(&dev->device_mtx);
3266 }
3267
3268 /*
3269  * Schedule a peripheral driver to receive a ccb when its
3270  * target device has space for more transactions.
3271  */
3272 void
3273 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3274 {
3275
3276         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3277         cam_periph_assert(periph, MA_OWNED);
3278         if (new_priority < periph->scheduled_priority) {
3279                 periph->scheduled_priority = new_priority;
3280                 xpt_run_allocq(periph, 0);
3281         }
3282 }
3283
3284
3285 /*
3286  * Schedule a device to run on a given queue.
3287  * If the device was inserted as a new entry on the queue,
3288  * return 1 meaning the device queue should be run. If we
3289  * were already queued, implying someone else has already
3290  * started the queue, return 0 so the caller doesn't attempt
3291  * to run the queue.
3292  */
3293 static int
3294 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3295                  u_int32_t new_priority)
3296 {
3297         int retval;
3298         u_int32_t old_priority;
3299
3300         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3301
3302         old_priority = pinfo->priority;
3303
3304         /*
3305          * Are we already queued?
3306          */
3307         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3308                 /* Simply reorder based on new priority */
3309                 if (new_priority < old_priority) {
3310                         camq_change_priority(queue, pinfo->index,
3311                                              new_priority);
3312                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3313                                         ("changed priority to %d\n",
3314                                          new_priority));
3315                         retval = 1;
3316                 } else
3317                         retval = 0;
3318         } else {
3319                 /* New entry on the queue */
3320                 if (new_priority < old_priority)
3321                         pinfo->priority = new_priority;
3322
3323                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3324                                 ("Inserting onto queue\n"));
3325                 pinfo->generation = ++queue->generation;
3326                 camq_insert(queue, pinfo);
3327                 retval = 1;
3328         }
3329         return (retval);
3330 }
3331
3332 static void
3333 xpt_run_allocq_task(void *context, int pending)
3334 {
3335         struct cam_periph *periph = context;
3336
3337         cam_periph_lock(periph);
3338         periph->flags &= ~CAM_PERIPH_RUN_TASK;
3339         xpt_run_allocq(periph, 1);
3340         cam_periph_unlock(periph);
3341         cam_periph_release(periph);
3342 }
3343
3344 static void
3345 xpt_run_allocq(struct cam_periph *periph, int sleep)
3346 {
3347         struct cam_ed   *device;
3348         union ccb       *ccb;
3349         uint32_t         prio;
3350
3351         cam_periph_assert(periph, MA_OWNED);
3352         if (periph->periph_allocating)
3353                 return;
3354         periph->periph_allocating = 1;
3355         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3356         device = periph->path->device;
3357         ccb = NULL;
3358 restart:
3359         while ((prio = min(periph->scheduled_priority,
3360             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3361             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3362              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3363
3364                 if (ccb == NULL &&
3365                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3366                         if (sleep) {
3367                                 ccb = xpt_get_ccb(periph);
3368                                 goto restart;
3369                         }
3370                         if (periph->flags & CAM_PERIPH_RUN_TASK)
3371                                 break;
3372                         cam_periph_doacquire(periph);
3373                         periph->flags |= CAM_PERIPH_RUN_TASK;
3374                         taskqueue_enqueue(xsoftc.xpt_taskq,
3375                             &periph->periph_run_task);
3376                         break;
3377                 }
3378                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3379                 if (prio == periph->immediate_priority) {
3380                         periph->immediate_priority = CAM_PRIORITY_NONE;
3381                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3382                                         ("waking cam_periph_getccb()\n"));
3383                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3384                                           periph_links.sle);
3385                         wakeup(&periph->ccb_list);
3386                 } else {
3387                         periph->scheduled_priority = CAM_PRIORITY_NONE;
3388                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3389                                         ("calling periph_start()\n"));
3390                         periph->periph_start(periph, ccb);
3391                 }
3392                 ccb = NULL;
3393         }
3394         if (ccb != NULL)
3395                 xpt_release_ccb(ccb);
3396         periph->periph_allocating = 0;
3397 }
3398
3399 static void
3400 xpt_run_devq(struct cam_devq *devq)
3401 {
3402         struct mtx *mtx;
3403
3404         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3405
3406         devq->send_queue.qfrozen_cnt++;
3407         while ((devq->send_queue.entries > 0)
3408             && (devq->send_openings > 0)
3409             && (devq->send_queue.qfrozen_cnt <= 1)) {
3410                 struct  cam_ed *device;
3411                 union ccb *work_ccb;
3412                 struct  cam_sim *sim;
3413                 struct xpt_proto *proto;
3414
3415                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
3416                                                            CAMQ_HEAD);
3417                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3418                                 ("running device %p\n", device));
3419
3420                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3421                 if (work_ccb == NULL) {
3422                         printf("device on run queue with no ccbs???\n");
3423                         continue;
3424                 }
3425
3426                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3427
3428                         mtx_lock(&xsoftc.xpt_highpower_lock);
3429                         if (xsoftc.num_highpower <= 0) {
3430                                 /*
3431                                  * We got a high power command, but we
3432                                  * don't have any available slots.  Freeze
3433                                  * the device queue until we have a slot
3434                                  * available.
3435                                  */
3436                                 xpt_freeze_devq_device(device, 1);
3437                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3438                                                    highpowerq_entry);
3439
3440                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
3441                                 continue;
3442                         } else {
3443                                 /*
3444                                  * Consume a high power slot while
3445                                  * this ccb runs.
3446                                  */
3447                                 xsoftc.num_highpower--;
3448                         }
3449                         mtx_unlock(&xsoftc.xpt_highpower_lock);
3450                 }
3451                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3452                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3453                 devq->send_openings--;
3454                 devq->send_active++;
3455                 xpt_schedule_devq(devq, device);
3456                 mtx_unlock(&devq->send_mtx);
3457
3458                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3459                         /*
3460                          * The client wants to freeze the queue
3461                          * after this CCB is sent.
3462                          */
3463                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3464                 }
3465
3466                 /* In Target mode, the peripheral driver knows best... */
3467                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3468                         if ((device->inq_flags & SID_CmdQue) != 0
3469                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3470                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3471                         else
3472                                 /*
3473                                  * Clear this in case of a retried CCB that
3474                                  * failed due to a rejected tag.
3475                                  */
3476                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3477                 }
3478
3479                 KASSERT(device == work_ccb->ccb_h.path->device,
3480                     ("device (%p) / path->device (%p) mismatch",
3481                         device, work_ccb->ccb_h.path->device));
3482                 proto = xpt_proto_find(device->protocol);
3483                 if (proto && proto->ops->debug_out)
3484                         proto->ops->debug_out(work_ccb);
3485
3486                 /*
3487                  * Device queues can be shared among multiple SIM instances
3488                  * that reside on different buses.  Use the SIM from the
3489                  * queued device, rather than the one from the calling bus.
3490                  */
3491                 sim = device->sim;
3492                 mtx = sim->mtx;
3493                 if (mtx && !mtx_owned(mtx))
3494                         mtx_lock(mtx);
3495                 else
3496                         mtx = NULL;
3497                 work_ccb->ccb_h.qos.sim_data = sbinuptime(); // xxx uintprt_t too small 32bit platforms
3498                 (*(sim->sim_action))(sim, work_ccb);
3499                 if (mtx)
3500                         mtx_unlock(mtx);
3501                 mtx_lock(&devq->send_mtx);
3502         }
3503         devq->send_queue.qfrozen_cnt--;
3504 }
3505
3506 /*
3507  * This function merges stuff from the slave ccb into the master ccb, while
3508  * keeping important fields in the master ccb constant.
3509  */
3510 void
3511 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3512 {
3513
3514         /*
3515          * Pull fields that are valid for peripheral drivers to set
3516          * into the master CCB along with the CCB "payload".
3517          */
3518         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3519         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3520         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3521         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3522         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3523               sizeof(union ccb) - sizeof(struct ccb_hdr));
3524 }
3525
3526 void
3527 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3528                     u_int32_t priority, u_int32_t flags)
3529 {
3530
3531         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3532         ccb_h->pinfo.priority = priority;
3533         ccb_h->path = path;
3534         ccb_h->path_id = path->bus->path_id;
3535         if (path->target)
3536                 ccb_h->target_id = path->target->target_id;
3537         else
3538                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3539         if (path->device) {
3540                 ccb_h->target_lun = path->device->lun_id;
3541                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3542         } else {
3543                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3544         }
3545         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3546         ccb_h->flags = flags;
3547         ccb_h->xflags = 0;
3548 }
3549
3550 void
3551 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3552 {
3553         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3554 }
3555
3556 /* Path manipulation functions */
3557 cam_status
3558 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3559                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3560 {
3561         struct     cam_path *path;
3562         cam_status status;
3563
3564         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3565
3566         if (path == NULL) {
3567                 status = CAM_RESRC_UNAVAIL;
3568                 return(status);
3569         }
3570         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3571         if (status != CAM_REQ_CMP) {
3572                 free(path, M_CAMPATH);
3573                 path = NULL;
3574         }
3575         *new_path_ptr = path;
3576         return (status);
3577 }
3578
3579 cam_status
3580 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3581                          struct cam_periph *periph, path_id_t path_id,
3582                          target_id_t target_id, lun_id_t lun_id)
3583 {
3584
3585         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3586             lun_id));
3587 }
3588
3589 cam_status
3590 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3591                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3592 {
3593         struct       cam_eb *bus;
3594         struct       cam_et *target;
3595         struct       cam_ed *device;
3596         cam_status   status;
3597
3598         status = CAM_REQ_CMP;   /* Completed without error */
3599         target = NULL;          /* Wildcarded */
3600         device = NULL;          /* Wildcarded */
3601
3602         /*
3603          * We will potentially modify the EDT, so block interrupts
3604          * that may attempt to create cam paths.
3605          */
3606         bus = xpt_find_bus(path_id);
3607         if (bus == NULL) {
3608                 status = CAM_PATH_INVALID;
3609         } else {
3610                 xpt_lock_buses();
3611                 mtx_lock(&bus->eb_mtx);
3612                 target = xpt_find_target(bus, target_id);
3613                 if (target == NULL) {
3614                         /* Create one */
3615                         struct cam_et *new_target;
3616
3617                         new_target = xpt_alloc_target(bus, target_id);
3618                         if (new_target == NULL) {
3619                                 status = CAM_RESRC_UNAVAIL;
3620                         } else {
3621                                 target = new_target;
3622                         }
3623                 }
3624                 xpt_unlock_buses();
3625                 if (target != NULL) {
3626                         device = xpt_find_device(target, lun_id);
3627                         if (device == NULL) {
3628                                 /* Create one */
3629                                 struct cam_ed *new_device;
3630
3631                                 new_device =
3632                                     (*(bus->xport->ops->alloc_device))(bus,
3633                                                                        target,
3634                                                                        lun_id);
3635                                 if (new_device == NULL) {
3636                                         status = CAM_RESRC_UNAVAIL;
3637                                 } else {
3638                                         device = new_device;
3639                                 }
3640                         }
3641                 }
3642                 mtx_unlock(&bus->eb_mtx);
3643         }
3644
3645         /*
3646          * Only touch the user's data if we are successful.
3647          */
3648         if (status == CAM_REQ_CMP) {
3649                 new_path->periph = perph;
3650                 new_path->bus = bus;
3651                 new_path->target = target;
3652                 new_path->device = device;
3653                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3654         } else {
3655                 if (device != NULL)
3656                         xpt_release_device(device);
3657                 if (target != NULL)
3658                         xpt_release_target(target);
3659                 if (bus != NULL)
3660                         xpt_release_bus(bus);
3661         }
3662         return (status);
3663 }
3664
3665 cam_status
3666 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3667 {
3668         struct     cam_path *new_path;
3669
3670         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3671         if (new_path == NULL)
3672                 return(CAM_RESRC_UNAVAIL);
3673         xpt_copy_path(new_path, path);
3674         *new_path_ptr = new_path;
3675         return (CAM_REQ_CMP);
3676 }
3677
3678 void
3679 xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3680 {
3681
3682         *new_path = *path;
3683         if (path->bus != NULL)
3684                 xpt_acquire_bus(path->bus);
3685         if (path->target != NULL)
3686                 xpt_acquire_target(path->target);
3687         if (path->device != NULL)
3688                 xpt_acquire_device(path->device);
3689 }
3690
3691 void
3692 xpt_release_path(struct cam_path *path)
3693 {
3694         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3695         if (path->device != NULL) {
3696                 xpt_release_device(path->device);
3697                 path->device = NULL;
3698         }
3699         if (path->target != NULL) {
3700                 xpt_release_target(path->target);
3701                 path->target = NULL;
3702         }
3703         if (path->bus != NULL) {
3704                 xpt_release_bus(path->bus);
3705                 path->bus = NULL;
3706         }
3707 }
3708
3709 void
3710 xpt_free_path(struct cam_path *path)
3711 {
3712
3713         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3714         xpt_release_path(path);
3715         free(path, M_CAMPATH);
3716 }
3717
3718 void
3719 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3720     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3721 {
3722
3723         xpt_lock_buses();
3724         if (bus_ref) {
3725                 if (path->bus)
3726                         *bus_ref = path->bus->refcount;
3727                 else
3728                         *bus_ref = 0;
3729         }
3730         if (periph_ref) {
3731                 if (path->periph)
3732                         *periph_ref = path->periph->refcount;
3733                 else
3734                         *periph_ref = 0;
3735         }
3736         xpt_unlock_buses();
3737         if (target_ref) {
3738                 if (path->target)
3739                         *target_ref = path->target->refcount;
3740                 else
3741                         *target_ref = 0;
3742         }
3743         if (device_ref) {
3744                 if (path->device)
3745                         *device_ref = path->device->refcount;
3746                 else
3747                         *device_ref = 0;
3748         }
3749 }
3750
3751 /*
3752  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3753  * in path1, 2 for match with wildcards in path2.
3754  */
3755 int
3756 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3757 {
3758         int retval = 0;
3759
3760         if (path1->bus != path2->bus) {
3761                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3762                         retval = 1;
3763                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3764                         retval = 2;
3765                 else
3766                         return (-1);
3767         }
3768         if (path1->target != path2->target) {
3769                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3770                         if (retval == 0)
3771                                 retval = 1;
3772                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3773                         retval = 2;
3774                 else
3775                         return (-1);
3776         }
3777         if (path1->device != path2->device) {
3778                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3779                         if (retval == 0)
3780                                 retval = 1;
3781                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3782                         retval = 2;
3783                 else
3784                         return (-1);
3785         }
3786         return (retval);
3787 }
3788
3789 int
3790 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3791 {
3792         int retval = 0;
3793
3794         if (path->bus != dev->target->bus) {
3795                 if (path->bus->path_id == CAM_BUS_WILDCARD)
3796                         retval = 1;
3797                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3798                         retval = 2;
3799                 else
3800                         return (-1);
3801         }
3802         if (path->target != dev->target) {
3803                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
3804                         if (retval == 0)
3805                                 retval = 1;
3806                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3807                         retval = 2;
3808                 else
3809                         return (-1);
3810         }
3811         if (path->device != dev) {
3812                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
3813                         if (retval == 0)
3814                                 retval = 1;
3815                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
3816                         retval = 2;
3817                 else
3818                         return (-1);
3819         }
3820         return (retval);
3821 }
3822
3823 void
3824 xpt_print_path(struct cam_path *path)
3825 {
3826         struct sbuf sb;
3827         char buffer[XPT_PRINT_LEN];
3828
3829         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3830         xpt_path_sbuf(path, &sb);
3831         sbuf_finish(&sb);
3832         printf("%s", sbuf_data(&sb));
3833         sbuf_delete(&sb);
3834 }
3835
3836 void
3837 xpt_print_device(struct cam_ed *device)
3838 {
3839
3840         if (device == NULL)
3841                 printf("(nopath): ");
3842         else {
3843                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3844                        device->sim->unit_number,
3845                        device->sim->bus_id,
3846                        device->target->target_id,
3847                        (uintmax_t)device->lun_id);
3848         }
3849 }
3850
3851 void
3852 xpt_print(struct cam_path *path, const char *fmt, ...)
3853 {
3854         va_list ap;
3855         struct sbuf sb;
3856         char buffer[XPT_PRINT_LEN];
3857
3858         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3859
3860         xpt_path_sbuf(path, &sb);
3861         va_start(ap, fmt);
3862         sbuf_vprintf(&sb, fmt, ap);
3863         va_end(ap);
3864
3865         sbuf_finish(&sb);
3866         printf("%s", sbuf_data(&sb));
3867         sbuf_delete(&sb);
3868 }
3869
3870 int
3871 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3872 {
3873         struct sbuf sb;
3874         int len;
3875
3876         sbuf_new(&sb, str, str_len, 0);
3877         len = xpt_path_sbuf(path, &sb);
3878         sbuf_finish(&sb);
3879         return (len);
3880 }
3881
3882 int
3883 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3884 {
3885
3886         if (path == NULL)
3887                 sbuf_printf(sb, "(nopath): ");
3888         else {
3889                 if (path->periph != NULL)
3890                         sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3891                                     path->periph->unit_number);
3892                 else
3893                         sbuf_printf(sb, "(noperiph:");
3894
3895                 if (path->bus != NULL)
3896                         sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3897                                     path->bus->sim->unit_number,
3898                                     path->bus->sim->bus_id);
3899                 else
3900                         sbuf_printf(sb, "nobus:");
3901
3902                 if (path->target != NULL)
3903                         sbuf_printf(sb, "%d:", path->target->target_id);
3904                 else
3905                         sbuf_printf(sb, "X:");
3906
3907                 if (path->device != NULL)
3908                         sbuf_printf(sb, "%jx): ",
3909                             (uintmax_t)path->device->lun_id);
3910                 else
3911                         sbuf_printf(sb, "X): ");
3912         }
3913
3914         return(sbuf_len(sb));
3915 }
3916
3917 path_id_t
3918 xpt_path_path_id(struct cam_path *path)
3919 {
3920         return(path->bus->path_id);
3921 }
3922
3923 target_id_t
3924 xpt_path_target_id(struct cam_path *path)
3925 {
3926         if (path->target != NULL)
3927                 return (path->target->target_id);
3928         else
3929                 return (CAM_TARGET_WILDCARD);
3930 }
3931
3932 lun_id_t
3933 xpt_path_lun_id(struct cam_path *path)
3934 {
3935         if (path->device != NULL)
3936                 return (path->device->lun_id);
3937         else
3938                 return (CAM_LUN_WILDCARD);
3939 }
3940
3941 struct cam_sim *
3942 xpt_path_sim(struct cam_path *path)
3943 {
3944
3945         return (path->bus->sim);
3946 }
3947
3948 struct cam_periph*
3949 xpt_path_periph(struct cam_path *path)
3950 {
3951
3952         return (path->periph);
3953 }
3954
3955 /*
3956  * Release a CAM control block for the caller.  Remit the cost of the structure
3957  * to the device referenced by the path.  If the this device had no 'credits'
3958  * and peripheral drivers have registered async callbacks for this notification
3959  * call them now.
3960  */
3961 void
3962 xpt_release_ccb(union ccb *free_ccb)
3963 {
3964         struct   cam_ed *device;
3965         struct   cam_periph *periph;
3966
3967         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3968         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3969         device = free_ccb->ccb_h.path->device;
3970         periph = free_ccb->ccb_h.path->periph;
3971
3972         xpt_free_ccb(free_ccb);
3973         periph->periph_allocated--;
3974         cam_ccbq_release_opening(&device->ccbq);
3975         xpt_run_allocq(periph, 0);
3976 }
3977
3978 /* Functions accessed by SIM drivers */
3979
3980 static struct xpt_xport_ops xport_default_ops = {
3981         .alloc_device = xpt_alloc_device_default,
3982         .action = xpt_action_default,
3983         .async = xpt_dev_async_default,
3984 };
3985 static struct xpt_xport xport_default = {
3986         .xport = XPORT_UNKNOWN,
3987         .name = "unknown",
3988         .ops = &xport_default_ops,
3989 };
3990
3991 CAM_XPT_XPORT(xport_default);
3992
3993 /*
3994  * A sim structure, listing the SIM entry points and instance
3995  * identification info is passed to xpt_bus_register to hook the SIM
3996  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3997  * for this new bus and places it in the array of buses and assigns
3998  * it a path_id.  The path_id may be influenced by "hard wiring"
3999  * information specified by the user.  Once interrupt services are
4000  * available, the bus will be probed.
4001  */
4002 int32_t
4003 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
4004 {
4005         struct cam_eb *new_bus;
4006         struct cam_eb *old_bus;
4007         struct ccb_pathinq cpi;
4008         struct cam_path *path;
4009         cam_status status;
4010
4011         sim->bus_id = bus;
4012         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4013                                           M_CAMXPT, M_NOWAIT|M_ZERO);
4014         if (new_bus == NULL) {
4015                 /* Couldn't satisfy request */
4016                 return (CAM_RESRC_UNAVAIL);
4017         }
4018
4019         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
4020         TAILQ_INIT(&new_bus->et_entries);
4021         cam_sim_hold(sim);
4022         new_bus->sim = sim;
4023         timevalclear(&new_bus->last_reset);
4024         new_bus->flags = 0;
4025         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4026         new_bus->generation = 0;
4027
4028         xpt_lock_buses();
4029         sim->path_id = new_bus->path_id =
4030             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4031         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4032         while (old_bus != NULL
4033             && old_bus->path_id < new_bus->path_id)
4034                 old_bus = TAILQ_NEXT(old_bus, links);
4035         if (old_bus != NULL)
4036                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4037         else
4038                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4039         xsoftc.bus_generation++;
4040         xpt_unlock_buses();
4041
4042         /*
4043          * Set a default transport so that a PATH_INQ can be issued to
4044          * the SIM.  This will then allow for probing and attaching of
4045          * a more appropriate transport.
4046          */
4047         new_bus->xport = &xport_default;
4048
4049         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
4050                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4051         if (status != CAM_REQ_CMP) {
4052                 xpt_release_bus(new_bus);
4053                 free(path, M_CAMXPT);
4054                 return (CAM_RESRC_UNAVAIL);
4055         }
4056
4057         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
4058         cpi.ccb_h.func_code = XPT_PATH_INQ;
4059         xpt_action((union ccb *)&cpi);
4060
4061         if (cpi.ccb_h.status == CAM_REQ_CMP) {
4062                 struct xpt_xport **xpt;
4063
4064                 SET_FOREACH(xpt, cam_xpt_xport_set) {
4065                         if ((*xpt)->xport == cpi.transport) {
4066                                 new_bus->xport = *xpt;
4067                                 break;
4068                         }
4069                 }
4070                 if (new_bus->xport == NULL) {
4071                         xpt_print(path,
4072                             "No transport found for %d\n", cpi.transport);
4073                         xpt_release_bus(new_bus);
4074                         free(path, M_CAMXPT);
4075                         return (CAM_RESRC_UNAVAIL);
4076                 }
4077         }
4078
4079         /* Notify interested parties */
4080         if (sim->path_id != CAM_XPT_PATH_ID) {
4081
4082                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
4083                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
4084                         union   ccb *scan_ccb;
4085
4086                         /* Initiate bus rescan. */
4087                         scan_ccb = xpt_alloc_ccb_nowait();
4088                         if (scan_ccb != NULL) {
4089                                 scan_ccb->ccb_h.path = path;
4090                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
4091                                 scan_ccb->crcn.flags = 0;
4092                                 xpt_rescan(scan_ccb);
4093                         } else {
4094                                 xpt_print(path,
4095                                           "Can't allocate CCB to scan bus\n");
4096                                 xpt_free_path(path);
4097                         }
4098                 } else
4099                         xpt_free_path(path);
4100         } else
4101                 xpt_free_path(path);
4102         return (CAM_SUCCESS);
4103 }
4104
4105 int32_t
4106 xpt_bus_deregister(path_id_t pathid)
4107 {
4108         struct cam_path bus_path;
4109         cam_status status;
4110
4111         status = xpt_compile_path(&bus_path, NULL, pathid,
4112                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4113         if (status != CAM_REQ_CMP)
4114                 return (status);
4115
4116         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4117         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4118
4119         /* Release the reference count held while registered. */
4120         xpt_release_bus(bus_path.bus);
4121         xpt_release_path(&bus_path);
4122
4123         return (CAM_REQ_CMP);
4124 }
4125
4126 static path_id_t
4127 xptnextfreepathid(void)
4128 {
4129         struct cam_eb *bus;
4130         path_id_t pathid;
4131         const char *strval;
4132
4133         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4134         pathid = 0;
4135         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4136 retry:
4137         /* Find an unoccupied pathid */
4138         while (bus != NULL && bus->path_id <= pathid) {
4139                 if (bus->path_id == pathid)
4140                         pathid++;
4141                 bus = TAILQ_NEXT(bus, links);
4142         }
4143
4144         /*
4145          * Ensure that this pathid is not reserved for
4146          * a bus that may be registered in the future.
4147          */
4148         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4149                 ++pathid;
4150                 /* Start the search over */
4151                 goto retry;
4152         }
4153         return (pathid);
4154 }
4155
4156 static path_id_t
4157 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4158 {
4159         path_id_t pathid;
4160         int i, dunit, val;
4161         char buf[32];
4162         const char *dname;
4163
4164         pathid = CAM_XPT_PATH_ID;
4165         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4166         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4167                 return (pathid);
4168         i = 0;
4169         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4170                 if (strcmp(dname, "scbus")) {
4171                         /* Avoid a bit of foot shooting. */
4172                         continue;
4173                 }
4174                 if (dunit < 0)          /* unwired?! */
4175                         continue;
4176                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4177                         if (sim_bus == val) {
4178                                 pathid = dunit;
4179                                 break;
4180                         }
4181                 } else if (sim_bus == 0) {
4182                         /* Unspecified matches bus 0 */
4183                         pathid = dunit;
4184                         break;
4185                 } else {
4186                         printf("Ambiguous scbus configuration for %s%d "
4187                                "bus %d, cannot wire down.  The kernel "
4188                                "config entry for scbus%d should "
4189                                "specify a controller bus.\n"
4190                                "Scbus will be assigned dynamically.\n",
4191                                sim_name, sim_unit, sim_bus, dunit);
4192                         break;
4193                 }
4194         }
4195
4196         if (pathid == CAM_XPT_PATH_ID)
4197                 pathid = xptnextfreepathid();
4198         return (pathid);
4199 }
4200
4201 static const char *
4202 xpt_async_string(u_int32_t async_code)
4203 {
4204
4205         switch (async_code) {
4206         case AC_BUS_RESET: return ("AC_BUS_RESET");
4207         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4208         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4209         case AC_SENT_BDR: return ("AC_SENT_BDR");
4210         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4211         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4212         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4213         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4214         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4215         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4216         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4217         case AC_CONTRACT: return ("AC_CONTRACT");
4218         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4219         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4220         }
4221         return ("AC_UNKNOWN");
4222 }
4223
4224 static int
4225 xpt_async_size(u_int32_t async_code)
4226 {
4227
4228         switch (async_code) {
4229         case AC_BUS_RESET: return (0);
4230         case AC_UNSOL_RESEL: return (0);
4231         case AC_SCSI_AEN: return (0);
4232         case AC_SENT_BDR: return (0);
4233         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4234         case AC_PATH_DEREGISTERED: return (0);
4235         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4236         case AC_LOST_DEVICE: return (0);
4237         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4238         case AC_INQ_CHANGED: return (0);
4239         case AC_GETDEV_CHANGED: return (0);
4240         case AC_CONTRACT: return (sizeof(struct ac_contract));
4241         case AC_ADVINFO_CHANGED: return (-1);
4242         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4243         }
4244         return (0);
4245 }
4246
4247 static int
4248 xpt_async_process_dev(struct cam_ed *device, void *arg)
4249 {
4250         union ccb *ccb = arg;
4251         struct cam_path *path = ccb->ccb_h.path;
4252         void *async_arg = ccb->casync.async_arg_ptr;
4253         u_int32_t async_code = ccb->casync.async_code;
4254         int relock;
4255
4256         if (path->device != device
4257          && path->device->lun_id != CAM_LUN_WILDCARD
4258          && device->lun_id != CAM_LUN_WILDCARD)
4259                 return (1);
4260
4261         /*
4262          * The async callback could free the device.
4263          * If it is a broadcast async, it doesn't hold
4264          * device reference, so take our own reference.
4265          */
4266         xpt_acquire_device(device);
4267
4268         /*
4269          * If async for specific device is to be delivered to
4270          * the wildcard client, take the specific device lock.
4271          * XXX: We may need a way for client to specify it.
4272          */
4273         if ((device->lun_id == CAM_LUN_WILDCARD &&
4274              path->device->lun_id != CAM_LUN_WILDCARD) ||
4275             (device->target->target_id == CAM_TARGET_WILDCARD &&
4276              path->target->target_id != CAM_TARGET_WILDCARD) ||
4277             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4278              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4279                 mtx_unlock(&device->device_mtx);
4280                 xpt_path_lock(path);
4281                 relock = 1;
4282         } else
4283                 relock = 0;
4284
4285         (*(device->target->bus->xport->ops->async))(async_code,
4286             device->target->bus, device->target, device, async_arg);
4287         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4288
4289         if (relock) {
4290                 xpt_path_unlock(path);
4291                 mtx_lock(&device->device_mtx);
4292         }
4293         xpt_release_device(device);
4294         return (1);
4295 }
4296
4297 static int
4298 xpt_async_process_tgt(struct cam_et *target, void *arg)
4299 {
4300         union ccb *ccb = arg;
4301         struct cam_path *path = ccb->ccb_h.path;
4302
4303         if (path->target != target
4304          && path->target->target_id != CAM_TARGET_WILDCARD
4305          && target->target_id != CAM_TARGET_WILDCARD)
4306                 return (1);
4307
4308         if (ccb->casync.async_code == AC_SENT_BDR) {
4309                 /* Update our notion of when the last reset occurred */
4310                 microtime(&target->last_reset);
4311         }
4312
4313         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4314 }
4315
4316 static void
4317 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4318 {
4319         struct cam_eb *bus;
4320         struct cam_path *path;
4321         void *async_arg;
4322         u_int32_t async_code;
4323
4324         path = ccb->ccb_h.path;
4325         async_code = ccb->casync.async_code;
4326         async_arg = ccb->casync.async_arg_ptr;
4327         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4328             ("xpt_async(%s)\n", xpt_async_string(async_code)));
4329         bus = path->bus;
4330
4331         if (async_code == AC_BUS_RESET) {
4332                 /* Update our notion of when the last reset occurred */
4333                 microtime(&bus->last_reset);
4334         }
4335
4336         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4337
4338         /*
4339          * If this wasn't a fully wildcarded async, tell all
4340          * clients that want all async events.
4341          */
4342         if (bus != xpt_periph->path->bus) {
4343                 xpt_path_lock(xpt_periph->path);
4344                 xpt_async_process_dev(xpt_periph->path->device, ccb);
4345                 xpt_path_unlock(xpt_periph->path);
4346         }
4347
4348         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4349                 xpt_release_devq(path, 1, TRUE);
4350         else
4351                 xpt_release_simq(path->bus->sim, TRUE);
4352         if (ccb->casync.async_arg_size > 0)
4353                 free(async_arg, M_CAMXPT);
4354         xpt_free_path(path);
4355         xpt_free_ccb(ccb);
4356 }
4357
4358 static void
4359 xpt_async_bcast(struct async_list *async_head,
4360                 u_int32_t async_code,
4361                 struct cam_path *path, void *async_arg)
4362 {
4363         struct async_node *cur_entry;
4364         struct mtx *mtx;
4365
4366         cur_entry = SLIST_FIRST(async_head);
4367         while (cur_entry != NULL) {
4368                 struct async_node *next_entry;
4369                 /*
4370                  * Grab the next list entry before we call the current
4371                  * entry's callback.  This is because the callback function
4372                  * can delete its async callback entry.
4373                  */
4374                 next_entry = SLIST_NEXT(cur_entry, links);
4375                 if ((cur_entry->event_enable & async_code) != 0) {
4376                         mtx = cur_entry->event_lock ?
4377                             path->device->sim->mtx : NULL;
4378                         if (mtx)
4379                                 mtx_lock(mtx);
4380                         cur_entry->callback(cur_entry->callback_arg,
4381                                             async_code, path,
4382                                             async_arg);
4383                         if (mtx)
4384                                 mtx_unlock(mtx);
4385                 }
4386                 cur_entry = next_entry;
4387         }
4388 }
4389
4390 void
4391 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4392 {
4393         union ccb *ccb;
4394         int size;
4395
4396         ccb = xpt_alloc_ccb_nowait();
4397         if (ccb == NULL) {
4398                 xpt_print(path, "Can't allocate CCB to send %s\n",
4399                     xpt_async_string(async_code));
4400                 return;
4401         }
4402
4403         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4404                 xpt_print(path, "Can't allocate path to send %s\n",
4405                     xpt_async_string(async_code));
4406                 xpt_free_ccb(ccb);
4407                 return;
4408         }
4409         ccb->ccb_h.path->periph = NULL;
4410         ccb->ccb_h.func_code = XPT_ASYNC;
4411         ccb->ccb_h.cbfcnp = xpt_async_process;
4412         ccb->ccb_h.flags |= CAM_UNLOCKED;
4413         ccb->casync.async_code = async_code;
4414         ccb->casync.async_arg_size = 0;
4415         size = xpt_async_size(async_code);
4416         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4417             ("xpt_async: func %#x %s aync_code %d %s\n",
4418                 ccb->ccb_h.func_code,
4419                 xpt_action_name(ccb->ccb_h.func_code),
4420                 async_code,
4421                 xpt_async_string(async_code)));
4422         if (size > 0 && async_arg != NULL) {
4423                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4424                 if (ccb->casync.async_arg_ptr == NULL) {
4425                         xpt_print(path, "Can't allocate argument to send %s\n",
4426                             xpt_async_string(async_code));
4427                         xpt_free_path(ccb->ccb_h.path);
4428                         xpt_free_ccb(ccb);
4429                         return;
4430                 }
4431                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4432                 ccb->casync.async_arg_size = size;
4433         } else if (size < 0) {
4434                 ccb->casync.async_arg_ptr = async_arg;
4435                 ccb->casync.async_arg_size = size;
4436         }
4437         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4438                 xpt_freeze_devq(path, 1);
4439         else
4440                 xpt_freeze_simq(path->bus->sim, 1);
4441         xpt_done(ccb);
4442 }
4443
4444 static void
4445 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4446                       struct cam_et *target, struct cam_ed *device,
4447                       void *async_arg)
4448 {
4449
4450         /*
4451          * We only need to handle events for real devices.
4452          */
4453         if (target->target_id == CAM_TARGET_WILDCARD
4454          || device->lun_id == CAM_LUN_WILDCARD)
4455                 return;
4456
4457         printf("%s called\n", __func__);
4458 }
4459
4460 static uint32_t
4461 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4462 {
4463         struct cam_devq *devq;
4464         uint32_t freeze;
4465
4466         devq = dev->sim->devq;
4467         mtx_assert(&devq->send_mtx, MA_OWNED);
4468         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4469             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4470             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4471         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4472         /* Remove frozen device from sendq. */
4473         if (device_is_queued(dev))
4474                 camq_remove(&devq->send_queue, dev->devq_entry.index);
4475         return (freeze);
4476 }
4477
4478 u_int32_t
4479 xpt_freeze_devq(struct cam_path *path, u_int count)
4480 {
4481         struct cam_ed   *dev = path->device;
4482         struct cam_devq *devq;
4483         uint32_t         freeze;
4484
4485         devq = dev->sim->devq;
4486         mtx_lock(&devq->send_mtx);
4487         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4488         freeze = xpt_freeze_devq_device(dev, count);
4489         mtx_unlock(&devq->send_mtx);
4490         return (freeze);
4491 }
4492
4493 u_int32_t
4494 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4495 {
4496         struct cam_devq *devq;
4497         uint32_t         freeze;
4498
4499         devq = sim->devq;
4500         mtx_lock(&devq->send_mtx);
4501         freeze = (devq->send_queue.qfrozen_cnt += count);
4502         mtx_unlock(&devq->send_mtx);
4503         return (freeze);
4504 }
4505
4506 static void
4507 xpt_release_devq_timeout(void *arg)
4508 {
4509         struct cam_ed *dev;
4510         struct cam_devq *devq;
4511
4512         dev = (struct cam_ed *)arg;
4513         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4514         devq = dev->sim->devq;
4515         mtx_assert(&devq->send_mtx, MA_OWNED);
4516         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4517                 xpt_run_devq(devq);
4518 }
4519
4520 void
4521 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4522 {
4523         struct cam_ed *dev;
4524         struct cam_devq *devq;
4525
4526         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4527             count, run_queue));
4528         dev = path->device;
4529         devq = dev->sim->devq;
4530         mtx_lock(&devq->send_mtx);
4531         if (xpt_release_devq_device(dev, count, run_queue))
4532                 xpt_run_devq(dev->sim->devq);
4533         mtx_unlock(&devq->send_mtx);
4534 }
4535
4536 static int
4537 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4538 {
4539
4540         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4541         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4542             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4543             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4544         if (count > dev->ccbq.queue.qfrozen_cnt) {
4545 #ifdef INVARIANTS
4546                 printf("xpt_release_devq(): requested %u > present %u\n",
4547                     count, dev->ccbq.queue.qfrozen_cnt);
4548 #endif
4549                 count = dev->ccbq.queue.qfrozen_cnt;
4550         }
4551         dev->ccbq.queue.qfrozen_cnt -= count;
4552         if (dev->ccbq.queue.qfrozen_cnt == 0) {
4553                 /*
4554                  * No longer need to wait for a successful
4555                  * command completion.
4556                  */
4557                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4558                 /*
4559                  * Remove any timeouts that might be scheduled
4560                  * to release this queue.
4561                  */
4562                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4563                         callout_stop(&dev->callout);
4564                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4565                 }
4566                 /*
4567                  * Now that we are unfrozen schedule the
4568                  * device so any pending transactions are
4569                  * run.
4570                  */
4571                 xpt_schedule_devq(dev->sim->devq, dev);
4572         } else
4573                 run_queue = 0;
4574         return (run_queue);
4575 }
4576
4577 void
4578 xpt_release_simq(struct cam_sim *sim, int run_queue)
4579 {
4580         struct cam_devq *devq;
4581
4582         devq = sim->devq;
4583         mtx_lock(&devq->send_mtx);
4584         if (devq->send_queue.qfrozen_cnt <= 0) {
4585 #ifdef INVARIANTS
4586                 printf("xpt_release_simq: requested 1 > present %u\n",
4587                     devq->send_queue.qfrozen_cnt);
4588 #endif
4589         } else
4590                 devq->send_queue.qfrozen_cnt--;
4591         if (devq->send_queue.qfrozen_cnt == 0) {
4592                 /*
4593                  * If there is a timeout scheduled to release this
4594                  * sim queue, remove it.  The queue frozen count is
4595                  * already at 0.
4596                  */
4597                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4598                         callout_stop(&sim->callout);
4599                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4600                 }
4601                 if (run_queue) {
4602                         /*
4603                          * Now that we are unfrozen run the send queue.
4604                          */
4605                         xpt_run_devq(sim->devq);
4606                 }
4607         }
4608         mtx_unlock(&devq->send_mtx);
4609 }
4610
4611 /*
4612  * XXX Appears to be unused.
4613  */
4614 static void
4615 xpt_release_simq_timeout(void *arg)
4616 {
4617         struct cam_sim *sim;
4618
4619         sim = (struct cam_sim *)arg;
4620         xpt_release_simq(sim, /* run_queue */ TRUE);
4621 }
4622
4623 void
4624 xpt_done(union ccb *done_ccb)
4625 {
4626         struct cam_doneq *queue;
4627         int     run, hash;
4628
4629 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4630         if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
4631             done_ccb->csio.bio != NULL)
4632                 biotrack(done_ccb->csio.bio, __func__);
4633 #endif
4634
4635         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4636             ("xpt_done: func= %#x %s status %#x\n",
4637                 done_ccb->ccb_h.func_code,
4638                 xpt_action_name(done_ccb->ccb_h.func_code),
4639                 done_ccb->ccb_h.status));
4640         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4641                 return;
4642
4643         /* Store the time the ccb was in the sim */
4644         done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
4645         hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4646             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4647         queue = &cam_doneqs[hash];
4648         mtx_lock(&queue->cam_doneq_mtx);
4649         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4650         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4651         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4652         mtx_unlock(&queue->cam_doneq_mtx);
4653         if (run)
4654                 wakeup(&queue->cam_doneq);
4655 }
4656
4657 void
4658 xpt_done_direct(union ccb *done_ccb)
4659 {
4660
4661         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4662             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4663         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4664                 return;
4665
4666         /* Store the time the ccb was in the sim */
4667         done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
4668         xpt_done_process(&done_ccb->ccb_h);
4669 }
4670
4671 union ccb *
4672 xpt_alloc_ccb()
4673 {
4674         union ccb *new_ccb;
4675
4676         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4677         return (new_ccb);
4678 }
4679
4680 union ccb *
4681 xpt_alloc_ccb_nowait()
4682 {
4683         union ccb *new_ccb;
4684
4685         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4686         return (new_ccb);
4687 }
4688
4689 void
4690 xpt_free_ccb(union ccb *free_ccb)
4691 {
4692         free(free_ccb, M_CAMCCB);
4693 }
4694
4695
4696
4697 /* Private XPT functions */
4698
4699 /*
4700  * Get a CAM control block for the caller. Charge the structure to the device
4701  * referenced by the path.  If we don't have sufficient resources to allocate
4702  * more ccbs, we return NULL.
4703  */
4704 static union ccb *
4705 xpt_get_ccb_nowait(struct cam_periph *periph)
4706 {
4707         union ccb *new_ccb;
4708
4709         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4710         if (new_ccb == NULL)
4711                 return (NULL);
4712         periph->periph_allocated++;
4713         cam_ccbq_take_opening(&periph->path->device->ccbq);
4714         return (new_ccb);
4715 }
4716
4717 static union ccb *
4718 xpt_get_ccb(struct cam_periph *periph)
4719 {
4720         union ccb *new_ccb;
4721
4722         cam_periph_unlock(periph);
4723         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4724         cam_periph_lock(periph);
4725         periph->periph_allocated++;
4726         cam_ccbq_take_opening(&periph->path->device->ccbq);
4727         return (new_ccb);
4728 }
4729
4730 union ccb *
4731 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4732 {
4733         struct ccb_hdr *ccb_h;
4734
4735         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4736         cam_periph_assert(periph, MA_OWNED);
4737         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4738             ccb_h->pinfo.priority != priority) {
4739                 if (priority < periph->immediate_priority) {
4740                         periph->immediate_priority = priority;
4741                         xpt_run_allocq(periph, 0);
4742                 } else
4743                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4744                             "cgticb", 0);
4745         }
4746         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4747         return ((union ccb *)ccb_h);
4748 }
4749
4750 static void
4751 xpt_acquire_bus(struct cam_eb *bus)
4752 {
4753
4754         xpt_lock_buses();
4755         bus->refcount++;
4756         xpt_unlock_buses();
4757 }
4758
4759 static void
4760 xpt_release_bus(struct cam_eb *bus)
4761 {
4762
4763         xpt_lock_buses();
4764         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4765         if (--bus->refcount > 0) {
4766                 xpt_unlock_buses();
4767                 return;
4768         }
4769         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4770         xsoftc.bus_generation++;
4771         xpt_unlock_buses();
4772         KASSERT(TAILQ_EMPTY(&bus->et_entries),
4773             ("destroying bus, but target list is not empty"));
4774         cam_sim_release(bus->sim);
4775         mtx_destroy(&bus->eb_mtx);
4776         free(bus, M_CAMXPT);
4777 }
4778
4779 static struct cam_et *
4780 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4781 {
4782         struct cam_et *cur_target, *target;
4783
4784         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4785         mtx_assert(&bus->eb_mtx, MA_OWNED);
4786         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4787                                          M_NOWAIT|M_ZERO);
4788         if (target == NULL)
4789                 return (NULL);
4790
4791         TAILQ_INIT(&target->ed_entries);
4792         target->bus = bus;
4793         target->target_id = target_id;
4794         target->refcount = 1;
4795         target->generation = 0;
4796         target->luns = NULL;
4797         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4798         timevalclear(&target->last_reset);
4799         /*
4800          * Hold a reference to our parent bus so it
4801          * will not go away before we do.
4802          */
4803         bus->refcount++;
4804
4805         /* Insertion sort into our bus's target list */
4806         cur_target = TAILQ_FIRST(&bus->et_entries);
4807         while (cur_target != NULL && cur_target->target_id < target_id)
4808                 cur_target = TAILQ_NEXT(cur_target, links);
4809         if (cur_target != NULL) {
4810                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4811         } else {
4812                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4813         }
4814         bus->generation++;
4815         return (target);
4816 }
4817
4818 static void
4819 xpt_acquire_target(struct cam_et *target)
4820 {
4821         struct cam_eb *bus = target->bus;
4822
4823         mtx_lock(&bus->eb_mtx);
4824         target->refcount++;
4825         mtx_unlock(&bus->eb_mtx);
4826 }
4827
4828 static void
4829 xpt_release_target(struct cam_et *target)
4830 {
4831         struct cam_eb *bus = target->bus;
4832
4833         mtx_lock(&bus->eb_mtx);
4834         if (--target->refcount > 0) {
4835                 mtx_unlock(&bus->eb_mtx);
4836                 return;
4837         }
4838         TAILQ_REMOVE(&bus->et_entries, target, links);
4839         bus->generation++;
4840         mtx_unlock(&bus->eb_mtx);
4841         KASSERT(TAILQ_EMPTY(&target->ed_entries),
4842             ("destroying target, but device list is not empty"));
4843         xpt_release_bus(bus);
4844         mtx_destroy(&target->luns_mtx);
4845         if (target->luns)
4846                 free(target->luns, M_CAMXPT);
4847         free(target, M_CAMXPT);
4848 }
4849
4850 static struct cam_ed *
4851 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4852                          lun_id_t lun_id)
4853 {
4854         struct cam_ed *device;
4855
4856         device = xpt_alloc_device(bus, target, lun_id);
4857         if (device == NULL)
4858                 return (NULL);
4859
4860         device->mintags = 1;
4861         device->maxtags = 1;
4862         return (device);
4863 }
4864
4865 static void
4866 xpt_destroy_device(void *context, int pending)
4867 {
4868         struct cam_ed   *device = context;
4869
4870         mtx_lock(&device->device_mtx);
4871         mtx_destroy(&device->device_mtx);
4872         free(device, M_CAMDEV);
4873 }
4874
4875 struct cam_ed *
4876 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4877 {
4878         struct cam_ed   *cur_device, *device;
4879         struct cam_devq *devq;
4880         cam_status status;
4881
4882         mtx_assert(&bus->eb_mtx, MA_OWNED);
4883         /* Make space for us in the device queue on our bus */
4884         devq = bus->sim->devq;
4885         mtx_lock(&devq->send_mtx);
4886         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4887         mtx_unlock(&devq->send_mtx);
4888         if (status != CAM_REQ_CMP)
4889                 return (NULL);
4890
4891         device = (struct cam_ed *)malloc(sizeof(*device),
4892                                          M_CAMDEV, M_NOWAIT|M_ZERO);
4893         if (device == NULL)
4894                 return (NULL);
4895
4896         cam_init_pinfo(&device->devq_entry);
4897         device->target = target;
4898         device->lun_id = lun_id;
4899         device->sim = bus->sim;
4900         if (cam_ccbq_init(&device->ccbq,
4901                           bus->sim->max_dev_openings) != 0) {
4902                 free(device, M_CAMDEV);
4903                 return (NULL);
4904         }
4905         SLIST_INIT(&device->asyncs);
4906         SLIST_INIT(&device->periphs);
4907         device->generation = 0;
4908         device->flags = CAM_DEV_UNCONFIGURED;
4909         device->tag_delay_count = 0;
4910         device->tag_saved_openings = 0;
4911         device->refcount = 1;
4912         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4913         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4914         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4915         /*
4916          * Hold a reference to our parent bus so it
4917          * will not go away before we do.
4918          */
4919         target->refcount++;
4920
4921         cur_device = TAILQ_FIRST(&target->ed_entries);
4922         while (cur_device != NULL && cur_device->lun_id < lun_id)
4923                 cur_device = TAILQ_NEXT(cur_device, links);
4924         if (cur_device != NULL)
4925                 TAILQ_INSERT_BEFORE(cur_device, device, links);
4926         else
4927                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4928         target->generation++;
4929         return (device);
4930 }
4931
4932 void
4933 xpt_acquire_device(struct cam_ed *device)
4934 {
4935         struct cam_eb *bus = device->target->bus;
4936
4937         mtx_lock(&bus->eb_mtx);
4938         device->refcount++;
4939         mtx_unlock(&bus->eb_mtx);
4940 }
4941
4942 void
4943 xpt_release_device(struct cam_ed *device)
4944 {
4945         struct cam_eb *bus = device->target->bus;
4946         struct cam_devq *devq;
4947
4948         mtx_lock(&bus->eb_mtx);
4949         if (--device->refcount > 0) {
4950                 mtx_unlock(&bus->eb_mtx);
4951                 return;
4952         }
4953
4954         TAILQ_REMOVE(&device->target->ed_entries, device,links);
4955         device->target->generation++;
4956         mtx_unlock(&bus->eb_mtx);
4957
4958         /* Release our slot in the devq */
4959         devq = bus->sim->devq;
4960         mtx_lock(&devq->send_mtx);
4961         cam_devq_resize(devq, devq->send_queue.array_size - 1);
4962         mtx_unlock(&devq->send_mtx);
4963
4964         KASSERT(SLIST_EMPTY(&device->periphs),
4965             ("destroying device, but periphs list is not empty"));
4966         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4967             ("destroying device while still queued for ccbs"));
4968
4969         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4970                 callout_stop(&device->callout);
4971
4972         xpt_release_target(device->target);
4973
4974         cam_ccbq_fini(&device->ccbq);
4975         /*
4976          * Free allocated memory.  free(9) does nothing if the
4977          * supplied pointer is NULL, so it is safe to call without
4978          * checking.
4979          */
4980         free(device->supported_vpds, M_CAMXPT);
4981         free(device->device_id, M_CAMXPT);
4982         free(device->ext_inq, M_CAMXPT);
4983         free(device->physpath, M_CAMXPT);
4984         free(device->rcap_buf, M_CAMXPT);
4985         free(device->serial_num, M_CAMXPT);
4986         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4987 }
4988
4989 u_int32_t
4990 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4991 {
4992         int     result;
4993         struct  cam_ed *dev;
4994
4995         dev = path->device;
4996         mtx_lock(&dev->sim->devq->send_mtx);
4997         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4998         mtx_unlock(&dev->sim->devq->send_mtx);
4999         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5000          || (dev->inq_flags & SID_CmdQue) != 0)
5001                 dev->tag_saved_openings = newopenings;
5002         return (result);
5003 }
5004
5005 static struct cam_eb *
5006 xpt_find_bus(path_id_t path_id)
5007 {
5008         struct cam_eb *bus;
5009
5010         xpt_lock_buses();
5011         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
5012              bus != NULL;
5013              bus = TAILQ_NEXT(bus, links)) {
5014                 if (bus->path_id == path_id) {
5015                         bus->refcount++;
5016                         break;
5017                 }
5018         }
5019         xpt_unlock_buses();
5020         return (bus);
5021 }
5022
5023 static struct cam_et *
5024 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5025 {
5026         struct cam_et *target;
5027
5028         mtx_assert(&bus->eb_mtx, MA_OWNED);
5029         for (target = TAILQ_FIRST(&bus->et_entries);
5030              target != NULL;
5031              target = TAILQ_NEXT(target, links)) {
5032                 if (target->target_id == target_id) {
5033                         target->refcount++;
5034                         break;
5035                 }
5036         }
5037         return (target);
5038 }
5039
5040 static struct cam_ed *
5041 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5042 {
5043         struct cam_ed *device;
5044
5045         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
5046         for (device = TAILQ_FIRST(&target->ed_entries);
5047              device != NULL;
5048              device = TAILQ_NEXT(device, links)) {
5049                 if (device->lun_id == lun_id) {
5050                         device->refcount++;
5051                         break;
5052                 }
5053         }
5054         return (device);
5055 }
5056
5057 void
5058 xpt_start_tags(struct cam_path *path)
5059 {
5060         struct ccb_relsim crs;
5061         struct cam_ed *device;
5062         struct cam_sim *sim;
5063         int    newopenings;
5064
5065         device = path->device;
5066         sim = path->bus->sim;
5067         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5068         xpt_freeze_devq(path, /*count*/1);
5069         device->inq_flags |= SID_CmdQue;
5070         if (device->tag_saved_openings != 0)
5071                 newopenings = device->tag_saved_openings;
5072         else
5073                 newopenings = min(device->maxtags,
5074                                   sim->max_tagged_dev_openings);
5075         xpt_dev_ccbq_resize(path, newopenings);
5076         xpt_async(AC_GETDEV_CHANGED, path, NULL);
5077         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5078         crs.ccb_h.func_code = XPT_REL_SIMQ;
5079         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5080         crs.openings
5081             = crs.release_timeout
5082             = crs.qfrozen_cnt
5083             = 0;
5084         xpt_action((union ccb *)&crs);
5085 }
5086
5087 void
5088 xpt_stop_tags(struct cam_path *path)
5089 {
5090         struct ccb_relsim crs;
5091         struct cam_ed *device;
5092         struct cam_sim *sim;
5093
5094         device = path->device;
5095         sim = path->bus->sim;
5096         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5097         device->tag_delay_count = 0;
5098         xpt_freeze_devq(path, /*count*/1);
5099         device->inq_flags &= ~SID_CmdQue;
5100         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
5101         xpt_async(AC_GETDEV_CHANGED, path, NULL);
5102         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5103         crs.ccb_h.func_code = XPT_REL_SIMQ;
5104         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5105         crs.openings
5106             = crs.release_timeout
5107             = crs.qfrozen_cnt
5108             = 0;
5109         xpt_action((union ccb *)&crs);
5110 }
5111
5112 static void
5113 xpt_boot_delay(void *arg)
5114 {
5115
5116         xpt_release_boot();
5117 }
5118
5119 static void
5120 xpt_config(void *arg)
5121 {
5122         /*
5123          * Now that interrupts are enabled, go find our devices
5124          */
5125         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5126                 printf("xpt_config: failed to create taskqueue thread.\n");
5127
5128         /* Setup debugging path */
5129         if (cam_dflags != CAM_DEBUG_NONE) {
5130                 if (xpt_create_path(&cam_dpath, NULL,
5131                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5132                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5133                         printf("xpt_config: xpt_create_path() failed for debug"
5134                                " target %d:%d:%d, debugging disabled\n",
5135                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5136                         cam_dflags = CAM_DEBUG_NONE;
5137                 }
5138         } else
5139                 cam_dpath = NULL;
5140
5141         periphdriver_init(1);
5142         xpt_hold_boot();
5143         callout_init(&xsoftc.boot_callout, 1);
5144         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
5145             xpt_boot_delay, NULL, 0);
5146         /* Fire up rescan thread. */
5147         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5148             "cam", "scanner")) {
5149                 printf("xpt_config: failed to create rescan thread.\n");
5150         }
5151 }
5152
5153 void
5154 xpt_hold_boot(void)
5155 {
5156         xpt_lock_buses();
5157         xsoftc.buses_to_config++;
5158         xpt_unlock_buses();
5159 }
5160
5161 void
5162 xpt_release_boot(void)
5163 {
5164         xpt_lock_buses();
5165         xsoftc.buses_to_config--;
5166         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
5167                 struct  xpt_task *task;
5168
5169                 xsoftc.buses_config_done = 1;
5170                 xpt_unlock_buses();
5171                 /* Call manually because we don't have any buses */
5172                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
5173                 if (task != NULL) {
5174                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
5175                         taskqueue_enqueue(taskqueue_thread, &task->task);
5176                 }
5177         } else
5178                 xpt_unlock_buses();
5179 }
5180
5181 /*
5182  * If the given device only has one peripheral attached to it, and if that
5183  * peripheral is the passthrough driver, announce it.  This insures that the
5184  * user sees some sort of announcement for every peripheral in their system.
5185  */
5186 static int
5187 xptpassannouncefunc(struct cam_ed *device, void *arg)
5188 {
5189         struct cam_periph *periph;
5190         int i;
5191
5192         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5193              periph = SLIST_NEXT(periph, periph_links), i++);
5194
5195         periph = SLIST_FIRST(&device->periphs);
5196         if ((i == 1)
5197          && (strncmp(periph->periph_name, "pass", 4) == 0))
5198                 xpt_announce_periph(periph, NULL);
5199
5200         return(1);
5201 }
5202
5203 static void
5204 xpt_finishconfig_task(void *context, int pending)
5205 {
5206
5207         periphdriver_init(2);
5208         /*
5209          * Check for devices with no "standard" peripheral driver
5210          * attached.  For any devices like that, announce the
5211          * passthrough driver so the user will see something.
5212          */
5213         if (!bootverbose)
5214                 xpt_for_all_devices(xptpassannouncefunc, NULL);
5215
5216         /* Release our hook so that the boot can continue. */
5217         config_intrhook_disestablish(xsoftc.xpt_config_hook);
5218         free(xsoftc.xpt_config_hook, M_CAMXPT);
5219         xsoftc.xpt_config_hook = NULL;
5220
5221         free(context, M_CAMXPT);
5222 }
5223
5224 cam_status
5225 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5226                    struct cam_path *path)
5227 {
5228         struct ccb_setasync csa;
5229         cam_status status;
5230         int xptpath = 0;
5231
5232         if (path == NULL) {
5233                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5234                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5235                 if (status != CAM_REQ_CMP)
5236                         return (status);
5237                 xpt_path_lock(path);
5238                 xptpath = 1;
5239         }
5240
5241         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5242         csa.ccb_h.func_code = XPT_SASYNC_CB;
5243         csa.event_enable = event;
5244         csa.callback = cbfunc;
5245         csa.callback_arg = cbarg;
5246         xpt_action((union ccb *)&csa);
5247         status = csa.ccb_h.status;
5248
5249         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5250             ("xpt_register_async: func %p\n", cbfunc));
5251
5252         if (xptpath) {
5253                 xpt_path_unlock(path);
5254                 xpt_free_path(path);
5255         }
5256
5257         if ((status == CAM_REQ_CMP) &&
5258             (csa.event_enable & AC_FOUND_DEVICE)) {
5259                 /*
5260                  * Get this peripheral up to date with all
5261                  * the currently existing devices.
5262                  */
5263                 xpt_for_all_devices(xptsetasyncfunc, &csa);
5264         }
5265         if ((status == CAM_REQ_CMP) &&
5266             (csa.event_enable & AC_PATH_REGISTERED)) {
5267                 /*
5268                  * Get this peripheral up to date with all
5269                  * the currently existing buses.
5270                  */
5271                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5272         }
5273
5274         return (status);
5275 }
5276
5277 static void
5278 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5279 {
5280         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5281
5282         switch (work_ccb->ccb_h.func_code) {
5283         /* Common cases first */
5284         case XPT_PATH_INQ:              /* Path routing inquiry */
5285         {
5286                 struct ccb_pathinq *cpi;
5287
5288                 cpi = &work_ccb->cpi;
5289                 cpi->version_num = 1; /* XXX??? */
5290                 cpi->hba_inquiry = 0;
5291                 cpi->target_sprt = 0;
5292                 cpi->hba_misc = 0;
5293                 cpi->hba_eng_cnt = 0;
5294                 cpi->max_target = 0;
5295                 cpi->max_lun = 0;
5296                 cpi->initiator_id = 0;
5297                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5298                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5299                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5300                 cpi->unit_number = sim->unit_number;
5301                 cpi->bus_id = sim->bus_id;
5302                 cpi->base_transfer_speed = 0;
5303                 cpi->protocol = PROTO_UNSPECIFIED;
5304                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5305                 cpi->transport = XPORT_UNSPECIFIED;
5306                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5307                 cpi->ccb_h.status = CAM_REQ_CMP;
5308                 xpt_done(work_ccb);
5309                 break;
5310         }
5311         default:
5312                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5313                 xpt_done(work_ccb);
5314                 break;
5315         }
5316 }
5317
5318 /*
5319  * The xpt as a "controller" has no interrupt sources, so polling
5320  * is a no-op.
5321  */
5322 static void
5323 xptpoll(struct cam_sim *sim)
5324 {
5325 }
5326
5327 void
5328 xpt_lock_buses(void)
5329 {
5330         mtx_lock(&xsoftc.xpt_topo_lock);
5331 }
5332
5333 void
5334 xpt_unlock_buses(void)
5335 {
5336         mtx_unlock(&xsoftc.xpt_topo_lock);
5337 }
5338
5339 struct mtx *
5340 xpt_path_mtx(struct cam_path *path)
5341 {
5342
5343         return (&path->device->device_mtx);
5344 }
5345
5346 static void
5347 xpt_done_process(struct ccb_hdr *ccb_h)
5348 {
5349         struct cam_sim *sim;
5350         struct cam_devq *devq;
5351         struct mtx *mtx = NULL;
5352
5353 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5354         struct ccb_scsiio *csio;
5355
5356         if (ccb_h->func_code == XPT_SCSI_IO) {
5357                 csio = &((union ccb *)ccb_h)->csio;
5358                 if (csio->bio != NULL)
5359                         biotrack(csio->bio, __func__);
5360         }
5361 #endif
5362
5363         if (ccb_h->flags & CAM_HIGH_POWER) {
5364                 struct highpowerlist    *hphead;
5365                 struct cam_ed           *device;
5366
5367                 mtx_lock(&xsoftc.xpt_highpower_lock);
5368                 hphead = &xsoftc.highpowerq;
5369
5370                 device = STAILQ_FIRST(hphead);
5371
5372                 /*
5373                  * Increment the count since this command is done.
5374                  */
5375                 xsoftc.num_highpower++;
5376
5377                 /*
5378                  * Any high powered commands queued up?
5379                  */
5380                 if (device != NULL) {
5381
5382                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5383                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5384
5385                         mtx_lock(&device->sim->devq->send_mtx);
5386                         xpt_release_devq_device(device,
5387                                          /*count*/1, /*runqueue*/TRUE);
5388                         mtx_unlock(&device->sim->devq->send_mtx);
5389                 } else
5390                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5391         }
5392
5393         sim = ccb_h->path->bus->sim;
5394
5395         if (ccb_h->status & CAM_RELEASE_SIMQ) {
5396                 xpt_release_simq(sim, /*run_queue*/FALSE);
5397                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
5398         }
5399
5400         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5401          && (ccb_h->status & CAM_DEV_QFRZN)) {
5402                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5403                 ccb_h->status &= ~CAM_DEV_QFRZN;
5404         }
5405
5406         devq = sim->devq;
5407         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5408                 struct cam_ed *dev = ccb_h->path->device;
5409
5410                 mtx_lock(&devq->send_mtx);
5411                 devq->send_active--;
5412                 devq->send_openings++;
5413                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5414
5415                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5416                   && (dev->ccbq.dev_active == 0))) {
5417                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5418                         xpt_release_devq_device(dev, /*count*/1,
5419                                          /*run_queue*/FALSE);
5420                 }
5421
5422                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5423                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5424                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5425                         xpt_release_devq_device(dev, /*count*/1,
5426                                          /*run_queue*/FALSE);
5427                 }
5428
5429                 if (!device_is_queued(dev))
5430                         (void)xpt_schedule_devq(devq, dev);
5431                 xpt_run_devq(devq);
5432                 mtx_unlock(&devq->send_mtx);
5433
5434                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5435                         mtx = xpt_path_mtx(ccb_h->path);
5436                         mtx_lock(mtx);
5437
5438                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5439                          && (--dev->tag_delay_count == 0))
5440                                 xpt_start_tags(ccb_h->path);
5441                 }
5442         }
5443
5444         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5445                 if (mtx == NULL) {
5446                         mtx = xpt_path_mtx(ccb_h->path);
5447                         mtx_lock(mtx);
5448                 }
5449         } else {
5450                 if (mtx != NULL) {
5451                         mtx_unlock(mtx);
5452                         mtx = NULL;
5453                 }
5454         }
5455
5456         /* Call the peripheral driver's callback */
5457         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5458         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5459         if (mtx != NULL)
5460                 mtx_unlock(mtx);
5461 }
5462
5463 void
5464 xpt_done_td(void *arg)
5465 {
5466         struct cam_doneq *queue = arg;
5467         struct ccb_hdr *ccb_h;
5468         STAILQ_HEAD(, ccb_hdr)  doneq;
5469
5470         STAILQ_INIT(&doneq);
5471         mtx_lock(&queue->cam_doneq_mtx);
5472         while (1) {
5473                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5474                         queue->cam_doneq_sleep = 1;
5475                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5476                             PRIBIO, "-", 0);
5477                         queue->cam_doneq_sleep = 0;
5478                 }
5479                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5480                 mtx_unlock(&queue->cam_doneq_mtx);
5481
5482                 THREAD_NO_SLEEPING();
5483                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5484                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5485                         xpt_done_process(ccb_h);
5486                 }
5487                 THREAD_SLEEPING_OK();
5488
5489                 mtx_lock(&queue->cam_doneq_mtx);
5490         }
5491 }
5492
5493 static void
5494 camisr_runqueue(void)
5495 {
5496         struct  ccb_hdr *ccb_h;
5497         struct cam_doneq *queue;
5498         int i;
5499
5500         /* Process global queues. */
5501         for (i = 0; i < cam_num_doneqs; i++) {
5502                 queue = &cam_doneqs[i];
5503                 mtx_lock(&queue->cam_doneq_mtx);
5504                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5505                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5506                         mtx_unlock(&queue->cam_doneq_mtx);
5507                         xpt_done_process(ccb_h);
5508                         mtx_lock(&queue->cam_doneq_mtx);
5509                 }
5510                 mtx_unlock(&queue->cam_doneq_mtx);
5511         }
5512 }
5513
5514 struct kv 
5515 {
5516         uint32_t v;
5517         const char *name;
5518 };
5519
5520 static struct kv map[] = {
5521         { XPT_NOOP, "XPT_NOOP" },
5522         { XPT_SCSI_IO, "XPT_SCSI_IO" },
5523         { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5524         { XPT_GDEVLIST, "XPT_GDEVLIST" },
5525         { XPT_PATH_INQ, "XPT_PATH_INQ" },
5526         { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5527         { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5528         { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5529         { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5530         { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5531         { XPT_DEBUG, "XPT_DEBUG" },
5532         { XPT_PATH_STATS, "XPT_PATH_STATS" },
5533         { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5534         { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5535         { XPT_ASYNC, "XPT_ASYNC" },
5536         { XPT_ABORT, "XPT_ABORT" },
5537         { XPT_RESET_BUS, "XPT_RESET_BUS" },
5538         { XPT_RESET_DEV, "XPT_RESET_DEV" },
5539         { XPT_TERM_IO, "XPT_TERM_IO" },
5540         { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5541         { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5542         { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5543         { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5544         { XPT_ATA_IO, "XPT_ATA_IO" },
5545         { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5546         { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5547         { XPT_NVME_IO, "XPT_NVME_IO" },
5548         { XPT_MMC_IO, "XPT_MMC_IO" },
5549         { XPT_SMP_IO, "XPT_SMP_IO" },
5550         { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5551         { XPT_ENG_INQ, "XPT_ENG_INQ" },
5552         { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5553         { XPT_EN_LUN, "XPT_EN_LUN" },
5554         { XPT_TARGET_IO, "XPT_TARGET_IO" },
5555         { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5556         { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5557         { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5558         { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5559         { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5560         { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5561         { 0, 0 }
5562 };
5563
5564 const char *
5565 xpt_action_name(uint32_t action) 
5566 {
5567         static char buffer[32]; /* Only for unknown messages -- racy */
5568         struct kv *walker = map;
5569
5570         while (walker->name != NULL) {
5571                 if (walker->v == action)
5572                         return (walker->name);
5573                 walker++;
5574         }
5575
5576         snprintf(buffer, sizeof(buffer), "%#x", action);
5577         return (buffer);
5578 }