/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_printf.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_iosched.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/stdarg.h>     /* for xpt_print below */

#include "opt_cam.h"

/* Wild guess based on not wanting to grow the stack too much */
#define XPT_PRINT_MAXLEN        512
#ifdef PRINTF_BUFR_SIZE
#define XPT_PRINT_LEN   PRINTF_BUFR_SIZE
#else
#define XPT_PRINT_LEN   128
#endif
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

struct xpt_softc {
        uint32_t                xpt_generation;

        /* number of high powered commands that can go through right now */
        struct mtx              xpt_highpower_lock;
        STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
        int                     num_highpower;

        /* queue for handling async rescan requests. */
        TAILQ_HEAD(, ccb_hdr) ccb_scanq;
        int buses_to_config;
        int buses_config_done;
        int announce_nosbuf;

        /*
         * Registered buses
         *
         * N.B., "busses" is an archaic spelling of "buses".  In new code
         * "buses" is preferred.
         */
        TAILQ_HEAD(,cam_eb)     xpt_busses;
        u_int                   bus_generation;

        int                     boot_delay;
        struct callout          boot_callout;
        struct task             boot_task;
        struct root_hold_token  xpt_rootmount;

        struct mtx              xpt_topo_lock;
        struct taskqueue        *xpt_taskq;
};

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;
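
/*
 * The low nibble of a dev_match_ret carries per-node flags and the high
 * nibble the traversal action, so a matcher combines them; e.g.,
 * returning (DM_RET_DESCEND | DM_RET_COPY) means "copy this node into
 * the result buffer and keep walking its children", which is what
 * xptbusmatch() below returns when given no patterns.
 */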

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
           &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
            &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
SYSCTL_INT(_kern_cam, OID_AUTO, announce_nosbuf, CTLFLAG_RWTUN,
            &xsoftc.announce_nosbuf, 0, "Don't use sbuf for announcements");
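
/*
 * Usage sketch (values illustrative): boot_delay is a loader tunable
 * (CTLFLAG_RDTUN), announce_nosbuf may also be changed at runtime
 * (CTLFLAG_RWTUN), and xpt_generation is read-only.  For example, in
 * /boot/loader.conf:
 *
 *      kern.cam.boot_delay="10000"     # extra bus registration wait time
 *      kern.cam.announce_nosbuf="1"    # plain printf announcements
 */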

struct cam_doneq {
        struct mtx_padalign     cam_doneq_mtx;
        STAILQ_HEAD(, ccb_hdr)  cam_doneq;
        int                     cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static u_int __read_mostly cam_num_doneqs;
static struct proc *cam_proc;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
           &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
        CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
        &cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
        &cam_debug_delay, 0, "Delay in us after each debug message");
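
/*
 * Both debug knobs are CTLFLAG_RWTUN, so they can be seeded from
 * loader.conf or flipped at runtime; the bit definitions for dflags
 * live in cam_debug.h.  For example (flag value illustrative):
 *
 *      sysctl kern.cam.dflags=0x2              # e.g. CAM_DEBUG_TRACE
 *      sysctl kern.cam.debug_delay=1000        # pause 1ms per message
 */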

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static int      xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
static void      xpt_run_allocq_task(void *context, int pending);
static void      xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void      xpt_acquire_bus(struct cam_eb *bus);
static void      xpt_release_bus(struct cam_eb *bus);
static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
                    int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_acquire_target(struct cam_et *target);
static void      xpt_release_target(struct cam_et *target);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_config(void *arg);
static void      xpt_hold_boot_locked(void);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr_runqueue(void);
static void      xpt_done_process(struct ccb_hdr *ccb_h);
static void      xpt_done_td(void *);
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void             xpt_finishconfig_task(void *context, int pending);
static void             xpt_dev_async_default(u_int32_t async_code,
                                              struct cam_eb *bus,
                                              struct cam_et *target,
                                              struct cam_ed *device,
                                              void *async_arg);
static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
                                                 struct cam_et *target,
                                                 lun_id_t lun_id);
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
        int     retval;

        mtx_assert(&devq->send_mtx, MA_OWNED);
        if ((dev->ccbq.queue.entries > 0) &&
            (dev->ccbq.dev_openings > 0) &&
            (dev->ccbq.queue.qfrozen_cnt == 0)) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&devq->send_queue,
                                     &dev->devq_entry,
                                     CAMQ_GET_PRIO(&dev->ccbq.queue));
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
        return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return (EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("%s: can't do nonblocking access\n", devtoname(dev));
                return (ENODEV);
        }

        return (0);
}
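
/*
 * Userland sketch (not part of the kernel build): per the checks in
 * xptopen() above, the control device must be opened read-write and
 * blocking:
 *
 *      int fd  = open("/dev/xpt0", O_RDWR);              accepted
 *      int bad = open("/dev/xpt0", O_RDWR | O_NONBLOCK); fails, ENODEV
 *      int ro  = open("/dev/xpt0", O_RDONLY);            fails, EPERM
 */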

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

        return (0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
                error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
        }
        return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        error = 0;

        switch (cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;
                struct cam_eb *bus;

                inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (inccb->ccb_h.func_code == XPT_SCSI_IO)
                        inccb->csio.bio = NULL;
#endif

                if (inccb->ccb_h.flags & CAM_UNLOCKED)
                        return (EINVAL);

                bus = xpt_find_bus(inccb->ccb_h.path_id);
                if (bus == NULL)
                        return (EINVAL);

                switch (inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                case XPT_SCAN_TGT:
                        if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
                            inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                                xpt_release_bus(bus);
                                return (EINVAL);
                        }
                        break;
                default:
                        break;
                }

                switch (inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:
                case XPT_SCAN_TGT:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP) {
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        xpt_path_lock(ccb->ccb_h.path);
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        xpt_path_unlock(ccb->ccb_h.path);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, NULL,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP) {
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;
                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
                            CAM_DATA_VADDR) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo, maxphys);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                xpt_release_bus(bus);
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
         * input, with the peripheral driver name and unit number filled in.
         * The other fields don't really matter as input.  The passthrough
         * driver name ("pass") and unit number are passed back in the ccb.
         * The current device generation number, the index into the device
         * peripheral driver list, and the status are also passed back.  Note
         * that since we do everything in one pass, unlike the XPT_GDEVLIST
         * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it with lock protection.
         */
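        /*
         * Userland sketch (not part of the kernel build; libcam performs
         * essentially this dance to map a peripheral to its pass(4) unit):
         *
         *      union ccb ccb;
         *
         *      bzero(&ccb, sizeof(ccb));
         *      ccb.ccb_h.func_code = XPT_GDEVLIST;
         *      strlcpy(ccb.cgdl.periph_name, "da",
         *          sizeof(ccb.cgdl.periph_name));
         *      ccb.cgdl.unit_number = 0;
         *      if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
         *              err(1, "CAMGETPASSTHRU");
         *      // On success, ccb.cgdl.periph_name is "pass" and
         *      // ccb.cgdl.unit_number is the matching pass(4) unit.
         */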
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                int base_periph_found;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                base_periph_found = 0;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (ccb->ccb_h.func_code == XPT_SCSI_IO)
                        ccb->csio.bio = NULL;
#endif

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                xpt_lock_buses();

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        xpt_unlock_buses();
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {
                        if (periph->unit_number == unit)
                                break;
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = SLIST_FIRST(&device->periphs);
                             periph != NULL;
                             periph = SLIST_NEXT(periph, periph_links), i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strlcpy(ccb->cgdl.periph_name,
                                               periph->periph_name,
                                               sizeof(ccb->cgdl.periph_name));
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (SLIST_NEXT(periph, periph_links))
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  E.g., the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        /*
                         * It is unfortunate that this is even necessary,
                         * but there are many, many clueless users out there.
                         * If this is true, the user is looking for the
                         * passthrough driver, but doesn't have one in their
                         * kernel.
                         */
                        if (base_periph_found == 1) {
                                printf("xptioctl: pass driver is not in the "
                                       "kernel\n");
                                printf("xptioctl: put \"device pass\" in "
                                       "your kernel config file\n");
                        }
                }
                xpt_unlock_buses();
                break;
        }
        default:
                error = ENOTTY;
                break;
        }

        return (error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                if ((error = xpt_init(NULL)) != 0)
                        return (error);
                break;
        case MOD_UNLOAD:
                return (EBUSY);
        default:
                return (EOPNOTSUPP);
        }

        return (0);
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
        struct xpt_proto **pp;

        SET_FOREACH(pp, cam_xpt_proto_set) {
                if ((*pp)->proto == proto)
                        return (*pp);
        }

        return (NULL);
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

        if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
                xpt_free_path(done_ccb->ccb_h.path);
                xpt_free_ccb(done_ccb);
        } else {
                done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
                (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
        }
        xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
        union ccb       *ccb;
        struct mtx      *mtx;
        struct cam_ed   *device;

        xpt_lock_buses();
        for (;;) {
                if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
                        msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
                               "-", 0);
                if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
                        TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
                        xpt_unlock_buses();

                        /*
                         * We need to lock the device's mutex which we use as
                         * the path mutex.  We can't do it directly because the
                         * cam_path in the ccb may wind up going away because
                         * the path lock may be dropped and the path retired in
                         * the completion callback.  We do this directly to keep
                         * the reference counts in cam_path sane.  We also have
                         * to copy the device pointer because ccb_h.path may
                         * be freed in the callback.
                         */
                        mtx = xpt_path_mtx(ccb->ccb_h.path);
                        device = ccb->ccb_h.path->device;
                        xpt_acquire_device(device);
                        mtx_lock(mtx);
                        xpt_action(ccb);
                        mtx_unlock(mtx);
                        xpt_release_device(device);

                        xpt_lock_buses();
                }
        }
}

void
xpt_rescan(union ccb *ccb)
{
        struct ccb_hdr *hdr;

        /* Prepare request */
        if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_BUS;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_TGT;
        else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
            ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
                ccb->ccb_h.func_code = XPT_SCAN_LUN;
        else {
                xpt_print(ccb->ccb_h.path, "illegal scan path\n");
                xpt_free_path(ccb->ccb_h.path);
                xpt_free_ccb(ccb);
                return;
        }
        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
            ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
                xpt_action_name(ccb->ccb_h.func_code)));

        ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
        ccb->ccb_h.cbfcnp = xpt_rescan_done;
        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
        /* Don't make duplicate entries for the same paths. */
        xpt_lock_buses();
        if (ccb->ccb_h.ppriv_ptr1 == NULL) {
                TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
                        if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
                                wakeup(&xsoftc.ccb_scanq);
                                xpt_unlock_buses();
                                xpt_print(ccb->ccb_h.path, "rescan already queued\n");
                                xpt_free_path(ccb->ccb_h.path);
                                xpt_free_ccb(ccb);
                                return;
                        }
                }
        }
        TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
        xpt_hold_boot_locked();
        wakeup(&xsoftc.ccb_scanq);
        xpt_unlock_buses();
}
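
/*
 * Kernel-side sketch (hypothetical caller): xpt_rescan() consumes both
 * the CCB and its path; on the default completion path (no private
 * callback) xpt_rescan_done() above frees them, so the caller cleans up
 * only if path creation fails.
 *
 *      union ccb *ccb = xpt_alloc_ccb();
 *
 *      if (xpt_create_path(&ccb->ccb_h.path, NULL, bus_path_id,
 *          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *              xpt_free_ccb(ccb);
 *              return;
 *      }
 *      xpt_rescan(ccb);        // becomes XPT_SCAN_BUS (both IDs wildcard)
 */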

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
        struct cam_sim *xpt_sim;
        struct cam_path *path;
        struct cam_devq *devq;
        cam_status status;
        int error, i;

        TAILQ_INIT(&xsoftc.xpt_busses);
        TAILQ_INIT(&xsoftc.ccb_scanq);
        STAILQ_INIT(&xsoftc.highpowerq);
        xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

        mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
        xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
        /*
         * Override this value at compile time to assist our users
         * who don't use loader to boot a kernel.
         */
        xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif

        /*
         * The xpt layer is, itself, the equivalent of a SIM.
         * Allow 16 ccbs in the ccb pool for it.  This should
         * give decent parallelism when we probe buses and
         * perform other XPT functions.
         */
        devq = cam_simq_alloc(16);
        xpt_sim = cam_sim_alloc(xptaction,
                                xptpoll,
                                "xpt",
                                /*softc*/NULL,
                                /*unit*/0,
                                /*mtx*/NULL,
                                /*max_dev_transactions*/0,
                                /*max_tagged_dev_transactions*/0,
                                devq);
        if (xpt_sim == NULL)
                return (ENOMEM);

        if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
                printf("xpt_init: xpt_bus_register failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }

        /*
         * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
         * a peripheral driver entry for us.
         */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                return (EINVAL);
        }
        xpt_path_lock(path);
        cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                         path, NULL, 0, xpt_sim);
        xpt_path_unlock(path);
        xpt_free_path(path);

        if (cam_num_doneqs < 1)
                cam_num_doneqs = 1 + mp_ncpus / 6;
        else if (cam_num_doneqs > MAXCPU)
                cam_num_doneqs = MAXCPU;
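        /*
         * For example, with kern.cam.num_doneqs unset and mp_ncpus == 8,
         * this creates 1 + 8 / 6 = 2 completion threads.
         */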
        for (i = 0; i < cam_num_doneqs; i++) {
                mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
                    MTX_DEF);
                STAILQ_INIT(&cam_doneqs[i].cam_doneq);
                error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
                    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
                if (error != 0) {
                        cam_num_doneqs = i;
                        break;
                }
        }
        if (cam_num_doneqs < 1) {
                printf("xpt_init: Cannot init completion queues "
                       "- failing attach\n");
                return (ENOMEM);
        }

        /*
         * Register a callback for when interrupts are enabled.
         */
        config_intrhook_oneshot(xpt_config, NULL);

        return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
        struct cam_sim *xpt_sim;

        if (periph == NULL) {
                printf("xptregister: periph was NULL!!\n");
                return (CAM_REQ_CMP_ERR);
        }

        xpt_sim = (struct cam_sim *)arg;
        xpt_sim->softc = periph;
        xpt_periph = periph;
        periph->softc = NULL;

        return (CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
        struct cam_ed *device;
        int32_t  status;

        TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
        device = periph->path->device;
        status = CAM_REQ_CMP;
        if (device != NULL) {
                mtx_lock(&device->target->bus->eb_mtx);
                device->generation++;
                SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
                mtx_unlock(&device->target->bus->eb_mtx);
                atomic_add_32(&xsoftc.xpt_generation, 1);
        }

        return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
        struct cam_ed *device;

        device = periph->path->device;
        if (device != NULL) {
                mtx_lock(&device->target->bus->eb_mtx);
                device->generation++;
                SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
                mtx_unlock(&device->target->bus->eb_mtx);
                atomic_add_32(&xsoftc.xpt_generation, 1);
        }
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
        struct  cam_path *path = periph->path;
        struct  xpt_proto *proto;

        cam_periph_assert(periph, MA_OWNED);
        periph->flags |= CAM_PERIPH_ANNOUNCED;

        printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
               periph->periph_name, periph->unit_number,
               path->bus->sim->sim_name,
               path->bus->sim->unit_number,
               path->bus->sim->bus_id,
               path->bus->path_id,
               path->target->target_id,
               (uintmax_t)path->device->lun_id);
        printf("%s%d: ", periph->periph_name, periph->unit_number);
        proto = xpt_proto_find(path->device->protocol);
        if (proto)
                proto->ops->announce(path->device);
        else
                printf("%s%d: Unknown protocol device %d\n",
                    periph->periph_name, periph->unit_number,
                    path->device->protocol);
        if (path->device->serial_num_len > 0) {
                /* Don't wrap the screen - print only the first 60 chars */
                printf("%s%d: Serial Number %.60s\n", periph->periph_name,
                       periph->unit_number, path->device->serial_num);
        }
        /* Announce transport details. */
        path->bus->xport->ops->announce(periph);
        /* Announce command queueing. */
        if (path->device->inq_flags & SID_CmdQue
         || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                printf("%s%d: Command Queueing enabled\n",
                       periph->periph_name, periph->unit_number);
        }
        /* Announce caller's details if any were passed in. */
        if (announce_string != NULL)
                printf("%s%d: %s\n", periph->periph_name,
                       periph->unit_number, announce_string);
}
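
/*
 * Taken together, the printfs above produce console output of this
 * shape (names and values are illustrative):
 *
 *      da0 at umass-sim0 bus 0 scbus0 target 0 lun 0
 *      da0: Serial Number ABC123
 *      da0: Command Queueing enabled
 *
 * with the protocol and transport announce hooks contributing their own
 * lines in between.
 */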

void
xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
    char *announce_string)
{
        struct  cam_path *path = periph->path;
        struct  xpt_proto *proto;

        cam_periph_assert(periph, MA_OWNED);
        periph->flags |= CAM_PERIPH_ANNOUNCED;

        /* Fall back to the non-sbuf method if necessary */
        if (xsoftc.announce_nosbuf != 0) {
                xpt_announce_periph(periph, announce_string);
                return;
        }
        proto = xpt_proto_find(path->device->protocol);
        if (((proto != NULL) && (proto->ops->announce_sbuf == NULL)) ||
            (path->bus->xport->ops->announce_sbuf == NULL)) {
                xpt_announce_periph(periph, announce_string);
                return;
        }

        sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
            periph->periph_name, periph->unit_number,
            path->bus->sim->sim_name,
            path->bus->sim->unit_number,
            path->bus->sim->bus_id,
            path->bus->path_id,
            path->target->target_id,
            (uintmax_t)path->device->lun_id);
        sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);

        if (proto)
                proto->ops->announce_sbuf(path->device, sb);
        else
                sbuf_printf(sb, "%s%d: Unknown protocol device %d\n",
                    periph->periph_name, periph->unit_number,
                    path->device->protocol);
        if (path->device->serial_num_len > 0) {
                /* Don't wrap the screen - print only the first 60 chars */
                sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
                    periph->periph_name, periph->unit_number,
                    path->device->serial_num);
        }
        /* Announce transport details. */
        path->bus->xport->ops->announce_sbuf(periph, sb);
        /* Announce command queueing. */
        if (path->device->inq_flags & SID_CmdQue
         || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
                sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
                    periph->periph_name, periph->unit_number);
        }
        /* Announce caller's details if any were passed in. */
        if (announce_string != NULL)
                sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
                    periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
        if (quirks != 0) {
                printf("%s%d: quirks=0x%b\n", periph->periph_name,
                    periph->unit_number, quirks, bit_string);
        }
}
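
/*
 * printf(9)'s %b conversion decodes the quirk bits using bit_string,
 * whose first byte selects the numeric base and whose remaining groups
 * name individual bits.  A hypothetical call:
 *
 *      xpt_announce_quirks(periph, 0x3,
 *          "\020" "\001NO_SYNC_CACHE" "\002NO_6_BYTE");
 *
 * would print something like "da0: quirks=0x3<NO_SYNC_CACHE,NO_6_BYTE>".
 */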
1146
1147 void
1148 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
1149                          int quirks, char *bit_string)
1150 {
1151         if (xsoftc.announce_nosbuf != 0) {
1152                 xpt_announce_quirks(periph, quirks, bit_string);
1153                 return;
1154         }
1155
1156         if (quirks != 0) {
1157                 sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
1158                     periph->unit_number, quirks, bit_string);
1159         }
1160 }
1161
1162 void
1163 xpt_denounce_periph(struct cam_periph *periph)
1164 {
1165         struct  cam_path *path = periph->path;
1166         struct  xpt_proto *proto;
1167
1168         cam_periph_assert(periph, MA_OWNED);
1169         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1170                periph->periph_name, periph->unit_number,
1171                path->bus->sim->sim_name,
1172                path->bus->sim->unit_number,
1173                path->bus->sim->bus_id,
1174                path->bus->path_id,
1175                path->target->target_id,
1176                (uintmax_t)path->device->lun_id);
1177         printf("%s%d: ", periph->periph_name, periph->unit_number);
1178         proto = xpt_proto_find(path->device->protocol);
1179         if (proto)
1180                 proto->ops->denounce(path->device);
1181         else
1182                 printf("%s%d: Unknown protocol device %d\n",
1183                     periph->periph_name, periph->unit_number,
1184                     path->device->protocol);
1185         if (path->device->serial_num_len > 0)
1186                 printf(" s/n %.60s", path->device->serial_num);
1187         printf(" detached\n");
1188 }
1189
1190 void
1191 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
1192 {
1193         struct cam_path *path = periph->path;
1194         struct xpt_proto *proto;
1195
1196         cam_periph_assert(periph, MA_OWNED);
1197
1198         /* Fall back to the non-sbuf method if necessary */
1199         if (xsoftc.announce_nosbuf != 0) {
1200                 xpt_denounce_periph(periph);
1201                 return;
1202         }
1203         proto = xpt_proto_find(path->device->protocol);
1204         if ((proto != NULL) && (proto->ops->denounce_sbuf == NULL)) {
1205                 xpt_denounce_periph(periph);
1206                 return;
1207         }
1208
1209         sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1210             periph->periph_name, periph->unit_number,
1211             path->bus->sim->sim_name,
1212             path->bus->sim->unit_number,
1213             path->bus->sim->bus_id,
1214             path->bus->path_id,
1215             path->target->target_id,
1216             (uintmax_t)path->device->lun_id);
1217         sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1218
1219         if (proto)
1220                 proto->ops->denounce_sbuf(path->device, sb);
1221         else
1222                 sbuf_printf(sb, "%s%d: Unknown protocol device %d\n",
1223                     periph->periph_name, periph->unit_number,
1224                     path->device->protocol);
1225         if (path->device->serial_num_len > 0)
1226                 sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
1227         sbuf_printf(sb, " detached\n");
1228 }
1229
1230 int
1231 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1232 {
1233         int ret = -1, l, o;
1234         struct ccb_dev_advinfo cdai;
1235         struct scsi_vpd_device_id *did;
1236         struct scsi_vpd_id_descriptor *idd;
1237
1238         xpt_path_assert(path, MA_OWNED);
1239
1240         memset(&cdai, 0, sizeof(cdai));
1241         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1242         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1243         cdai.flags = CDAI_FLAG_NONE;
1244         cdai.bufsiz = len;
1245         cdai.buf = buf;
1246
1247         if (!strcmp(attr, "GEOM::ident"))
1248                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1249         else if (!strcmp(attr, "GEOM::physpath"))
1250                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
1251         else if (strcmp(attr, "GEOM::lunid") == 0 ||
1252                  strcmp(attr, "GEOM::lunname") == 0) {
1253                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1254                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1255                 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
1256                 if (cdai.buf == NULL) {
1257                         ret = ENOMEM;
1258                         goto out;
1259                 }
1260         } else
1261                 goto out;
1262
1263         xpt_action((union ccb *)&cdai); /* can only be synchronous */
1264         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1265                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1266         if (cdai.provsiz == 0)
1267                 goto out;
1268         switch(cdai.buftype) {
1269         case CDAI_TYPE_SCSI_DEVID:
1270                 did = (struct scsi_vpd_device_id *)cdai.buf;
1271                 if (strcmp(attr, "GEOM::lunid") == 0) {
1272                         idd = scsi_get_devid(did, cdai.provsiz,
1273                             scsi_devid_is_lun_naa);
1274                         if (idd == NULL)
1275                                 idd = scsi_get_devid(did, cdai.provsiz,
1276                                     scsi_devid_is_lun_eui64);
1277                         if (idd == NULL)
1278                                 idd = scsi_get_devid(did, cdai.provsiz,
1279                                     scsi_devid_is_lun_uuid);
1280                         if (idd == NULL)
1281                                 idd = scsi_get_devid(did, cdai.provsiz,
1282                                     scsi_devid_is_lun_md5);
1283                 } else
1284                         idd = NULL;
1285
1286                 if (idd == NULL)
1287                         idd = scsi_get_devid(did, cdai.provsiz,
1288                             scsi_devid_is_lun_t10);
1289                 if (idd == NULL)
1290                         idd = scsi_get_devid(did, cdai.provsiz,
1291                             scsi_devid_is_lun_name);
1292                 if (idd == NULL)
1293                         break;
1294
1295                 ret = 0;
1296                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1297                     SVPD_ID_CODESET_ASCII) {
1298                         if (idd->length < len) {
1299                                 for (l = 0; l < idd->length; l++)
1300                                         buf[l] = idd->identifier[l] ?
1301                                             idd->identifier[l] : ' ';
1302                                 buf[l] = 0;
1303                         } else
1304                                 ret = EFAULT;
1305                         break;
1306                 }
1307                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1308                     SVPD_ID_CODESET_UTF8) {
1309                         l = strnlen(idd->identifier, idd->length);
1310                         if (l < len) {
1311                                 bcopy(idd->identifier, buf, l);
1312                                 buf[l] = 0;
1313                         } else
1314                                 ret = EFAULT;
1315                         break;
1316                 }
1317                 if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
1318                     SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
1319                         if ((idd->length - 2) * 2 + 4 >= len) {
1320                                 ret = EFAULT;
1321                                 break;
1322                         }
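                             /*
                              * Render the 16 UUID bytes (starting at offset 2
                              * in the designator) in the usual 8-4-4-4-12
                              * text form.
                              */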
1323                         for (l = 2, o = 0; l < idd->length; l++) {
1324                                 if (l == 6 || l == 8 || l == 10 || l == 12)
1325                                 o += sprintf(buf + o, "-");
1326                                 o += sprintf(buf + o, "%02x",
1327                                     idd->identifier[l]);
1328                         }
1329                         break;
1330                 }
1331                 if (idd->length * 2 < len) {
1332                         for (l = 0; l < idd->length; l++)
1333                                 sprintf(buf + l * 2, "%02x",
1334                                     idd->identifier[l]);
1335                 } else
1336                         ret = EFAULT;
1337                 break;
1338         default:
1339                 if (cdai.provsiz < len) {
1340                         cdai.buf[cdai.provsiz] = 0;
1341                         ret = 0;
1342                 } else
1343                         ret = EFAULT;
1344                 break;
1345         }
1346
1347 out:
1348         if ((char *)cdai.buf != buf)
1349                 free(cdai.buf, M_CAMXPT);
1350         return ret;
1351 }
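
     /*
      * Sketch of the typical consumer: a disk(9) d_getattr method that
      * forwards GEOM attribute queries to the code above.  This is modeled
      * on da(4)'s dagetattr(); xdgetattr and its periph are hypothetical.
      * On success the attribute handlers above leave a NUL-terminated
      * string in bp->bio_data.
      *
      *      static int
      *      xdgetattr(struct bio *bp)
      *      {
      *              struct cam_periph *periph =
      *                  (struct cam_periph *)bp->bio_disk->d_drv1;
      *              int ret;
      *
      *              cam_periph_lock(periph);
      *              ret = xpt_getattr(bp->bio_data, bp->bio_length,
      *                  bp->bio_attribute, periph->path);
      *              cam_periph_unlock(periph);
      *              if (ret == 0)
      *                      bp->bio_completed = bp->bio_length;
      *              return (ret);
      *      }
      */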
1352
1353 static dev_match_ret
1354 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1355             struct cam_eb *bus)
1356 {
1357         dev_match_ret retval;
1358         u_int i;
1359
1360         retval = DM_RET_NONE;
1361
1362         /*
1363          * If we aren't given something to match against, that's an error.
1364          */
1365         if (bus == NULL)
1366                 return(DM_RET_ERROR);
1367
1368         /*
1369          * If there are no match entries, then this bus matches no
1370          * matter what.
1371          */
1372         if ((patterns == NULL) || (num_patterns == 0))
1373                 return(DM_RET_DESCEND | DM_RET_COPY);
1374
1375         for (i = 0; i < num_patterns; i++) {
1376                 struct bus_match_pattern *cur_pattern;
1377
1378                 /*
1379                  * If the pattern in question isn't for a bus node, we
1380                  * aren't interested.  However, we do indicate to the
1381                  * calling routine that we should continue descending the
1382                  * tree, since the user wants to match against lower-level
1383                  * EDT elements.
1384                  */
1385                 if (patterns[i].type != DEV_MATCH_BUS) {
1386                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1387                                 retval |= DM_RET_DESCEND;
1388                         continue;
1389                 }
1390
1391                 cur_pattern = &patterns[i].pattern.bus_pattern;
1392
1393                 /*
1394                  * If they want to match any bus node, this bus
1395                  * qualifies.
1396                  */
1397                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1398                         /* set the copy flag */
1399                         retval |= DM_RET_COPY;
1400
1401                         /*
1402                          * If we've already decided on an action, go ahead
1403                          * and return.
1404                          */
1405                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1406                                 return(retval);
1407                 }
1408
1409                 /*
1410                  * Matching nothing is pointless, but harmless; skip it.
1411                  */
1412                 if (cur_pattern->flags == BUS_MATCH_NONE)
1413                         continue;
1414
1415                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1416                  && (cur_pattern->path_id != bus->path_id))
1417                         continue;
1418
1419                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1420                  && (cur_pattern->bus_id != bus->sim->bus_id))
1421                         continue;
1422
1423                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1424                  && (cur_pattern->unit_number != bus->sim->unit_number))
1425                         continue;
1426
1427                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1428                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1429                              DEV_IDLEN) != 0))
1430                         continue;
1431
1432                 /*
1433                  * If we get to this point, the user definitely wants
1434                  * information on this bus.  So tell the caller to copy the
1435                  * data out.
1436                  */
1437                 retval |= DM_RET_COPY;
1438
1439                 /*
1440                  * If the return action has been set to descend, then we
1441                  * know that we've already seen a non-bus matching
1442                  * expression, therefore we need to further descend the tree.
1443                  * This won't change by continuing around the loop, so we
1444                  * go ahead and return.  If we haven't seen a non-bus
1445                  * matching expression, we keep going around the loop until
1446                  * we exhaust the matching expressions.  We'll set the stop
1447                  * flag once we fall out of the loop.
1448                  */
1449                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1450                         return(retval);
1451         }
1452
1453         /*
1454          * If the return action hasn't been set to descend yet, that means
1455          * we haven't seen anything other than bus matching patterns.  So
1456          * tell the caller to stop descending the tree -- the user doesn't
1457          * want to match against lower level tree elements.
1458          */
1459         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1460                 retval |= DM_RET_STOP;
1461
1462         return(retval);
1463 }
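
     /*
      * Example (sketch): a pattern that would match every bus served by a
      * SIM named "ahc", regardless of path id.  The driver name is only
      * illustrative.
      *
      *      struct dev_match_pattern p;
      *
      *      p.type = DEV_MATCH_BUS;
      *      p.pattern.bus_pattern.flags = BUS_MATCH_NAME;
      *      strlcpy(p.pattern.bus_pattern.dev_name, "ahc",
      *          sizeof(p.pattern.bus_pattern.dev_name));
      */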
1464
1465 static dev_match_ret
1466 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1467                struct cam_ed *device)
1468 {
1469         dev_match_ret retval;
1470         u_int i;
1471
1472         retval = DM_RET_NONE;
1473
1474         /*
1475          * If we aren't given something to match against, that's an error.
1476          */
1477         if (device == NULL)
1478                 return(DM_RET_ERROR);
1479
1480         /*
1481          * If there are no match entries, then this device matches no
1482          * matter what.
1483          */
1484         if ((patterns == NULL) || (num_patterns == 0))
1485                 return(DM_RET_DESCEND | DM_RET_COPY);
1486
1487         for (i = 0; i < num_patterns; i++) {
1488                 struct device_match_pattern *cur_pattern;
1489                 struct scsi_vpd_device_id *device_id_page;
1490
1491                 /*
1492                  * If the pattern in question isn't for a device node, we
1493                  * aren't interested.
1494                  */
1495                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1496                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1497                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1498                                 retval |= DM_RET_DESCEND;
1499                         continue;
1500                 }
1501
1502                 cur_pattern = &patterns[i].pattern.device_pattern;
1503
1504                 /* Error out if mutually exclusive options are specified. */
1505                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1506                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1507                         return(DM_RET_ERROR);
1508
1509                 /*
1510                  * If they want to match any device node, we give them any
1511                  * device node.
1512                  */
1513                 if (cur_pattern->flags == DEV_MATCH_ANY)
1514                         goto copy_dev_node;
1515
1516                 /*
1517                  * Matching nothing is pointless, but harmless; skip it.
1518                  */
1519                 if (cur_pattern->flags == DEV_MATCH_NONE)
1520                         continue;
1521
1522                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1523                  && (cur_pattern->path_id != device->target->bus->path_id))
1524                         continue;
1525
1526                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1527                  && (cur_pattern->target_id != device->target->target_id))
1528                         continue;
1529
1530                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1531                  && (cur_pattern->target_lun != device->lun_id))
1532                         continue;
1533
1534                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1535                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1536                                     (caddr_t)&cur_pattern->data.inq_pat,
1537                                     1, sizeof(cur_pattern->data.inq_pat),
1538                                     scsi_static_inquiry_match) == NULL))
1539                         continue;
1540
1541                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1542                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1543                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1544                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1545                                       device->device_id_len
1546                                     - SVPD_DEVICE_ID_HDR_LEN,
1547                                       cur_pattern->data.devid_pat.id,
1548                                       cur_pattern->data.devid_pat.id_len) != 0))
1549                         continue;
1550
1551 copy_dev_node:
1552                 /*
1553                  * If we get to this point, the user definitely wants
1554                  * information on this device.  So tell the caller to copy
1555                  * the data out.
1556                  */
1557                 retval |= DM_RET_COPY;
1558
1559                 /*
1560                  * If the return action has been set to descend, then we
1561                  * know that we've already seen a peripheral matching
1562                  * expression, therefore we need to further descend the tree.
1563                  * This won't change by continuing around the loop, so we
1564                  * go ahead and return.  If we haven't seen a peripheral
1565                  * matching expression, we keep going around the loop until
1566                  * we exhaust the matching expressions.  We'll set the stop
1567                  * flag once we fall out of the loop.
1568                  */
1569                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1570                         return(retval);
1571         }
1572
1573         /*
1574          * If the return action hasn't been set to descend yet, that means
1575          * we haven't seen any peripheral matching patterns.  So tell the
1576          * caller to stop descending the tree -- the user doesn't want to
1577          * match against lower level tree elements.
1578          */
1579         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1580                 retval |= DM_RET_STOP;
1581
1582         return(retval);
1583 }
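
     /*
      * Example (sketch): a pattern for the single device at path id 0,
      * target 1, lun 0.  DEV_MATCH_INQUIRY and DEV_MATCH_DEVID are rejected
      * above when combined, so only addressing flags are set here.
      *
      *      struct dev_match_pattern p;
      *
      *      p.type = DEV_MATCH_DEVICE;
      *      p.pattern.device_pattern.flags =
      *          DEV_MATCH_PATH | DEV_MATCH_TARGET | DEV_MATCH_LUN;
      *      p.pattern.device_pattern.path_id = 0;
      *      p.pattern.device_pattern.target_id = 1;
      *      p.pattern.device_pattern.target_lun = 0;
      */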
1584
1585 /*
1586  * Match a single peripheral against any number of match patterns.
1587  */
1588 static dev_match_ret
1589 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1590                struct cam_periph *periph)
1591 {
1592         dev_match_ret retval;
1593         u_int i;
1594
1595         /*
1596          * If we aren't given something to match against, that's an error.
1597          */
1598         if (periph == NULL)
1599                 return(DM_RET_ERROR);
1600
1601         /*
1602          * If there are no match entries, then this peripheral matches no
1603          * matter what.
1604          */
1605         if ((patterns == NULL) || (num_patterns == 0))
1606                 return(DM_RET_STOP | DM_RET_COPY);
1607
1608         /*
1609          * There aren't any nodes below a peripheral node, so there's no
1610          * reason to descend the tree any further.
1611          */
1612         retval = DM_RET_STOP;
1613
1614         for (i = 0; i < num_patterns; i++) {
1615                 struct periph_match_pattern *cur_pattern;
1616
1617                 /*
1618                  * If the pattern in question isn't for a peripheral, we
1619                  * aren't interested.
1620                  */
1621                 if (patterns[i].type != DEV_MATCH_PERIPH)
1622                         continue;
1623
1624                 cur_pattern = &patterns[i].pattern.periph_pattern;
1625
1626                 /*
1627                  * If they want to match on anything, then we will do so.
1628                  */
1629                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1630                         /* set the copy flag */
1631                         retval |= DM_RET_COPY;
1632
1633                         /*
1634                          * We've already set the return action to stop,
1635                          * since there are no nodes below peripherals in
1636                          * the tree.
1637                          */
1638                         return(retval);
1639                 }
1640
1641                 /*
1642                  * Matching nothing is pointless, but harmless; skip it.
1643                  */
1644                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1645                         continue;
1646
1647                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1648                  && (cur_pattern->path_id != periph->path->bus->path_id))
1649                         continue;
1650
1651                 /*
1652                  * For the target and lun id's, we have to make sure the
1653                  * target and lun pointers aren't NULL.  The xpt peripheral
1654                  * has a wildcard target and device.
1655                  */
1656                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1657                  && ((periph->path->target == NULL)
1658                  ||(cur_pattern->target_id != periph->path->target->target_id)))
1659                         continue;
1660
1661                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1662                  && ((periph->path->device == NULL)
1663                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1664                         continue;
1665
1666                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1667                  && (cur_pattern->unit_number != periph->unit_number))
1668                         continue;
1669
1670                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1671                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1672                              DEV_IDLEN) != 0))
1673                         continue;
1674
1675                 /*
1676                  * If we get to this point, the user definitely wants
1677                  * information on this peripheral.  So tell the caller to
1678                  * copy the data out.
1679                  */
1680                 retval |= DM_RET_COPY;
1681
1682                 /*
1683                  * The return action has already been set to stop, since
1684                  * peripherals don't have any nodes below them in the EDT.
1685                  */
1686                 return(retval);
1687         }
1688
1689         /*
1690          * If we get to this point, the peripheral that was passed in
1691          * doesn't match any of the patterns.
1692          */
1693         return(retval);
1694 }
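
     /*
      * Example (sketch): a pattern matching every instance of a "da"
      * peripheral driver, whatever its unit number or position in the EDT.
      *
      *      struct dev_match_pattern p;
      *
      *      p.type = DEV_MATCH_PERIPH;
      *      p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
      *      strlcpy(p.pattern.periph_pattern.periph_name, "da",
      *          sizeof(p.pattern.periph_pattern.periph_name));
      */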
1695
1696 static int
1697 xptedtbusfunc(struct cam_eb *bus, void *arg)
1698 {
1699         struct ccb_dev_match *cdm;
1700         struct cam_et *target;
1701         dev_match_ret retval;
1702
1703         cdm = (struct ccb_dev_match *)arg;
1704
1705         /*
1706          * If our position is for something deeper in the tree, that means
1707          * that we've already seen this node.  So, we keep going down.
1708          */
1709         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1710          && (cdm->pos.cookie.bus == bus)
1711          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1712          && (cdm->pos.cookie.target != NULL))
1713                 retval = DM_RET_DESCEND;
1714         else
1715                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1716
1717         /*
1718          * If we got an error, bail out of the search.
1719          */
1720         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1721                 cdm->status = CAM_DEV_MATCH_ERROR;
1722                 return(0);
1723         }
1724
1725         /*
1726          * If the copy flag is set, copy this bus out.
1727          */
1728         if (retval & DM_RET_COPY) {
1729                 int spaceleft, j;
1730
1731                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1732                         sizeof(struct dev_match_result));
1733
1734                 /*
1735                  * If we don't have enough space to put in another
1736                  * match result, save our position and tell the
1737                  * user there are more devices to check.
1738                  */
1739                 if (spaceleft < sizeof(struct dev_match_result)) {
1740                         bzero(&cdm->pos, sizeof(cdm->pos));
1741                         cdm->pos.position_type =
1742                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1743
1744                         cdm->pos.cookie.bus = bus;
1745                         cdm->pos.generations[CAM_BUS_GENERATION] =
1746                                 xsoftc.bus_generation;
1747                         cdm->status = CAM_DEV_MATCH_MORE;
1748                         return(0);
1749                 }
1750                 j = cdm->num_matches;
1751                 cdm->num_matches++;
1752                 cdm->matches[j].type = DEV_MATCH_BUS;
1753                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1754                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1755                 cdm->matches[j].result.bus_result.unit_number =
1756                         bus->sim->unit_number;
1757                 strlcpy(cdm->matches[j].result.bus_result.dev_name,
1758                         bus->sim->sim_name,
1759                         sizeof(cdm->matches[j].result.bus_result.dev_name));
1760         }
1761
1762         /*
1763          * If the user is only interested in buses, there's no
1764          * reason to descend to the next level in the tree.
1765          */
1766         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1767                 return(1);
1768
1769         /*
1770          * If there is a target generation recorded, check it to
1771          * make sure the target list hasn't changed.
1772          */
1773         mtx_lock(&bus->eb_mtx);
1774         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1775          && (cdm->pos.cookie.bus == bus)
1776          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1777          && (cdm->pos.cookie.target != NULL)) {
1778                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1779                     bus->generation)) {
1780                         mtx_unlock(&bus->eb_mtx);
1781                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1782                         return (0);
1783                 }
1784                 target = (struct cam_et *)cdm->pos.cookie.target;
1785                 target->refcount++;
1786         } else
1787                 target = NULL;
1788         mtx_unlock(&bus->eb_mtx);
1789
1790         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1791 }
1792
1793 static int
1794 xptedttargetfunc(struct cam_et *target, void *arg)
1795 {
1796         struct ccb_dev_match *cdm;
1797         struct cam_eb *bus;
1798         struct cam_ed *device;
1799
1800         cdm = (struct ccb_dev_match *)arg;
1801         bus = target->bus;
1802
1803         /*
1804          * If there is a device list generation recorded, check it to
1805          * make sure the device list hasn't changed.
1806          */
1807         mtx_lock(&bus->eb_mtx);
1808         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1809          && (cdm->pos.cookie.bus == bus)
1810          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1811          && (cdm->pos.cookie.target == target)
1812          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1813          && (cdm->pos.cookie.device != NULL)) {
1814                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1815                     target->generation) {
1816                         mtx_unlock(&bus->eb_mtx);
1817                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1818                         return(0);
1819                 }
1820                 device = (struct cam_ed *)cdm->pos.cookie.device;
1821                 device->refcount++;
1822         } else
1823                 device = NULL;
1824         mtx_unlock(&bus->eb_mtx);
1825
1826         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1827 }
1828
1829 static int
1830 xptedtdevicefunc(struct cam_ed *device, void *arg)
1831 {
1832         struct cam_eb *bus;
1833         struct cam_periph *periph;
1834         struct ccb_dev_match *cdm;
1835         dev_match_ret retval;
1836
1837         cdm = (struct ccb_dev_match *)arg;
1838         bus = device->target->bus;
1839
1840         /*
1841          * If our position is for something deeper in the tree, that means
1842          * that we've already seen this node.  So, we keep going down.
1843          */
1844         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1845          && (cdm->pos.cookie.device == device)
1846          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1847          && (cdm->pos.cookie.periph != NULL))
1848                 retval = DM_RET_DESCEND;
1849         else
1850                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1851                                         device);
1852
1853         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1854                 cdm->status = CAM_DEV_MATCH_ERROR;
1855                 return(0);
1856         }
1857
1858         /*
1859          * If the copy flag is set, copy this device out.
1860          */
1861         if (retval & DM_RET_COPY) {
1862                 int spaceleft, j;
1863
1864                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1865                         sizeof(struct dev_match_result));
1866
1867                 /*
1868                  * If we don't have enough space to put in another
1869                  * match result, save our position and tell the
1870                  * user there are more devices to check.
1871                  */
1872                 if (spaceleft < sizeof(struct dev_match_result)) {
1873                         bzero(&cdm->pos, sizeof(cdm->pos));
1874                         cdm->pos.position_type =
1875                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1876                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1877
1878                         cdm->pos.cookie.bus = device->target->bus;
1879                         cdm->pos.generations[CAM_BUS_GENERATION] =
1880                                 xsoftc.bus_generation;
1881                         cdm->pos.cookie.target = device->target;
1882                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1883                                 device->target->bus->generation;
1884                         cdm->pos.cookie.device = device;
1885                         cdm->pos.generations[CAM_DEV_GENERATION] =
1886                                 device->target->generation;
1887                         cdm->status = CAM_DEV_MATCH_MORE;
1888                         return(0);
1889                 }
1890                 j = cdm->num_matches;
1891                 cdm->num_matches++;
1892                 cdm->matches[j].type = DEV_MATCH_DEVICE;
1893                 cdm->matches[j].result.device_result.path_id =
1894                         device->target->bus->path_id;
1895                 cdm->matches[j].result.device_result.target_id =
1896                         device->target->target_id;
1897                 cdm->matches[j].result.device_result.target_lun =
1898                         device->lun_id;
1899                 cdm->matches[j].result.device_result.protocol =
1900                         device->protocol;
1901                 bcopy(&device->inq_data,
1902                       &cdm->matches[j].result.device_result.inq_data,
1903                       sizeof(struct scsi_inquiry_data));
1904                 bcopy(&device->ident_data,
1905                       &cdm->matches[j].result.device_result.ident_data,
1906                       sizeof(struct ata_params));
1907
1908                 /* Let the user know whether this device is unconfigured */
1909                 if (device->flags & CAM_DEV_UNCONFIGURED)
1910                         cdm->matches[j].result.device_result.flags =
1911                                 DEV_RESULT_UNCONFIGURED;
1912                 else
1913                         cdm->matches[j].result.device_result.flags =
1914                                 DEV_RESULT_NOFLAG;
1915         }
1916
1917         /*
1918          * If the user isn't interested in peripherals, don't descend
1919          * the tree any further.
1920          */
1921         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1922                 return(1);
1923
1924         /*
1925          * If there is a peripheral list generation recorded, make sure
1926          * it hasn't changed.
1927          */
1928         xpt_lock_buses();
1929         mtx_lock(&bus->eb_mtx);
1930         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1931          && (cdm->pos.cookie.bus == bus)
1932          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1933          && (cdm->pos.cookie.target == device->target)
1934          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1935          && (cdm->pos.cookie.device == device)
1936          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1937          && (cdm->pos.cookie.periph != NULL)) {
1938                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1939                     device->generation) {
1940                         mtx_unlock(&bus->eb_mtx);
1941                         xpt_unlock_buses();
1942                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1943                         return(0);
1944                 }
1945                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1946                 periph->refcount++;
1947         } else
1948                 periph = NULL;
1949         mtx_unlock(&bus->eb_mtx);
1950         xpt_unlock_buses();
1951
1952         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1953 }
1954
1955 static int
1956 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1957 {
1958         struct ccb_dev_match *cdm;
1959         dev_match_ret retval;
1960
1961         cdm = (struct ccb_dev_match *)arg;
1962
1963         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1964
1965         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1966                 cdm->status = CAM_DEV_MATCH_ERROR;
1967                 return(0);
1968         }
1969
1970         /*
1971          * If the copy flag is set, copy this peripheral out.
1972          */
1973         if (retval & DM_RET_COPY) {
1974                 int spaceleft, j;
1975                 size_t l;
1976
1977                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1978                         sizeof(struct dev_match_result));
1979
1980                 /*
1981                  * If we don't have enough space to put in another
1982                  * match result, save our position and tell the
1983                  * user there are more devices to check.
1984                  */
1985                 if (spaceleft < sizeof(struct dev_match_result)) {
1986                         bzero(&cdm->pos, sizeof(cdm->pos));
1987                         cdm->pos.position_type =
1988                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1989                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1990                                 CAM_DEV_POS_PERIPH;
1991
1992                         cdm->pos.cookie.bus = periph->path->bus;
1993                                 cdm->pos.generations[CAM_BUS_GENERATION] =
1994                                 xsoftc.bus_generation;
1995                         cdm->pos.cookie.target = periph->path->target;
1996                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1997                                 periph->path->bus->generation;
1998                         cdm->pos.cookie.device = periph->path->device;
1999                         cdm->pos.generations[CAM_DEV_GENERATION] =
2000                                 periph->path->target->generation;
2001                         cdm->pos.cookie.periph = periph;
2002                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2003                                 periph->path->device->generation;
2004                         cdm->status = CAM_DEV_MATCH_MORE;
2005                         return(0);
2006                 }
2007
2008                 j = cdm->num_matches;
2009                 cdm->num_matches++;
2010                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2011                 cdm->matches[j].result.periph_result.path_id =
2012                         periph->path->bus->path_id;
2013                 cdm->matches[j].result.periph_result.target_id =
2014                         periph->path->target->target_id;
2015                 cdm->matches[j].result.periph_result.target_lun =
2016                         periph->path->device->lun_id;
2017                 cdm->matches[j].result.periph_result.unit_number =
2018                         periph->unit_number;
2019                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2020                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2021                         periph->periph_name, l);
2022         }
2023
2024         return(1);
2025 }
2026
2027 static int
2028 xptedtmatch(struct ccb_dev_match *cdm)
2029 {
2030         struct cam_eb *bus;
2031         int ret;
2032
2033         cdm->num_matches = 0;
2034
2035         /*
2036          * Check the bus list generation.  If it has changed, the user
2037          * needs to reset everything and start over.
2038          */
2039         xpt_lock_buses();
2040         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2041          && (cdm->pos.cookie.bus != NULL)) {
2042                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
2043                     xsoftc.bus_generation) {
2044                         xpt_unlock_buses();
2045                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2046                         return(0);
2047                 }
2048                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
2049                 bus->refcount++;
2050         } else
2051                 bus = NULL;
2052         xpt_unlock_buses();
2053
2054         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
2055
2056         /*
2057          * If we get back 0, that means that we had to stop before fully
2058          * traversing the EDT.  It also means that one of the subroutines
2059          * has set the status field to the proper value.  If we get back 1,
2060          * we've fully traversed the EDT and copied out any matching entries.
2061          */
2062         if (ret == 1)
2063                 cdm->status = CAM_DEV_MATCH_LAST;
2064
2065         return(ret);
2066 }
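
     /*
      * Userland drives this interface by resubmitting the same CCB until
      * the status stops coming back CAM_DEV_MATCH_MORE; the pos field saved
      * by the functions above lets each pass resume where the last one
      * stopped.  A rough sketch, with error handling and buffer sizing
      * omitted (see camcontrol(8) for a complete example):
      *
      *      union ccb ccb;
      *      int fd = open("/dev/xpt0", O_RDWR);
      *
      *      bzero(&ccb, sizeof(ccb));
      *      ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
      *      ccb.ccb_h.func_code = XPT_DEV_MATCH;
      *      ccb.cdm.match_buf_len = bufsize;
      *      ccb.cdm.matches = matches;
      *      do {
      *              ioctl(fd, CAMIOCOMMAND, &ccb);
      *              ... consume ccb.cdm.num_matches results ...
      *      } while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
      */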
2067
2068 static int
2069 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2070 {
2071         struct cam_periph *periph;
2072         struct ccb_dev_match *cdm;
2073
2074         cdm = (struct ccb_dev_match *)arg;
2075
2076         xpt_lock_buses();
2077         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2078          && (cdm->pos.cookie.pdrv == pdrv)
2079          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2080          && (cdm->pos.cookie.periph != NULL)) {
2081                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2082                     (*pdrv)->generation) {
2083                         xpt_unlock_buses();
2084                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2085                         return(0);
2086                 }
2087                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
2088                 periph->refcount++;
2089         } else
2090                 periph = NULL;
2091         xpt_unlock_buses();
2092
2093         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
2094 }
2095
2096 static int
2097 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2098 {
2099         struct ccb_dev_match *cdm;
2100         dev_match_ret retval;
2101
2102         cdm = (struct ccb_dev_match *)arg;
2103
2104         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2105
2106         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2107                 cdm->status = CAM_DEV_MATCH_ERROR;
2108                 return(0);
2109         }
2110
2111         /*
2112          * If the copy flag is set, copy this peripheral out.
2113          */
2114         if (retval & DM_RET_COPY) {
2115                 int spaceleft, j;
2116                 size_t l;
2117
2118                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2119                         sizeof(struct dev_match_result));
2120
2121                 /*
2122                  * If we don't have enough space to put in another
2123                  * match result, save our position and tell the
2124                  * user there are more devices to check.
2125                  */
2126                 if (spaceleft < sizeof(struct dev_match_result)) {
2127                         struct periph_driver **pdrv;
2128
2129                         pdrv = NULL;
2130                         bzero(&cdm->pos, sizeof(cdm->pos));
2131                         cdm->pos.position_type =
2132                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2133                                 CAM_DEV_POS_PERIPH;
2134
2135                         /*
2136                          * This may look a bit nonsensical, but it is
2137                          * actually quite logical.  There are very few
2138                          * peripheral drivers, and bloating every peripheral
2139                          * structure with a pointer back to its parent
2140                          * peripheral driver linker set entry would cost
2141                          * more in the long run than doing this quick lookup.
2142                          */
2143                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2144                                 if (strcmp((*pdrv)->driver_name,
2145                                     periph->periph_name) == 0)
2146                                         break;
2147                         }
2148
2149                         if (*pdrv == NULL) {
2150                                 cdm->status = CAM_DEV_MATCH_ERROR;
2151                                 return(0);
2152                         }
2153
2154                         cdm->pos.cookie.pdrv = pdrv;
2155                         /*
2156                          * The periph generation slot does double duty, as
2157                          * does the periph pointer slot.  They are used for
2158                          * both EDT and pdrv lookups and positioning.
2159                          */
2160                         cdm->pos.cookie.periph = periph;
2161                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2162                                 (*pdrv)->generation;
2163                         cdm->status = CAM_DEV_MATCH_MORE;
2164                         return(0);
2165                 }
2166
2167                 j = cdm->num_matches;
2168                 cdm->num_matches++;
2169                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2170                 cdm->matches[j].result.periph_result.path_id =
2171                         periph->path->bus->path_id;
2172
2173                 /*
2174                  * The transport layer peripheral doesn't have a target or
2175                  * lun.
2176                  */
2177                 if (periph->path->target)
2178                         cdm->matches[j].result.periph_result.target_id =
2179                                 periph->path->target->target_id;
2180                 else
2181                         cdm->matches[j].result.periph_result.target_id =
2182                                 CAM_TARGET_WILDCARD;
2183
2184                 if (periph->path->device)
2185                         cdm->matches[j].result.periph_result.target_lun =
2186                                 periph->path->device->lun_id;
2187                 else
2188                         cdm->matches[j].result.periph_result.target_lun =
2189                                 CAM_LUN_WILDCARD;
2190
2191                 cdm->matches[j].result.periph_result.unit_number =
2192                         periph->unit_number;
2193                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2194                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2195                         periph->periph_name, l);
2196         }
2197
2198         return(1);
2199 }
2200
2201 static int
2202 xptperiphlistmatch(struct ccb_dev_match *cdm)
2203 {
2204         int ret;
2205
2206         cdm->num_matches = 0;
2207
2208         /*
2209          * At the corresponding point in the EDT traversal code, we check
2210          * the bus list generation to make sure no buses have been added
2211          * or removed since the user last sent an XPT_DEV_MATCH ccb
2212          * through.  For the peripheral driver list traversal here, we
2213          * don't have to worry about new peripheral driver types coming or
2214          * going; they're in a linker set, and therefore can't change
2215          * without a recompile.
2216          */
2217
2218         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2219          && (cdm->pos.cookie.pdrv != NULL))
2220                 ret = xptpdrvtraverse(
2221                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2222                                 xptplistpdrvfunc, cdm);
2223         else
2224                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2225
2226         /*
2227          * If we get back 0, that means that we had to stop before fully
2228          * traversing the peripheral driver tree.  It also means that one of
2229          * the subroutines has set the status field to the proper value.  If
2230          * we get back 1, we've fully traversed the peripheral driver
2231          * list and copied out any matching entries.
2232          */
2233         if (ret == 1)
2234                 cdm->status = CAM_DEV_MATCH_LAST;
2235
2236         return(ret);
2237 }
2238
2239 static int
2240 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2241 {
2242         struct cam_eb *bus, *next_bus;
2243         int retval;
2244
2245         retval = 1;
2246         if (start_bus)
2247                 bus = start_bus;
2248         else {
2249                 xpt_lock_buses();
2250                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2251                 if (bus == NULL) {
2252                         xpt_unlock_buses();
2253                         return (retval);
2254                 }
2255                 bus->refcount++;
2256                 xpt_unlock_buses();
2257         }
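             /*
              * Hand-over-hand reference counting: grab a reference on the
              * next bus before dropping the one on the current bus, so our
              * position in the list stays valid without holding the bus
              * list lock across the callback.
              */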
2258         for (; bus != NULL; bus = next_bus) {
2259                 retval = tr_func(bus, arg);
2260                 if (retval == 0) {
2261                         xpt_release_bus(bus);
2262                         break;
2263                 }
2264                 xpt_lock_buses();
2265                 next_bus = TAILQ_NEXT(bus, links);
2266                 if (next_bus)
2267                         next_bus->refcount++;
2268                 xpt_unlock_buses();
2269                 xpt_release_bus(bus);
2270         }
2271         return(retval);
2272 }
2273
2274 static int
2275 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2276                   xpt_targetfunc_t *tr_func, void *arg)
2277 {
2278         struct cam_et *target, *next_target;
2279         int retval;
2280
2281         retval = 1;
2282         if (start_target)
2283                 target = start_target;
2284         else {
2285                 mtx_lock(&bus->eb_mtx);
2286                 target = TAILQ_FIRST(&bus->et_entries);
2287                 if (target == NULL) {
2288                         mtx_unlock(&bus->eb_mtx);
2289                         return (retval);
2290                 }
2291                 target->refcount++;
2292                 mtx_unlock(&bus->eb_mtx);
2293         }
2294         for (; target != NULL; target = next_target) {
2295                 retval = tr_func(target, arg);
2296                 if (retval == 0) {
2297                         xpt_release_target(target);
2298                         break;
2299                 }
2300                 mtx_lock(&bus->eb_mtx);
2301                 next_target = TAILQ_NEXT(target, links);
2302                 if (next_target)
2303                         next_target->refcount++;
2304                 mtx_unlock(&bus->eb_mtx);
2305                 xpt_release_target(target);
2306         }
2307         return(retval);
2308 }
2309
2310 static int
2311 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2312                   xpt_devicefunc_t *tr_func, void *arg)
2313 {
2314         struct cam_eb *bus;
2315         struct cam_ed *device, *next_device;
2316         int retval;
2317
2318         retval = 1;
2319         bus = target->bus;
2320         if (start_device)
2321                 device = start_device;
2322         else {
2323                 mtx_lock(&bus->eb_mtx);
2324                 device = TAILQ_FIRST(&target->ed_entries);
2325                 if (device == NULL) {
2326                         mtx_unlock(&bus->eb_mtx);
2327                         return (retval);
2328                 }
2329                 device->refcount++;
2330                 mtx_unlock(&bus->eb_mtx);
2331         }
2332         for (; device != NULL; device = next_device) {
2333                 mtx_lock(&device->device_mtx);
2334                 retval = tr_func(device, arg);
2335                 mtx_unlock(&device->device_mtx);
2336                 if (retval == 0) {
2337                         xpt_release_device(device);
2338                         break;
2339                 }
2340                 mtx_lock(&bus->eb_mtx);
2341                 next_device = TAILQ_NEXT(device, links);
2342                 if (next_device)
2343                         next_device->refcount++;
2344                 mtx_unlock(&bus->eb_mtx);
2345                 xpt_release_device(device);
2346         }
2347         return(retval);
2348 }
2349
2350 static int
2351 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2352                   xpt_periphfunc_t *tr_func, void *arg)
2353 {
2354         struct cam_eb *bus;
2355         struct cam_periph *periph, *next_periph;
2356         int retval;
2357
2358         retval = 1;
2359
2360         bus = device->target->bus;
2361         if (start_periph)
2362                 periph = start_periph;
2363         else {
2364                 xpt_lock_buses();
2365                 mtx_lock(&bus->eb_mtx);
2366                 periph = SLIST_FIRST(&device->periphs);
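                     /* Skip peripherals already being freed. */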
2367                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2368                         periph = SLIST_NEXT(periph, periph_links);
2369                 if (periph == NULL) {
2370                         mtx_unlock(&bus->eb_mtx);
2371                         xpt_unlock_buses();
2372                         return (retval);
2373                 }
2374                 periph->refcount++;
2375                 mtx_unlock(&bus->eb_mtx);
2376                 xpt_unlock_buses();
2377         }
2378         for (; periph != NULL; periph = next_periph) {
2379                 retval = tr_func(periph, arg);
2380                 if (retval == 0) {
2381                         cam_periph_release_locked(periph);
2382                         break;
2383                 }
2384                 xpt_lock_buses();
2385                 mtx_lock(&bus->eb_mtx);
2386                 next_periph = SLIST_NEXT(periph, periph_links);
2387                 while (next_periph != NULL &&
2388                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2389                         next_periph = SLIST_NEXT(next_periph, periph_links);
2390                 if (next_periph)
2391                         next_periph->refcount++;
2392                 mtx_unlock(&bus->eb_mtx);
2393                 xpt_unlock_buses();
2394                 cam_periph_release_locked(periph);
2395         }
2396         return(retval);
2397 }
2398
2399 static int
2400 xptpdrvtraverse(struct periph_driver **start_pdrv,
2401                 xpt_pdrvfunc_t *tr_func, void *arg)
2402 {
2403         struct periph_driver **pdrv;
2404         int retval;
2405
2406         retval = 1;
2407
2408         /*
2409          * We don't traverse the peripheral driver list like we do the
2410          * other lists, because it is a linker set, and therefore cannot be
2411          * changed during runtime.  If the peripheral driver list is ever
2412          * re-done to be something other than a linker set (i.e. it can
2413          * change while the system is running), the list traversal should
2414          * be modified to work like the other traversal functions.
2415          */
2416         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2417              *pdrv != NULL; pdrv++) {
2418                 retval = tr_func(pdrv, arg);
2419
2420                 if (retval == 0)
2421                         return(retval);
2422         }
2423
2424         return(retval);
2425 }
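
     /*
      * Peripheral drivers enter the periph_drivers linker set through
      * PERIPHDRIVER_DECLARE().  A sketch, using a hypothetical "xd" driver
      * (field order: init function, name, unit list, generation):
      *
      *      static struct periph_driver xddriver = {
      *              xdinit, "xd",
      *              TAILQ_HEAD_INITIALIZER(xddriver.units), 0
      *      };
      *      PERIPHDRIVER_DECLARE(xd, xddriver);
      */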
2426
2427 static int
2428 xptpdperiphtraverse(struct periph_driver **pdrv,
2429                     struct cam_periph *start_periph,
2430                     xpt_periphfunc_t *tr_func, void *arg)
2431 {
2432         struct cam_periph *periph, *next_periph;
2433         int retval;
2434
2435         retval = 1;
2436
2437         if (start_periph)
2438                 periph = start_periph;
2439         else {
2440                 xpt_lock_buses();
2441                 periph = TAILQ_FIRST(&(*pdrv)->units);
2442                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2443                         periph = TAILQ_NEXT(periph, unit_links);
2444                 if (periph == NULL) {
2445                         xpt_unlock_buses();
2446                         return (retval);
2447                 }
2448                 periph->refcount++;
2449                 xpt_unlock_buses();
2450         }
2451         for (; periph != NULL; periph = next_periph) {
2452                 cam_periph_lock(periph);
2453                 retval = tr_func(periph, arg);
2454                 cam_periph_unlock(periph);
2455                 if (retval == 0) {
2456                         cam_periph_release(periph);
2457                         break;
2458                 }
2459                 xpt_lock_buses();
2460                 next_periph = TAILQ_NEXT(periph, unit_links);
2461                 while (next_periph != NULL &&
2462                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2463                         next_periph = TAILQ_NEXT(next_periph, unit_links);
2464                 if (next_periph)
2465                         next_periph->refcount++;
2466                 xpt_unlock_buses();
2467                 cam_periph_release(periph);
2468         }
2469         return(retval);
2470 }
2471
2472 static int
2473 xptdefbusfunc(struct cam_eb *bus, void *arg)
2474 {
2475         struct xpt_traverse_config *tr_config;
2476
2477         tr_config = (struct xpt_traverse_config *)arg;
2478
2479         if (tr_config->depth == XPT_DEPTH_BUS) {
2480                 xpt_busfunc_t *tr_func;
2481
2482                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2483
2484                 return(tr_func(bus, tr_config->tr_arg));
2485         } else
2486                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2487 }
2488
2489 static int
2490 xptdeftargetfunc(struct cam_et *target, void *arg)
2491 {
2492         struct xpt_traverse_config *tr_config;
2493
2494         tr_config = (struct xpt_traverse_config *)arg;
2495
2496         if (tr_config->depth == XPT_DEPTH_TARGET) {
2497                 xpt_targetfunc_t *tr_func;
2498
2499                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2500
2501                 return(tr_func(target, tr_config->tr_arg));
2502         } else
2503                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2504 }
2505
2506 static int
2507 xptdefdevicefunc(struct cam_ed *device, void *arg)
2508 {
2509         struct xpt_traverse_config *tr_config;
2510
2511         tr_config = (struct xpt_traverse_config *)arg;
2512
2513         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2514                 xpt_devicefunc_t *tr_func;
2515
2516                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2517
2518                 return(tr_func(device, tr_config->tr_arg));
2519         } else
2520                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2521 }
2522
2523 static int
2524 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2525 {
2526         struct xpt_traverse_config *tr_config;
2527         xpt_periphfunc_t *tr_func;
2528
2529         tr_config = (struct xpt_traverse_config *)arg;
2530
2531         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2532
2533         /*
2534          * Unlike the other default functions, we don't check for depth
2535          * here.  The peripheral driver level is the last level in the EDT,
2536          * so if we're here, we should execute the function in question.
2537          */
2538         return(tr_func(periph, tr_config->tr_arg));
2539 }
2540
2541 /*
2542  * Execute the given function for every bus in the EDT.
2543  */
2544 static int
2545 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2546 {
2547         struct xpt_traverse_config tr_config;
2548
2549         tr_config.depth = XPT_DEPTH_BUS;
2550         tr_config.tr_func = tr_func;
2551         tr_config.tr_arg = arg;
2552
2553         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2554 }
2555
2556 /*
2557  * Execute the given function for every device in the EDT.
2558  */
2559 static int
2560 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2561 {
2562         struct xpt_traverse_config tr_config;
2563
2564         tr_config.depth = XPT_DEPTH_DEVICE;
2565         tr_config.tr_func = tr_func;
2566         tr_config.tr_arg = arg;
2567
2568         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2569 }
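
     /*
      * Example (sketch): counting configured devices with the helper above.
      * The callback name is hypothetical; returning nonzero keeps the
      * traversal going, per the tr_func contract used throughout this file.
      *
      *      static int
      *      xpt_count_dev(struct cam_ed *device, void *arg)
      *      {
      *              if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
      *                      (*(int *)arg)++;
      *              return (1);
      *      }
      *
      *      int count = 0;
      *      xpt_for_all_devices(xpt_count_dev, &count);
      */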
2570
2571 static int
2572 xptsetasyncfunc(struct cam_ed *device, void *arg)
2573 {
2574         struct cam_path path;
2575         struct ccb_getdev cgd;
2576         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2577
2578         /*
2579          * Don't report unconfigured devices (Wildcard devs,
2580          * devices only for target mode, device instances
2581          * that have been invalidated but are waiting for
2582          * their last reference count to be released).
2583          */
2584         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2585                 return (1);
2586
2587         xpt_compile_path(&path,
2588                          NULL,
2589                          device->target->bus->path_id,
2590                          device->target->target_id,
2591                          device->lun_id);
2592         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2593         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2594         xpt_action((union ccb *)&cgd);
2595         csa->callback(csa->callback_arg,
2596                             AC_FOUND_DEVICE,
2597                             &path, &cgd);
2598         xpt_release_path(&path);
2599
2600         return(1);
2601 }
2602
2603 static int
2604 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2605 {
2606         struct cam_path path;
2607         struct ccb_pathinq cpi;
2608         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2609
2610         xpt_compile_path(&path, /*periph*/NULL,
2611                          bus->path_id,
2612                          CAM_TARGET_WILDCARD,
2613                          CAM_LUN_WILDCARD);
2614         xpt_path_lock(&path);
2615         xpt_path_inq(&cpi, &path);
2616         csa->callback(csa->callback_arg,
2617                             AC_PATH_REGISTERED,
2618                             &path, &cpi);
2619         xpt_path_unlock(&path);
2620         xpt_release_path(&path);
2621
2622         return(1);
2623 }
2624
2625 void
2626 xpt_action(union ccb *start_ccb)
2627 {
2628
2629         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2630             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2631                 xpt_action_name(start_ccb->ccb_h.func_code)));
2632
2633         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2634         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2635 }
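
     /*
      * Note: the indirection above lands in the bus transport's action
      * method; for most transports that is xpt_action_default() below,
      * reached either directly or after transport-specific preprocessing.
      */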
2636
2637 void
2638 xpt_action_default(union ccb *start_ccb)
2639 {
2640         struct cam_path *path;
2641         struct cam_sim *sim;
2642         struct mtx *mtx;
2643
2644         path = start_ccb->ccb_h.path;
2645         CAM_DEBUG(path, CAM_DEBUG_TRACE,
2646             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2647                 xpt_action_name(start_ccb->ccb_h.func_code)));
2648
2649         switch (start_ccb->ccb_h.func_code) {
2650         case XPT_SCSI_IO:
2651         {
2652                 struct cam_ed *device;
2653
2654                 /*
2655                  * For the sake of compatibility with SCSI-1
2656                  * devices that may not understand the identify
2657                  * message, we include lun information in the
2658                  * second byte of all commands.  SCSI-1 specifies
2659                  * that luns are a 3 bit value and reserves only 3
2660                  * bits for lun information in the CDB.  Later
2661                  * revisions of the SCSI spec allow for more than 8
2662                  * luns, but have deprecated lun information in the
2663                  * CDB.  So, if the lun won't fit, we must omit it.
2664                  *
2665                  * Also be aware that during initial probing for devices,
2666                  * the inquiry information is unknown but initialized to 0.
2667                  * This means that this code will be exercised while probing
2668                  * devices with an ANSI revision greater than 2.
2669                  */
2670                 device = path->device;
2671                 if (device->protocol_version <= SCSI_REV_2
2672                  && start_ccb->ccb_h.target_lun < 8
2673                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2674                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2675                             start_ccb->ccb_h.target_lun << 5;
2676                 }
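                     /*
                      * For example, lun 2 on such a device flips CDB byte 1
                      * from 0x00 to 0x40 (lun in bits 7-5, as SCSI-1
                      * required).
                      */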
2677                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2678         }
2679         /* FALLTHROUGH */
2680         case XPT_TARGET_IO:
2681         case XPT_CONT_TARGET_IO:
2682                 start_ccb->csio.sense_resid = 0;
2683                 start_ccb->csio.resid = 0;
2684                 /* FALLTHROUGH */
2685         case XPT_ATA_IO:
2686                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2687                         start_ccb->ataio.resid = 0;
2688                 /* FALLTHROUGH */
2689         case XPT_NVME_IO:
2690         case XPT_NVME_ADMIN:
2691         case XPT_MMC_IO:
2692         case XPT_RESET_DEV:
2693         case XPT_ENG_EXEC:
2694         case XPT_SMP_IO:
2695         {
2696                 struct cam_devq *devq;
2697
2698                 devq = path->bus->sim->devq;
2699                 mtx_lock(&devq->send_mtx);
2700                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2701                 if (xpt_schedule_devq(devq, path->device) != 0)
2702                         xpt_run_devq(devq);
2703                 mtx_unlock(&devq->send_mtx);
2704                 break;
2705         }
2706         case XPT_CALC_GEOMETRY:
2707                 /* Filter out garbage */
2708                 if (start_ccb->ccg.block_size == 0
2709                  || start_ccb->ccg.volume_size == 0) {
2710                         start_ccb->ccg.cylinders = 0;
2711                         start_ccb->ccg.heads = 0;
2712                         start_ccb->ccg.secs_per_track = 0;
2713                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2714                         break;
2715                 }
2716                 goto call_sim;
2717         case XPT_ABORT:
2718         {
2719                 union ccb* abort_ccb;
2720
2721                 abort_ccb = start_ccb->cab.abort_ccb;
2722                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2723                         struct cam_ed *device;
2724                         struct cam_devq *devq;
2725
2726                         device = abort_ccb->ccb_h.path->device;
2727                         devq = device->sim->devq;
2728
2729                         mtx_lock(&devq->send_mtx);
2730                         if (abort_ccb->ccb_h.pinfo.index > 0) {
2731                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2732                                 abort_ccb->ccb_h.status =
2733                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2734                                 xpt_freeze_devq_device(device, 1);
2735                                 mtx_unlock(&devq->send_mtx);
2736                                 xpt_done(abort_ccb);
2737                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2738                                 break;
2739                         }
2740                         mtx_unlock(&devq->send_mtx);
2741
2742                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2743                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2744                                 /*
2745                                  * We've caught this ccb en route to
2746                                  * the SIM.  Flag it for abort and the
2747                                  * SIM will do so just before starting
2748                                  * real work on the CCB.
2749                                  */
2750                                 abort_ccb->ccb_h.status =
2751                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2752                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2753                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2754                                 break;
2755                         }
2756                 }
2757                 if (XPT_FC_IS_QUEUED(abort_ccb)
2758                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2759                         /*
2760                          * It's already completed but waiting
2761                          * for our SWI to get to it.
2762                          */
2763                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2764                         break;
2765                 }
2766                 /*
2767                  * If we weren't able to take care of the abort request
2768                  * in the XPT, pass the request down to the SIM for processing.
2769                  */
2770         }
2771         /* FALLTHROUGH */
2772         case XPT_ACCEPT_TARGET_IO:
2773         case XPT_EN_LUN:
2774         case XPT_IMMED_NOTIFY:
2775         case XPT_NOTIFY_ACK:
2776         case XPT_RESET_BUS:
2777         case XPT_IMMEDIATE_NOTIFY:
2778         case XPT_NOTIFY_ACKNOWLEDGE:
2779         case XPT_GET_SIM_KNOB_OLD:
2780         case XPT_GET_SIM_KNOB:
2781         case XPT_SET_SIM_KNOB:
2782         case XPT_GET_TRAN_SETTINGS:
2783         case XPT_SET_TRAN_SETTINGS:
2784         case XPT_PATH_INQ:
2785 call_sim:
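                /*
                 * Take the SIM lock only if the SIM has one and we do not
                 * already hold it; mtx == NULL below means no unlock is owed.
                 */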
2786                 sim = path->bus->sim;
2787                 mtx = sim->mtx;
2788                 if (mtx && !mtx_owned(mtx))
2789                         mtx_lock(mtx);
2790                 else
2791                         mtx = NULL;
2792
2793                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2794                     ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2795                 (*(sim->sim_action))(sim, start_ccb);
2796                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2797                     ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2798                 if (mtx)
2799                         mtx_unlock(mtx);
2800                 break;
2801         case XPT_PATH_STATS:
2802                 start_ccb->cpis.last_reset = path->bus->last_reset;
2803                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2804                 break;
2805         case XPT_GDEV_TYPE:
2806         {
2807                 struct cam_ed *dev;
2808
2809                 dev = path->device;
2810                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2811                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2812                 } else {
2813                         struct ccb_getdev *cgd;
2814
2815                         cgd = &start_ccb->cgd;
2816                         cgd->protocol = dev->protocol;
2817                         cgd->inq_data = dev->inq_data;
2818                         cgd->ident_data = dev->ident_data;
2819                         cgd->inq_flags = dev->inq_flags;
2820                         cgd->ccb_h.status = CAM_REQ_CMP;
2821                         cgd->serial_num_len = dev->serial_num_len;
2822                         if ((dev->serial_num_len > 0)
2823                          && (dev->serial_num != NULL))
2824                                 bcopy(dev->serial_num, cgd->serial_num,
2825                                       dev->serial_num_len);
2826                 }
2827                 break;
2828         }
2829         case XPT_GDEV_STATS:
2830         {
2831                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
2832                 struct cam_ed *dev = path->device;
2833                 struct cam_eb *bus = path->bus;
2834                 struct cam_et *tar = path->target;
2835                 struct cam_devq *devq = bus->sim->devq;
2836
2837                 mtx_lock(&devq->send_mtx);
2838                 cgds->dev_openings = dev->ccbq.dev_openings;
2839                 cgds->dev_active = dev->ccbq.dev_active;
2840                 cgds->allocated = dev->ccbq.allocated;
2841                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2842                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2843                 cgds->last_reset = tar->last_reset;
2844                 cgds->maxtags = dev->maxtags;
2845                 cgds->mintags = dev->mintags;
2846                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2847                         cgds->last_reset = bus->last_reset;
2848                 mtx_unlock(&devq->send_mtx);
2849                 cgds->ccb_h.status = CAM_REQ_CMP;
2850                 break;
2851         }
2852         case XPT_GDEVLIST:
2853         {
2854                 struct cam_periph       *nperiph;
2855                 struct periph_list      *periph_head;
2856                 struct ccb_getdevlist   *cgdl;
2857                 u_int                   i;
2858                 struct cam_ed           *device;
2859                 int                     found;
2860
2861                 found = 0;
2862
2863                 /*
2864                  * Don't want anyone mucking with our data.
2865                  */
2866                 device = path->device;
2867                 periph_head = &device->periphs;
2868                 cgdl = &start_ccb->cgdl;
2869
2870                 /*
2871                  * Check and see if the list has changed since the user
2872                  * last requested a list member.  If so, tell them that the
2873                  * list has changed, and therefore they need to start over
2874                  * from the beginning.
2875                  */
2876                 if ((cgdl->index != 0) &&
2877                     (cgdl->generation != device->generation)) {
2878                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2879                         break;
2880                 }
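                /*
                 * Expected usage sketch (not normative): consumers such as
                 * the CAMGETPASSTHRU ioctl handler loop on this call,
                 * resubmitting with the returned index and generation until
                 * the status becomes CAM_GDEVLIST_LAST_DEVICE.
                 */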
2881
2882                 /*
2883                  * Traverse the list of peripherals and attempt to find
2884                  * the requested peripheral.
2885                  */
2886                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
2887                      (nperiph != NULL) && (i <= cgdl->index);
2888                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2889                         if (i == cgdl->index) {
2890                                 strlcpy(cgdl->periph_name,
2891                                         nperiph->periph_name,
2892                                         sizeof(cgdl->periph_name));
2893                                 cgdl->unit_number = nperiph->unit_number;
2894                                 found = 1;
2895                         }
2896                 }
2897                 if (found == 0) {
2898                         cgdl->status = CAM_GDEVLIST_ERROR;
2899                         break;
2900                 }
2901
2902                 if (nperiph == NULL)
2903                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2904                 else
2905                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2906
2907                 cgdl->index++;
2908                 cgdl->generation = device->generation;
2909
2910                 cgdl->ccb_h.status = CAM_REQ_CMP;
2911                 break;
2912         }
2913         case XPT_DEV_MATCH:
2914         {
2915                 dev_pos_type position_type;
2916                 struct ccb_dev_match *cdm;
2917
2918                 cdm = &start_ccb->cdm;
2919
2920                 /*
2921                  * There are two ways of getting at information in the EDT.
2922                  * The first way is via the primary EDT tree.  It starts
2923                  * with a list of buses, then a list of targets on a bus,
2924                  * then devices/luns on a target, and then peripherals on a
2925                  * device/lun.  The "other" way is by the peripheral driver
2926                  * lists, which are (naturally) organized by peripheral
2927                  * driver.  So it makes sense to
2928                  * use the peripheral driver list if the user is looking
2929                  * for something like "da1", or all "da" devices.  If the
2930                  * user is looking for something on a particular bus/target
2931                  * or lun, it's generally better to go through the EDT tree.
2932                  */
2933
2934                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2935                         position_type = cdm->pos.position_type;
2936                 else {
2937                         u_int i;
2938
2939                         position_type = CAM_DEV_POS_NONE;
2940
2941                         for (i = 0; i < cdm->num_patterns; i++) {
2942                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2943                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2944                                         position_type = CAM_DEV_POS_EDT;
2945                                         break;
2946                                 }
2947                         }
2948
2949                         if (cdm->num_patterns == 0)
2950                                 position_type = CAM_DEV_POS_EDT;
2951                         else if (position_type == CAM_DEV_POS_NONE)
2952                                 position_type = CAM_DEV_POS_PDRV;
2953                 }
2954
2955                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
2956                 case CAM_DEV_POS_EDT:
2957                         xptedtmatch(cdm);
2958                         break;
2959                 case CAM_DEV_POS_PDRV:
2960                         xptperiphlistmatch(cdm);
2961                         break;
2962                 default:
2963                         cdm->status = CAM_DEV_MATCH_ERROR;
2964                         break;
2965                 }
2966
2967                 if (cdm->status == CAM_DEV_MATCH_ERROR)
2968                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2969                 else
2970                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2971
2972                 break;
2973         }
2974         case XPT_SASYNC_CB:
2975         {
2976                 struct ccb_setasync *csa;
2977                 struct async_node *cur_entry;
2978                 struct async_list *async_head;
2979                 u_int32_t added;
2980
2981                 csa = &start_ccb->csa;
2982                 added = csa->event_enable;
2983                 async_head = &path->device->asyncs;
2984
2985                 /*
2986                  * If there is already an entry for us, simply
2987                  * update it.
2988                  */
2989                 cur_entry = SLIST_FIRST(async_head);
2990                 while (cur_entry != NULL) {
2991                         if ((cur_entry->callback_arg == csa->callback_arg)
2992                          && (cur_entry->callback == csa->callback))
2993                                 break;
2994                         cur_entry = SLIST_NEXT(cur_entry, links);
2995                 }
2996
2997                 if (cur_entry != NULL) {
2998                         /*
2999                          * If the request has no flags set,
3000                          * remove the entry.
3001                          */
3002                         added &= ~cur_entry->event_enable;
3003                         if (csa->event_enable == 0) {
3004                                 SLIST_REMOVE(async_head, cur_entry,
3005                                              async_node, links);
3006                                 xpt_release_device(path->device);
3007                                 free(cur_entry, M_CAMXPT);
3008                         } else {
3009                                 cur_entry->event_enable = csa->event_enable;
3010                         }
3011                         csa->event_enable = added;
3012                 } else {
3013                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3014                                            M_NOWAIT);
3015                         if (cur_entry == NULL) {
3016                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3017                                 break;
3018                         }
3019                         cur_entry->event_enable = csa->event_enable;
3020                         cur_entry->event_lock = (path->bus->sim->mtx &&
3021                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
3022                         cur_entry->callback_arg = csa->callback_arg;
3023                         cur_entry->callback = csa->callback;
3024                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3025                         xpt_acquire_device(path->device);
3026                 }
3027                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3028                 break;
3029         }
3030         case XPT_REL_SIMQ:
3031         {
3032                 struct ccb_relsim *crs;
3033                 struct cam_ed *dev;
3034
3035                 crs = &start_ccb->crs;
3036                 dev = path->device;
3037                 if (dev == NULL) {
3038                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3039                         break;
3040                 }
3041
3042                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3043                         /* Don't ever go below one opening */
3044                         if (crs->openings > 0) {
3045                                 xpt_dev_ccbq_resize(path, crs->openings);
3046                                 if (bootverbose) {
3047                                         xpt_print(path,
3048                                             "number of openings is now %d\n",
3049                                             crs->openings);
3050                                 }
3051                         }
3052                 }
3053
3054                 mtx_lock(&dev->sim->devq->send_mtx);
3055                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3056                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3057                                 /*
3058                                  * Just extend the old timeout and decrement
3059                                  * the freeze count so that a single timeout
3060                                  * is sufficient for releasing the queue.
3061                                  */
3062                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3063                                 callout_stop(&dev->callout);
3064                         } else {
3065                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3066                         }
3067
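                        /* crs->release_timeout is in milliseconds (SBT_1MS). */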
3068                         callout_reset_sbt(&dev->callout,
3069                             SBT_1MS * crs->release_timeout, 0,
3070                             xpt_release_devq_timeout, dev, 0);
3071
3072                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3073                 }
3074
3075                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3076                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3077                                 /*
3078                                  * Decrement the freeze count so that a single
3079                                  * completion is still sufficient to unfreeze
3080                                  * the queue.
3081                                  */
3082                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3083                         } else {
3084                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3085                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3086                         }
3087                 }
3088
3089                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3090                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3091                          || (dev->ccbq.dev_active == 0)) {
3092                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3093                         } else {
3094                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3095                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3096                         }
3097                 }
3098                 mtx_unlock(&dev->sim->devq->send_mtx);
3099
3100                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
3101                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
3102                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
3103                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3104                 break;
3105         }
3106         case XPT_DEBUG: {
3107                 struct cam_path *oldpath;
3108
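                /*
                 * Typically reached via camcontrol(8) "debug"; any existing
                 * debug path and flags are replaced with the new settings.
                 */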
3109                 /* Check that all request bits are supported. */
3110                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3111                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3112                         break;
3113                 }
3114
3115                 cam_dflags = CAM_DEBUG_NONE;
3116                 if (cam_dpath != NULL) {
3117                         oldpath = cam_dpath;
3118                         cam_dpath = NULL;
3119                         xpt_free_path(oldpath);
3120                 }
3121                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3122                         if (xpt_create_path(&cam_dpath, NULL,
3123                                             start_ccb->ccb_h.path_id,
3124                                             start_ccb->ccb_h.target_id,
3125                                             start_ccb->ccb_h.target_lun) !=
3126                                             CAM_REQ_CMP) {
3127                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3128                         } else {
3129                                 cam_dflags = start_ccb->cdbg.flags;
3130                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3131                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3132                                     cam_dflags);
3133                         }
3134                 } else
3135                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3136                 break;
3137         }
3138         case XPT_NOOP:
3139                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3140                         xpt_freeze_devq(path, 1);
3141                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3142                 break;
3143         case XPT_REPROBE_LUN:
3144                 xpt_async(AC_INQ_CHANGED, path, NULL);
3145                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3146                 xpt_done(start_ccb);
3147                 break;
3148         case XPT_ASYNC:
3149                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3150                 xpt_done(start_ccb);
3151                 break;
3152         default:
3153         case XPT_SDEV_TYPE:
3154         case XPT_TERM_IO:
3155         case XPT_ENG_INQ:
3156                 /* XXX Implement */
3157                 xpt_print(start_ccb->ccb_h.path,
3158                     "%s: CCB type %#x %s not supported\n", __func__,
3159                     start_ccb->ccb_h.func_code,
3160                     xpt_action_name(start_ccb->ccb_h.func_code));
3161                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3162                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3163                         xpt_done(start_ccb);
3164                 }
3165                 break;
3166         }
3167         CAM_DEBUG(path, CAM_DEBUG_TRACE,
3168             ("xpt_action_default: func= %#x %s status %#x\n",
3169                 start_ccb->ccb_h.func_code,
3170                 xpt_action_name(start_ccb->ccb_h.func_code),
3171                 start_ccb->ccb_h.status));
3172 }
3173
3174 /*
3175  * Call the sim poll routine to allow the sim to complete
3176  * any inflight requests, then call camisr_runqueue to
3177  * finish processing any CCBs that the poll completed.
3178  */
3179 void
3180 xpt_sim_poll(struct cam_sim *sim)
3181 {
3182         struct mtx *mtx;
3183
3184         mtx = sim->mtx;
3185         if (mtx)
3186                 mtx_lock(mtx);
3187         (*(sim->sim_poll))(sim);
3188         if (mtx)
3189                 mtx_unlock(mtx);
3190         camisr_runqueue();
3191 }
3192
3193 uint32_t
3194 xpt_poll_setup(union ccb *start_ccb)
3195 {
3196         u_int32_t timeout;
3197         struct    cam_sim *sim;
3198         struct    cam_devq *devq;
3199         struct    cam_ed *dev;
3200
3201         timeout = start_ccb->ccb_h.timeout * 10;
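        /*
         * ccb_h.timeout is in milliseconds and each polling pass below
         * DELAYs 100us, so ms * 10 yields the number of passes allowed.
         */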
3202         sim = start_ccb->ccb_h.path->bus->sim;
3203         devq = sim->devq;
3204         dev = start_ccb->ccb_h.path->device;
3205
3206         /*
3207          * Steal an opening so that no other queued requests
3208          * can get it before us while we simulate interrupts.
3209          */
3210         mtx_lock(&devq->send_mtx);
3211         dev->ccbq.dev_openings--;
3212         while ((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3213             (--timeout > 0)) {
3214                 mtx_unlock(&devq->send_mtx);
3215                 DELAY(100);
3216                 xpt_sim_poll(sim);
3217                 mtx_lock(&devq->send_mtx);
3218         }
3219         dev->ccbq.dev_openings++;
3220         mtx_unlock(&devq->send_mtx);
3221
3222         return (timeout);
3223 }
3224
3225 void
3226 xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
3227 {
3228
3229         while (--timeout > 0) {
3230                 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim);
3231                 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3232                     != CAM_REQ_INPROG)
3233                         break;
3234                 DELAY(100);
3235         }
3236
3237         if (timeout == 0) {
3238                 /*
3239                  * XXX Is it worth adding a sim_timeout entry
3240                  * point so we can attempt recovery?  If
3241                  * this is only used for dumps, I don't think
3242                  * it is.
3243                  */
3244                 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3245         }
3246 }
3247
3248 void
3249 xpt_polled_action(union ccb *start_ccb)
3250 {
3251         uint32_t        timeout;
3252         struct cam_ed   *dev;
3253
3254         /* The effective timeout is computed by xpt_poll_setup() below. */
3255         dev = start_ccb->ccb_h.path->device;
3256
3257         mtx_unlock(&dev->device_mtx);
3258
3259         timeout = xpt_poll_setup(start_ccb);
3260         if (timeout > 0) {
3261                 xpt_action(start_ccb);
3262                 xpt_pollwait(start_ccb, timeout);
3263         } else {
3264                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3265         }
3266
3267         mtx_lock(&dev->device_mtx);
3268 }
3269
3270 /*
3271  * Schedule a peripheral driver to receive a ccb when its
3272  * target device has space for more transactions.
3273  */
3274 void
3275 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3276 {
3277
3278         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3279         cam_periph_assert(periph, MA_OWNED);
3280         if (new_priority < periph->scheduled_priority) {
3281                 periph->scheduled_priority = new_priority;
3282                 xpt_run_allocq(periph, 0);
3283         }
3284 }
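
/*
 * Minimal illustrative caller, assuming the usual peripheral locking
 * conventions (the real callers are the peripheral drivers themselves):
 *
 *	cam_periph_lock(periph);
 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *	cam_periph_unlock(periph);
 */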
3285
3286 /*
3287  * Schedule a device to run on a given queue.
3288  * If the device was inserted as a new entry on the queue,
3289  * return 1 meaning the device queue should be run. If we
3290  * were already queued, implying someone else has already
3291  * started the queue, return 0 so the caller doesn't attempt
3292  * to run the queue.
3293  */
3294 static int
3295 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3296                  u_int32_t new_priority)
3297 {
3298         int retval;
3299         u_int32_t old_priority;
3300
3301         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3302
3303         old_priority = pinfo->priority;
3304
3305         /*
3306          * Are we already queued?
3307          */
3308         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3309                 /* Simply reorder based on new priority */
3310                 if (new_priority < old_priority) {
3311                         camq_change_priority(queue, pinfo->index,
3312                                              new_priority);
3313                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3314                                         ("changed priority to %d\n",
3315                                          new_priority));
3316                         retval = 1;
3317                 } else
3318                         retval = 0;
3319         } else {
3320                 /* New entry on the queue */
3321                 if (new_priority < old_priority)
3322                         pinfo->priority = new_priority;
3323
3324                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3325                                 ("Inserting onto queue\n"));
3326                 pinfo->generation = ++queue->generation;
3327                 camq_insert(queue, pinfo);
3328                 retval = 1;
3329         }
3330         return (retval);
3331 }
3332
3333 static void
3334 xpt_run_allocq_task(void *context, int pending)
3335 {
3336         struct cam_periph *periph = context;
3337
3338         cam_periph_lock(periph);
3339         periph->flags &= ~CAM_PERIPH_RUN_TASK;
3340         xpt_run_allocq(periph, 1);
3341         cam_periph_unlock(periph);
3342         cam_periph_release(periph);
3343 }
3344
3345 static void
3346 xpt_run_allocq(struct cam_periph *periph, int sleep)
3347 {
3348         struct cam_ed   *device;
3349         union ccb       *ccb;
3350         uint32_t         prio;
3351
3352         cam_periph_assert(periph, MA_OWNED);
3353         if (periph->periph_allocating)
3354                 return;
3355         cam_periph_doacquire(periph);
3356         periph->periph_allocating = 1;
3357         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3358         device = periph->path->device;
3359         ccb = NULL;
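        /*
         * Keep allocating and handing out CCBs while some priority is
         * pending and either the device has openings left or the request
         * is out-of-band (prio <= CAM_PRIORITY_OOB).
         */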
3360 restart:
3361         while ((prio = min(periph->scheduled_priority,
3362             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3363             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3364              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3365                 if (ccb == NULL &&
3366                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3367                         if (sleep) {
3368                                 ccb = xpt_get_ccb(periph);
3369                                 goto restart;
3370                         }
3371                         if (periph->flags & CAM_PERIPH_RUN_TASK)
3372                                 break;
3373                         cam_periph_doacquire(periph);
3374                         periph->flags |= CAM_PERIPH_RUN_TASK;
3375                         taskqueue_enqueue(xsoftc.xpt_taskq,
3376                             &periph->periph_run_task);
3377                         break;
3378                 }
3379                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3380                 if (prio == periph->immediate_priority) {
3381                         periph->immediate_priority = CAM_PRIORITY_NONE;
3382                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3383                                         ("waking cam_periph_getccb()\n"));
3384                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3385                                           periph_links.sle);
3386                         wakeup(&periph->ccb_list);
3387                 } else {
3388                         periph->scheduled_priority = CAM_PRIORITY_NONE;
3389                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3390                                         ("calling periph_start()\n"));
3391                         periph->periph_start(periph, ccb);
3392                 }
3393                 ccb = NULL;
3394         }
3395         if (ccb != NULL)
3396                 xpt_release_ccb(ccb);
3397         periph->periph_allocating = 0;
3398         cam_periph_release_locked(periph);
3399 }
3400
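/*
 * Drain the device send queue: repeatedly pop the highest-priority device,
 * take its next CCB, enforce the high-power command limit, consume an
 * opening, and dispatch the CCB to the owning SIM.
 */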
3401 static void
3402 xpt_run_devq(struct cam_devq *devq)
3403 {
3404         struct mtx *mtx;
3405
3406         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3407
3408         devq->send_queue.qfrozen_cnt++;
3409         while ((devq->send_queue.entries > 0)
3410             && (devq->send_openings > 0)
3411             && (devq->send_queue.qfrozen_cnt <= 1)) {
3412                 struct  cam_ed *device;
3413                 union ccb *work_ccb;
3414                 struct  cam_sim *sim;
3415                 struct xpt_proto *proto;
3416
3417                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
3418                                                            CAMQ_HEAD);
3419                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3420                                 ("running device %p\n", device));
3421
3422                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3423                 if (work_ccb == NULL) {
3424                         printf("device on run queue with no ccbs???\n");
3425                         continue;
3426                 }
3427
3428                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3429                         mtx_lock(&xsoftc.xpt_highpower_lock);
3430                         if (xsoftc.num_highpower <= 0) {
3431                                 /*
3432                                  * We got a high power command, but we
3433                                  * don't have any available slots.  Freeze
3434                                  * the device queue until we have a slot
3435                                  * available.
3436                                  */
3437                                 xpt_freeze_devq_device(device, 1);
3438                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3439                                                    highpowerq_entry);
3440
3441                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
3442                                 continue;
3443                         } else {
3444                                 /*
3445                                  * Consume a high power slot while
3446                                  * this ccb runs.
3447                                  */
3448                                 xsoftc.num_highpower--;
3449                         }
3450                         mtx_unlock(&xsoftc.xpt_highpower_lock);
3451                 }
3452                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3453                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3454                 devq->send_openings--;
3455                 devq->send_active++;
3456                 xpt_schedule_devq(devq, device);
3457                 mtx_unlock(&devq->send_mtx);
3458
3459                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3460                         /*
3461                          * The client wants to freeze the queue
3462                          * after this CCB is sent.
3463                          */
3464                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3465                 }
3466
3467                 /* In Target mode, the peripheral driver knows best... */
3468                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3469                         if ((device->inq_flags & SID_CmdQue) != 0
3470                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3471                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3472                         else
3473                                 /*
3474                                  * Clear this in case of a retried CCB that
3475                                  * failed due to a rejected tag.
3476                                  */
3477                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3478                 }
3479
3480                 KASSERT(device == work_ccb->ccb_h.path->device,
3481                     ("device (%p) / path->device (%p) mismatch",
3482                         device, work_ccb->ccb_h.path->device));
3483                 proto = xpt_proto_find(device->protocol);
3484                 if (proto && proto->ops->debug_out)
3485                         proto->ops->debug_out(work_ccb);
3486
3487                 /*
3488                  * Device queues can be shared among multiple SIM instances
3489                  * that reside on different buses.  Use the SIM from the
3490                  * queued device, rather than the one from the calling bus.
3491                  */
3492                 sim = device->sim;
3493                 mtx = sim->mtx;
3494                 if (mtx && !mtx_owned(mtx))
3495                         mtx_lock(mtx);
3496                 else
3497                         mtx = NULL;
3498                 work_ccb->ccb_h.qos.periph_data = cam_iosched_now();
3499                 (*(sim->sim_action))(sim, work_ccb);
3500                 if (mtx)
3501                         mtx_unlock(mtx);
3502                 mtx_lock(&devq->send_mtx);
3503         }
3504         devq->send_queue.qfrozen_cnt--;
3505 }
3506
3507 /*
3508  * Copy the peripheral-settable header fields and the CCB payload from the
3509  * src ccb into the dst ccb, keeping the remaining dst header fields intact.
3510  */
3511 void
3512 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb)
3513 {
3514
3515         /*
3516          * Pull fields that are valid for peripheral drivers to set
3517          * into the dst CCB along with the CCB "payload".
3518          */
3519         dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count;
3520         dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code;
3521         dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout;
3522         dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags;
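        /* Copy everything that follows the header, i.e. the CCB payload. */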
3523         bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1],
3524               sizeof(union ccb) - sizeof(struct ccb_hdr));
3525 }
3526
3527 void
3528 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3529                     u_int32_t priority, u_int32_t flags)
3530 {
3531
3532         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3533         ccb_h->pinfo.priority = priority;
3534         ccb_h->path = path;
3535         ccb_h->path_id = path->bus->path_id;
3536         if (path->target)
3537                 ccb_h->target_id = path->target->target_id;
3538         else
3539                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3540         if (path->device) {
3541                 ccb_h->target_lun = path->device->lun_id;
3542                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3543         } else {
3544                 ccb_h->target_lun = CAM_LUN_WILDCARD;
3545         }
3546         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3547         ccb_h->flags = flags;
3548         ccb_h->xflags = 0;
3549 }
3550
3551 void
3552 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3553 {
3554         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3555 }
3556
3557 /* Path manipulation functions */
3558 cam_status
3559 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3560                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3561 {
3562         struct     cam_path *path;
3563         cam_status status;
3564
3565         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3566
3567         if (path == NULL) {
3568                 status = CAM_RESRC_UNAVAIL;
3569                 return(status);
3570         }
3571         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3572         if (status != CAM_REQ_CMP) {
3573                 free(path, M_CAMPATH);
3574                 path = NULL;
3575         }
3576         *new_path_ptr = path;
3577         return (status);
3578 }
3579
3580 cam_status
3581 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3582                          struct cam_periph *periph, path_id_t path_id,
3583                          target_id_t target_id, lun_id_t lun_id)
3584 {
3585
3586         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3587             lun_id));
3588 }
3589
3590 cam_status
3591 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3592                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3593 {
3594         struct       cam_eb *bus;
3595         struct       cam_et *target;
3596         struct       cam_ed *device;
3597         cam_status   status;
3598
3599         status = CAM_REQ_CMP;   /* Completed without error */
3600         target = NULL;          /* Wildcarded */
3601         device = NULL;          /* Wildcarded */
3602
3603         /*
3604          * We will potentially modify the EDT, so take the topology and
3605          * bus locks to serialize against others creating cam paths.
3606          */
3607         bus = xpt_find_bus(path_id);
3608         if (bus == NULL) {
3609                 status = CAM_PATH_INVALID;
3610         } else {
3611                 xpt_lock_buses();
3612                 mtx_lock(&bus->eb_mtx);
3613                 target = xpt_find_target(bus, target_id);
3614                 if (target == NULL) {
3615                         /* Create one */
3616                         struct cam_et *new_target;
3617
3618                         new_target = xpt_alloc_target(bus, target_id);
3619                         if (new_target == NULL) {
3620                                 status = CAM_RESRC_UNAVAIL;
3621                         } else {
3622                                 target = new_target;
3623                         }
3624                 }
3625                 xpt_unlock_buses();
3626                 if (target != NULL) {
3627                         device = xpt_find_device(target, lun_id);
3628                         if (device == NULL) {
3629                                 /* Create one */
3630                                 struct cam_ed *new_device;
3631
3632                                 new_device =
3633                                     (*(bus->xport->ops->alloc_device))(bus,
3634                                                                        target,
3635                                                                        lun_id);
3636                                 if (new_device == NULL) {
3637                                         status = CAM_RESRC_UNAVAIL;
3638                                 } else {
3639                                         device = new_device;
3640                                 }
3641                         }
3642                 }
3643                 mtx_unlock(&bus->eb_mtx);
3644         }
3645
3646         /*
3647          * Only touch the user's data if we are successful.
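         * On success the path keeps the references acquired above on the
         * bus, target and device; on failure they are dropped here.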
3648          */
3649         if (status == CAM_REQ_CMP) {
3650                 new_path->periph = perph;
3651                 new_path->bus = bus;
3652                 new_path->target = target;
3653                 new_path->device = device;
3654                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3655         } else {
3656                 if (device != NULL)
3657                         xpt_release_device(device);
3658                 if (target != NULL)
3659                         xpt_release_target(target);
3660                 if (bus != NULL)
3661                         xpt_release_bus(bus);
3662         }
3663         return (status);
3664 }
3665
3666 cam_status
3667 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3668 {
3669         struct     cam_path *new_path;
3670
3671         new_path = (struct cam_path *)malloc(sizeof(*new_path), M_CAMPATH, M_NOWAIT);
3672         if (new_path == NULL)
3673                 return(CAM_RESRC_UNAVAIL);
3674         *new_path = *path;
3675         if (path->bus != NULL)
3676                 xpt_acquire_bus(path->bus);
3677         if (path->target != NULL)
3678                 xpt_acquire_target(path->target);
3679         if (path->device != NULL)
3680                 xpt_acquire_device(path->device);
3681         *new_path_ptr = new_path;
3682         return (CAM_REQ_CMP);
3683 }
3684
3685 void
3686 xpt_release_path(struct cam_path *path)
3687 {
3688         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3689         if (path->device != NULL) {
3690                 xpt_release_device(path->device);
3691                 path->device = NULL;
3692         }
3693         if (path->target != NULL) {
3694                 xpt_release_target(path->target);
3695                 path->target = NULL;
3696         }
3697         if (path->bus != NULL) {
3698                 xpt_release_bus(path->bus);
3699                 path->bus = NULL;
3700         }
3701 }
3702
3703 void
3704 xpt_free_path(struct cam_path *path)
3705 {
3706
3707         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3708         xpt_release_path(path);
3709         free(path, M_CAMPATH);
3710 }
3711
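/*
 * Report the reference counts along a path.  The bus and periph counts are
 * sampled under the topology lock, the target and device counts without it,
 * so all values are instantaneous snapshots.
 */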
3712 void
3713 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3714     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3715 {
3716
3717         xpt_lock_buses();
3718         if (bus_ref) {
3719                 if (path->bus)
3720                         *bus_ref = path->bus->refcount;
3721                 else
3722                         *bus_ref = 0;
3723         }
3724         if (periph_ref) {
3725                 if (path->periph)
3726                         *periph_ref = path->periph->refcount;
3727                 else
3728                         *periph_ref = 0;
3729         }
3730         xpt_unlock_buses();
3731         if (target_ref) {
3732                 if (path->target)
3733                         *target_ref = path->target->refcount;
3734                 else
3735                         *target_ref = 0;
3736         }
3737         if (device_ref) {
3738                 if (path->device)
3739                         *device_ref = path->device->refcount;
3740                 else
3741                         *device_ref = 0;
3742         }
3743 }
3744
3745 /*
3746  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3747  * in path1, 2 for match with wildcards in path2.
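 *
 * For example, comparing a wildcard path (bus0:*:*) as path1 against a
 * fully specified path (bus0:1:0) as path2 returns 1, since only path1
 * needed wildcards to match.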
3748  */
3749 int
3750 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3751 {
3752         int retval = 0;
3753
3754         if (path1->bus != path2->bus) {
3755                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3756                         retval = 1;
3757                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3758                         retval = 2;
3759                 else
3760                         return (-1);
3761         }
3762         if (path1->target != path2->target) {
3763                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3764                         if (retval == 0)
3765                                 retval = 1;
3766                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3767                         retval = 2;
3768                 else
3769                         return (-1);
3770         }
3771         if (path1->device != path2->device) {
3772                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3773                         if (retval == 0)
3774                                 retval = 1;
3775                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3776                         retval = 2;
3777                 else
3778                         return (-1);
3779         }
3780         return (retval);
3781 }
3782
3783 int
3784 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3785 {
3786         int retval = 0;
3787
3788         if (path->bus != dev->target->bus) {
3789                 if (path->bus->path_id == CAM_BUS_WILDCARD)
3790                         retval = 1;
3791                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3792                         retval = 2;
3793                 else
3794                         return (-1);
3795         }
3796         if (path->target != dev->target) {
3797                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
3798                         if (retval == 0)
3799                                 retval = 1;
3800                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3801                         retval = 2;
3802                 else
3803                         return (-1);
3804         }
3805         if (path->device != dev) {
3806                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
3807                         if (retval == 0)
3808                                 retval = 1;
3809                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
3810                         retval = 2;
3811                 else
3812                         return (-1);
3813         }
3814         return (retval);
3815 }
3816
3817 void
3818 xpt_print_path(struct cam_path *path)
3819 {
3820         struct sbuf sb;
3821         char buffer[XPT_PRINT_LEN];
3822
3823         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3824         xpt_path_sbuf(path, &sb);
3825         sbuf_finish(&sb);
3826         printf("%s", sbuf_data(&sb));
3827         sbuf_delete(&sb);
3828 }
3829
3830 void
3831 xpt_print_device(struct cam_ed *device)
3832 {
3833
3834         if (device == NULL)
3835                 printf("(nopath): ");
3836         else {
3837                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3838                        device->sim->unit_number,
3839                        device->sim->bus_id,
3840                        device->target->target_id,
3841                        (uintmax_t)device->lun_id);
3842         }
3843 }
3844
3845 void
3846 xpt_print(struct cam_path *path, const char *fmt, ...)
3847 {
3848         va_list ap;
3849         struct sbuf sb;
3850         char buffer[XPT_PRINT_LEN];
3851
3852         sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3853
3854         xpt_path_sbuf(path, &sb);
3855         va_start(ap, fmt);
3856         sbuf_vprintf(&sb, fmt, ap);
3857         va_end(ap);
3858
3859         sbuf_finish(&sb);
3860         printf("%s", sbuf_data(&sb));
3861         sbuf_delete(&sb);
3862 }
3863
3864 int
3865 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3866 {
3867         struct sbuf sb;
3868         int len;
3869
3870         sbuf_new(&sb, str, str_len, 0);
3871         len = xpt_path_sbuf(path, &sb);
3872         sbuf_finish(&sb);
3873         return (len);
3874 }
3875
3876 int
3877 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3878 {
3879
3880         if (path == NULL)
3881                 sbuf_printf(sb, "(nopath): ");
3882         else {
3883                 if (path->periph != NULL)
3884                         sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3885                                     path->periph->unit_number);
3886                 else
3887                         sbuf_printf(sb, "(noperiph:");
3888
3889                 if (path->bus != NULL)
3890                         sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3891                                     path->bus->sim->unit_number,
3892                                     path->bus->sim->bus_id);
3893                 else
3894                         sbuf_printf(sb, "nobus:");
3895
3896                 if (path->target != NULL)
3897                         sbuf_printf(sb, "%d:", path->target->target_id);
3898                 else
3899                         sbuf_printf(sb, "X:");
3900
3901                 if (path->device != NULL)
3902                         sbuf_printf(sb, "%jx): ",
3903                             (uintmax_t)path->device->lun_id);
3904                 else
3905                         sbuf_printf(sb, "X): ");
3906         }
3907
3908         return(sbuf_len(sb));
3909 }
3910
3911 path_id_t
3912 xpt_path_path_id(struct cam_path *path)
3913 {
3914         return(path->bus->path_id);
3915 }
3916
3917 target_id_t
3918 xpt_path_target_id(struct cam_path *path)
3919 {
3920         if (path->target != NULL)
3921                 return (path->target->target_id);
3922         else
3923                 return (CAM_TARGET_WILDCARD);
3924 }
3925
3926 lun_id_t
3927 xpt_path_lun_id(struct cam_path *path)
3928 {
3929         if (path->device != NULL)
3930                 return (path->device->lun_id);
3931         else
3932                 return (CAM_LUN_WILDCARD);
3933 }
3934
3935 struct cam_sim *
3936 xpt_path_sim(struct cam_path *path)
3937 {
3938
3939         return (path->bus->sim);
3940 }
3941
3942 struct cam_periph*
3943 xpt_path_periph(struct cam_path *path)
3944 {
3945
3946         return (path->periph);
3947 }
3948
3949 /*
3950  * Release a CAM control block for the caller.  Remit the cost of the structure
3951  * to the device referenced by the path.  If this device had no 'credits'
3952  * and peripheral drivers have registered async callbacks for this
3953  * notification, call them now.
3954  */
3955 void
3956 xpt_release_ccb(union ccb *free_ccb)
3957 {
3958         struct   cam_ed *device;
3959         struct   cam_periph *periph;
3960
3961         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3962         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3963         device = free_ccb->ccb_h.path->device;
3964         periph = free_ccb->ccb_h.path->periph;
3965
3966         xpt_free_ccb(free_ccb);
3967         periph->periph_allocated--;
3968         cam_ccbq_release_opening(&device->ccbq);
3969         xpt_run_allocq(periph, 0);
3970 }
3971
3972 /* Functions accessed by SIM drivers */
3973
3974 static struct xpt_xport_ops xport_default_ops = {
3975         .alloc_device = xpt_alloc_device_default,
3976         .action = xpt_action_default,
3977         .async = xpt_dev_async_default,
3978 };
3979 static struct xpt_xport xport_default = {
3980         .xport = XPORT_UNKNOWN,
3981         .name = "unknown",
3982         .ops = &xport_default_ops,
3983 };
3984
3985 CAM_XPT_XPORT(xport_default);
3986
3987 /*
3988  * A sim structure, listing the SIM entry points and instance
3989  * identification info, is passed to xpt_bus_register to hook the SIM
3990  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3991  * for this new bus and places it in the array of buses and assigns
3992  * it a path_id.  The path_id may be influenced by "hard wiring"
3993  * information specified by the user.  Once interrupt services are
3994  * available, the bus will be probed.
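 *
 * A typical attach sequence in a SIM driver looks roughly like the
 * following sketch ("foo" and the softc layout are hypothetical):
 *
 *	devq = cam_simq_alloc(MAX_QUEUE);
 *	sim = cam_sim_alloc(fooaction, foopoll, "foo", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, MAX_TAGS, devq);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		goto fail;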
3995  */
3996 int32_t
3997 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3998 {
3999         struct cam_eb *new_bus;
4000         struct cam_eb *old_bus;
4001         struct ccb_pathinq cpi;
4002         struct cam_path *path;
4003         cam_status status;
4004
4005         sim->bus_id = bus;
4006         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4007                                           M_CAMXPT, M_NOWAIT|M_ZERO);
4008         if (new_bus == NULL) {
4009                 /* Couldn't satisfy request */
4010                 return (CAM_RESRC_UNAVAIL);
4011         }
4012
4013         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
4014         TAILQ_INIT(&new_bus->et_entries);
4015         cam_sim_hold(sim);
4016         new_bus->sim = sim;
4017         timevalclear(&new_bus->last_reset);
4018         new_bus->flags = 0;
4019         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4020         new_bus->generation = 0;
4021
4022         xpt_lock_buses();
4023         sim->path_id = new_bus->path_id =
4024             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4025         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4026         while (old_bus != NULL
4027             && old_bus->path_id < new_bus->path_id)
4028                 old_bus = TAILQ_NEXT(old_bus, links);
4029         if (old_bus != NULL)
4030                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4031         else
4032                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4033         xsoftc.bus_generation++;
4034         xpt_unlock_buses();
4035
4036         /*
4037          * Set a default transport so that a PATH_INQ can be issued to
4038          * the SIM.  This will then allow for probing and attaching of
4039          * a more appropriate transport.
4040          */
4041         new_bus->xport = &xport_default;
4042
4043         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
4044                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4045         if (status != CAM_REQ_CMP) {
4046                 xpt_release_bus(new_bus);
4047                 return (CAM_RESRC_UNAVAIL);
4048         }
4049
4050         xpt_path_inq(&cpi, path);
4051
4052         if (cpi.ccb_h.status == CAM_REQ_CMP) {
4053                 struct xpt_xport **xpt;
4054
4055                 SET_FOREACH(xpt, cam_xpt_xport_set) {
4056                         if ((*xpt)->xport == cpi.transport) {
4057                                 new_bus->xport = *xpt;
4058                                 break;
4059                         }
4060                 }
4061                 if (new_bus->xport == &xport_default) {
4062                         xpt_print(path,
4063                             "No transport found for %d\n", cpi.transport);
4064                         xpt_release_bus(new_bus);
4065                         xpt_free_path(path);
4066                         return (CAM_RESRC_UNAVAIL);
4067                 }
4068         }
4069
4070         /* Notify interested parties */
4071         if (sim->path_id != CAM_XPT_PATH_ID) {
4072                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
4073                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
4074                         union   ccb *scan_ccb;
4075
4076                         /* Initiate bus rescan. */
4077                         scan_ccb = xpt_alloc_ccb_nowait();
4078                         if (scan_ccb != NULL) {
4079                                 scan_ccb->ccb_h.path = path;
4080                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
4081                                 scan_ccb->crcn.flags = 0;
4082                                 xpt_rescan(scan_ccb);
4083                         } else {
4084                                 xpt_print(path,
4085                                           "Can't allocate CCB to scan bus\n");
4086                                 xpt_free_path(path);
4087                         }
4088                 } else
4089                         xpt_free_path(path);
4090         } else
4091                 xpt_free_path(path);
4092         return (CAM_SUCCESS);
4093 }
4094
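/*
 * Illustrative sketch only (not code from this file): a hypothetical
 * SIM driver "foo" would typically allocate a devq and a sim, then
 * register its bus while holding the sim lock.  The names foo_action,
 * foo_poll, foo_mtx and FOO_MAXIO are assumptions for the example.
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	if ((devq = cam_simq_alloc(FOO_MAXIO)) == NULL)
 *		return (ENOMEM);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *	    device_get_unit(dev), &softc->foo_mtx, FOO_MAXIO,
 *	    FOO_MAXIO, devq);
 *	if (sim == NULL) {
 *		cam_simq_free(devq);
 *		return (ENOMEM);
 *	}
 *	mtx_lock(&softc->foo_mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);	// TRUE also frees devq
 *		mtx_unlock(&softc->foo_mtx);
 *		return (ENXIO);
 *	}
 *	mtx_unlock(&softc->foo_mtx);
 */
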
4095 int32_t
4096 xpt_bus_deregister(path_id_t pathid)
4097 {
4098         struct cam_path bus_path;
4099         cam_status status;
4100
4101         status = xpt_compile_path(&bus_path, NULL, pathid,
4102                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4103         if (status != CAM_REQ_CMP)
4104                 return (status);
4105
4106         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4107         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4108
4109         /* Release the reference count held while registered. */
4110         xpt_release_bus(bus_path.bus);
4111         xpt_release_path(&bus_path);
4112
4113         return (CAM_REQ_CMP);
4114 }
4115
4116 static path_id_t
4117 xptnextfreepathid(void)
4118 {
4119         struct cam_eb *bus;
4120         path_id_t pathid;
4121         const char *strval;
4122
4123         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4124         pathid = 0;
4125         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4126 retry:
4127         /* Find an unoccupied pathid */
4128         while (bus != NULL && bus->path_id <= pathid) {
4129                 if (bus->path_id == pathid)
4130                         pathid++;
4131                 bus = TAILQ_NEXT(bus, links);
4132         }
4133
4134         /*
4135          * Ensure that this pathid is not reserved for
4136          * a bus that may be registered in the future.
4137          */
4138         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4139                 ++pathid;
4140                 /* Start the search over */
4141                 goto retry;
4142         }
4143         return (pathid);
4144 }
4145
4146 static path_id_t
4147 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4148 {
4149         path_id_t pathid;
4150         int i, dunit, val;
4151         char buf[32];
4152         const char *dname;
4153
4154         pathid = CAM_XPT_PATH_ID;
4155         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4156         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4157                 return (pathid);
4158         i = 0;
4159         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4160                 if (strcmp(dname, "scbus")) {
4161                         /* Avoid a bit of foot shooting. */
4162                         continue;
4163                 }
4164                 if (dunit < 0)          /* unwired?! */
4165                         continue;
4166                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4167                         if (sim_bus == val) {
4168                                 pathid = dunit;
4169                                 break;
4170                         }
4171                 } else if (sim_bus == 0) {
4172                         /* Unspecified matches bus 0 */
4173                         pathid = dunit;
4174                         break;
4175                 } else {
4176                         printf("Ambiguous scbus configuration for %s%d "
4177                                "bus %d, cannot wire down.  The kernel "
4178                                "config entry for scbus%d should "
4179                                "specify a controller bus.\n"
4180                                "Scbus will be assigned dynamically.\n",
4181                                sim_name, sim_unit, sim_bus, dunit);
4182                         break;
4183                 }
4184         }
4185
4186         if (pathid == CAM_XPT_PATH_ID)
4187                 pathid = xptnextfreepathid();
4188         return (pathid);
4189 }
4190
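/*
 * The "hard wiring" consulted above comes from kernel hints.  For
 * example (illustrative; the controller name is an assumption),
 * lines like
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * in /boot/device.hints reserve path_id 0 for bus 0 of ahc0;
 * xptpathid() matches a registering SIM against such entries, and
 * xptnextfreepathid() skips any pathid wired this way.
 */
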
4191 static const char *
4192 xpt_async_string(u_int32_t async_code)
4193 {
4194
4195         switch (async_code) {
4196         case AC_BUS_RESET: return ("AC_BUS_RESET");
4197         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4198         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4199         case AC_SENT_BDR: return ("AC_SENT_BDR");
4200         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4201         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4202         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4203         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4204         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4205         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4206         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4207         case AC_CONTRACT: return ("AC_CONTRACT");
4208         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4209         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4210         }
4211         return ("AC_UNKNOWN");
4212 }
4213
4214 static int
4215 xpt_async_size(u_int32_t async_code)
4216 {
4217
4218         switch (async_code) {
4219         case AC_BUS_RESET: return (0);
4220         case AC_UNSOL_RESEL: return (0);
4221         case AC_SCSI_AEN: return (0);
4222         case AC_SENT_BDR: return (0);
4223         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4224         case AC_PATH_DEREGISTERED: return (0);
4225         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4226         case AC_LOST_DEVICE: return (0);
4227         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4228         case AC_INQ_CHANGED: return (0);
4229         case AC_GETDEV_CHANGED: return (0);
4230         case AC_CONTRACT: return (sizeof(struct ac_contract));
4231         case AC_ADVINFO_CHANGED: return (-1);
4232         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4233         }
4234         return (0);
4235 }
4236
4237 static int
4238 xpt_async_process_dev(struct cam_ed *device, void *arg)
4239 {
4240         union ccb *ccb = arg;
4241         struct cam_path *path = ccb->ccb_h.path;
4242         void *async_arg = ccb->casync.async_arg_ptr;
4243         u_int32_t async_code = ccb->casync.async_code;
4244         int relock;
4245
4246         if (path->device != device
4247          && path->device->lun_id != CAM_LUN_WILDCARD
4248          && device->lun_id != CAM_LUN_WILDCARD)
4249                 return (1);
4250
4251         /*
4252          * The async callback could free the device.
4253          * If it is a broadcast async, it doesn't hold
4254          * a device reference, so take our own reference.
4255          */
4256         xpt_acquire_device(device);
4257
4258         /*
4259          * If an async for a specific device is to be delivered to
4260          * a wildcard client, take the specific device lock.
4261          * XXX: We may need a way for the client to specify this.
4262          */
4263         if ((device->lun_id == CAM_LUN_WILDCARD &&
4264              path->device->lun_id != CAM_LUN_WILDCARD) ||
4265             (device->target->target_id == CAM_TARGET_WILDCARD &&
4266              path->target->target_id != CAM_TARGET_WILDCARD) ||
4267             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4268              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4269                 mtx_unlock(&device->device_mtx);
4270                 xpt_path_lock(path);
4271                 relock = 1;
4272         } else
4273                 relock = 0;
4274
4275         (*(device->target->bus->xport->ops->async))(async_code,
4276             device->target->bus, device->target, device, async_arg);
4277         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4278
4279         if (relock) {
4280                 xpt_path_unlock(path);
4281                 mtx_lock(&device->device_mtx);
4282         }
4283         xpt_release_device(device);
4284         return (1);
4285 }
4286
4287 static int
4288 xpt_async_process_tgt(struct cam_et *target, void *arg)
4289 {
4290         union ccb *ccb = arg;
4291         struct cam_path *path = ccb->ccb_h.path;
4292
4293         if (path->target != target
4294          && path->target->target_id != CAM_TARGET_WILDCARD
4295          && target->target_id != CAM_TARGET_WILDCARD)
4296                 return (1);
4297
4298         if (ccb->casync.async_code == AC_SENT_BDR) {
4299                 /* Update our notion of when the last reset occurred */
4300                 microtime(&target->last_reset);
4301         }
4302
4303         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4304 }
4305
4306 static void
4307 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4308 {
4309         struct cam_eb *bus;
4310         struct cam_path *path;
4311         void *async_arg;
4312         u_int32_t async_code;
4313
4314         path = ccb->ccb_h.path;
4315         async_code = ccb->casync.async_code;
4316         async_arg = ccb->casync.async_arg_ptr;
4317         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4318             ("xpt_async(%s)\n", xpt_async_string(async_code)));
4319         bus = path->bus;
4320
4321         if (async_code == AC_BUS_RESET) {
4322                 /* Update our notion of when the last reset occurred */
4323                 microtime(&bus->last_reset);
4324         }
4325
4326         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4327
4328         /*
4329          * If this wasn't a fully wildcarded async, tell all
4330          * clients that want all async events.
4331          */
4332         if (bus != xpt_periph->path->bus) {
4333                 xpt_path_lock(xpt_periph->path);
4334                 xpt_async_process_dev(xpt_periph->path->device, ccb);
4335                 xpt_path_unlock(xpt_periph->path);
4336         }
4337
4338         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4339                 xpt_release_devq(path, 1, TRUE);
4340         else
4341                 xpt_release_simq(path->bus->sim, TRUE);
4342         if (ccb->casync.async_arg_size > 0)
4343                 free(async_arg, M_CAMXPT);
4344         xpt_free_path(path);
4345         xpt_free_ccb(ccb);
4346 }
4347
4348 static void
4349 xpt_async_bcast(struct async_list *async_head,
4350                 u_int32_t async_code,
4351                 struct cam_path *path, void *async_arg)
4352 {
4353         struct async_node *cur_entry;
4354         struct mtx *mtx;
4355
4356         cur_entry = SLIST_FIRST(async_head);
4357         while (cur_entry != NULL) {
4358                 struct async_node *next_entry;
4359                 /*
4360                  * Grab the next list entry before we call the current
4361                  * entry's callback.  This is because the callback function
4362                  * can delete its async callback entry.
4363                  */
4364                 next_entry = SLIST_NEXT(cur_entry, links);
4365                 if ((cur_entry->event_enable & async_code) != 0) {
4366                         mtx = cur_entry->event_lock ?
4367                             path->device->sim->mtx : NULL;
4368                         if (mtx)
4369                                 mtx_lock(mtx);
4370                         cur_entry->callback(cur_entry->callback_arg,
4371                                             async_code, path,
4372                                             async_arg);
4373                         if (mtx)
4374                                 mtx_unlock(mtx);
4375                 }
4376                 cur_entry = next_entry;
4377         }
4378 }
4379
4380 void
4381 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4382 {
4383         union ccb *ccb;
4384         int size;
4385
4386         ccb = xpt_alloc_ccb_nowait();
4387         if (ccb == NULL) {
4388                 xpt_print(path, "Can't allocate CCB to send %s\n",
4389                     xpt_async_string(async_code));
4390                 return;
4391         }
4392
4393         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4394                 xpt_print(path, "Can't allocate path to send %s\n",
4395                     xpt_async_string(async_code));
4396                 xpt_free_ccb(ccb);
4397                 return;
4398         }
4399         ccb->ccb_h.path->periph = NULL;
4400         ccb->ccb_h.func_code = XPT_ASYNC;
4401         ccb->ccb_h.cbfcnp = xpt_async_process;
4402         ccb->ccb_h.flags |= CAM_UNLOCKED;
4403         ccb->casync.async_code = async_code;
4404         ccb->casync.async_arg_size = 0;
4405         size = xpt_async_size(async_code);
4406         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4407             ("xpt_async: func %#x %s async_code %d %s\n",
4408                 ccb->ccb_h.func_code,
4409                 xpt_action_name(ccb->ccb_h.func_code),
4410                 async_code,
4411                 xpt_async_string(async_code)));
4412         if (size > 0 && async_arg != NULL) {
4413                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4414                 if (ccb->casync.async_arg_ptr == NULL) {
4415                         xpt_print(path, "Can't allocate argument to send %s\n",
4416                             xpt_async_string(async_code));
4417                         xpt_free_path(ccb->ccb_h.path);
4418                         xpt_free_ccb(ccb);
4419                         return;
4420                 }
4421                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4422                 ccb->casync.async_arg_size = size;
4423         } else if (size < 0) {
4424                 ccb->casync.async_arg_ptr = async_arg;
4425                 ccb->casync.async_arg_size = size;
4426         }
4427         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4428                 xpt_freeze_devq(path, 1);
4429         else
4430                 xpt_freeze_simq(path->bus->sim, 1);
4431         xpt_action(ccb);
4432 }
4433
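/*
 * Illustrative use of xpt_async() (a sketch, not taken from this
 * file): a SIM that has just reset its bus would notify consumers
 * with
 *
 *	xpt_async(AC_BUS_RESET, sim_path, NULL);
 *
 * where sim_path is assumed to be a wildcard path for that bus.  The
 * notification is queued as an XPT_ASYNC CCB and delivered later by
 * xpt_async_process() above, not in the caller's context.
 */
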
4434 static void
4435 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4436                       struct cam_et *target, struct cam_ed *device,
4437                       void *async_arg)
4438 {
4439
4440         /*
4441          * We only need to handle events for real devices.
4442          */
4443         if (target->target_id == CAM_TARGET_WILDCARD
4444          || device->lun_id == CAM_LUN_WILDCARD)
4445                 return;
4446
4447         printf("%s called\n", __func__);
4448 }
4449
4450 static uint32_t
4451 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4452 {
4453         struct cam_devq *devq;
4454         uint32_t freeze;
4455
4456         devq = dev->sim->devq;
4457         mtx_assert(&devq->send_mtx, MA_OWNED);
4458         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4459             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4460             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4461         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4462         /* Remove frozen device from sendq. */
4463         if (device_is_queued(dev))
4464                 camq_remove(&devq->send_queue, dev->devq_entry.index);
4465         return (freeze);
4466 }
4467
4468 u_int32_t
4469 xpt_freeze_devq(struct cam_path *path, u_int count)
4470 {
4471         struct cam_ed   *dev = path->device;
4472         struct cam_devq *devq;
4473         uint32_t         freeze;
4474
4475         devq = dev->sim->devq;
4476         mtx_lock(&devq->send_mtx);
4477         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4478         freeze = xpt_freeze_devq_device(dev, count);
4479         mtx_unlock(&devq->send_mtx);
4480         return (freeze);
4481 }
4482
4483 u_int32_t
4484 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4485 {
4486         struct cam_devq *devq;
4487         uint32_t         freeze;
4488
4489         devq = sim->devq;
4490         mtx_lock(&devq->send_mtx);
4491         freeze = (devq->send_queue.qfrozen_cnt += count);
4492         mtx_unlock(&devq->send_mtx);
4493         return (freeze);
4494 }
4495
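/*
 * Freezes must be balanced by releases (an illustrative sketch): a
 * SIM that temporarily cannot accept commands might do
 *
 *	xpt_freeze_simq(sim, 1);
 *	... reset or recover the controller ...
 *	xpt_release_simq(sim, TRUE);	// TRUE restarts the send queue
 *
 * Each freeze increments qfrozen_cnt; queued I/O is dispatched again
 * only once the count returns to zero.
 */
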
4496 static void
4497 xpt_release_devq_timeout(void *arg)
4498 {
4499         struct cam_ed *dev;
4500         struct cam_devq *devq;
4501
4502         dev = (struct cam_ed *)arg;
4503         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4504         devq = dev->sim->devq;
4505         mtx_assert(&devq->send_mtx, MA_OWNED);
4506         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4507                 xpt_run_devq(devq);
4508 }
4509
4510 void
4511 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4512 {
4513         struct cam_ed *dev;
4514         struct cam_devq *devq;
4515
4516         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4517             count, run_queue));
4518         dev = path->device;
4519         devq = dev->sim->devq;
4520         mtx_lock(&devq->send_mtx);
4521         if (xpt_release_devq_device(dev, count, run_queue))
4522                 xpt_run_devq(dev->sim->devq);
4523         mtx_unlock(&devq->send_mtx);
4524 }
4525
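/*
 * Common caller pattern (a sketch): SIMs freeze a device queue by
 * completing a CCB with CAM_DEV_QFRZN set in its status, and the
 * peripheral thaws it during error recovery, e.g.
 *
 *	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 *		cam_release_devq(ccb->ccb_h.path, 0, 0, 0, FALSE);
 *
 * which reaches this function through the XPT_REL_SIMQ machinery.
 */
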
4526 static int
4527 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4528 {
4529
4530         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4531         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4532             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4533             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4534         if (count > dev->ccbq.queue.qfrozen_cnt) {
4535 #ifdef INVARIANTS
4536                 printf("xpt_release_devq(): requested %u > present %u\n",
4537                     count, dev->ccbq.queue.qfrozen_cnt);
4538 #endif
4539                 count = dev->ccbq.queue.qfrozen_cnt;
4540         }
4541         dev->ccbq.queue.qfrozen_cnt -= count;
4542         if (dev->ccbq.queue.qfrozen_cnt == 0) {
4543                 /*
4544                  * No longer need to wait for a successful
4545                  * command completion.
4546                  */
4547                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4548                 /*
4549                  * Remove any timeouts that might be scheduled
4550                  * to release this queue.
4551                  */
4552                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4553                         callout_stop(&dev->callout);
4554                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4555                 }
4556                 /*
4557                  * Now that we are unfrozen, schedule the
4558                  * device so any pending transactions are
4559                  * run.
4560                  */
4561                 xpt_schedule_devq(dev->sim->devq, dev);
4562         } else
4563                 run_queue = 0;
4564         return (run_queue);
4565 }
4566
4567 void
4568 xpt_release_simq(struct cam_sim *sim, int run_queue)
4569 {
4570         struct cam_devq *devq;
4571
4572         devq = sim->devq;
4573         mtx_lock(&devq->send_mtx);
4574         if (devq->send_queue.qfrozen_cnt <= 0) {
4575 #ifdef INVARIANTS
4576                 printf("xpt_release_simq: requested 1 > present %u\n",
4577                     devq->send_queue.qfrozen_cnt);
4578 #endif
4579         } else
4580                 devq->send_queue.qfrozen_cnt--;
4581         if (devq->send_queue.qfrozen_cnt == 0) {
4582                 /*
4583                  * If there is a timeout scheduled to release this
4584                  * sim queue, remove it.  The queue frozen count is
4585                  * already at 0.
4586                  */
4587                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4588                         callout_stop(&sim->callout);
4589                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4590                 }
4591                 if (run_queue) {
4592                         /*
4593                          * Now that we are unfrozen, run the send queue.
4594                          */
4595                         xpt_run_devq(sim->devq);
4596                 }
4597         }
4598         mtx_unlock(&devq->send_mtx);
4599 }
4600
4601 void
4602 xpt_done(union ccb *done_ccb)
4603 {
4604         struct cam_doneq *queue;
4605         int     run, hash;
4606
4607 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4608         if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
4609             done_ccb->csio.bio != NULL)
4610                 biotrack(done_ccb->csio.bio, __func__);
4611 #endif
4612
4613         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4614             ("xpt_done: func %#x %s status %#x\n",
4615                 done_ccb->ccb_h.func_code,
4616                 xpt_action_name(done_ccb->ccb_h.func_code),
4617                 done_ccb->ccb_h.status));
4618         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4619                 return;
4620
4621         /* Store the time the ccb was in the sim */
4622         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4623         hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4624             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4625         queue = &cam_doneqs[hash];
4626         mtx_lock(&queue->cam_doneq_mtx);
4627         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4628         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4629         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4630         mtx_unlock(&queue->cam_doneq_mtx);
4631         if (run)
4632                 wakeup(&queue->cam_doneq);
4633 }
4634
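/*
 * Completion sketch (illustrative): a SIM finishes a request by
 * setting the status and handing the CCB back, e.g.
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 *
 * The hash above steers the CCB to one of the completion queues, so
 * the periph callback runs in a CAM completion thread rather than in
 * the SIM's (often interrupt) context.
 */
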
4635 void
4636 xpt_done_direct(union ccb *done_ccb)
4637 {
4638
4639         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4640             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4641         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4642                 return;
4643
4644         /* Store the time the ccb was in the sim */
4645         done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
4646         xpt_done_process(&done_ccb->ccb_h);
4647 }
4648
4649 union ccb *
4650 xpt_alloc_ccb(void)
4651 {
4652         union ccb *new_ccb;
4653
4654         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4655         return (new_ccb);
4656 }
4657
4658 union ccb *
4659 xpt_alloc_ccb_nowait(void)
4660 {
4661         union ccb *new_ccb;
4662
4663         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4664         return (new_ccb);
4665 }
4666
4667 void
4668 xpt_free_ccb(union ccb *free_ccb)
4669 {
4670         free(free_ccb, M_CAMCCB);
4671 }
4672
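/*
 * Typical life cycle of a caller-allocated CCB (an illustrative
 * sketch; "path" is assumed to be a valid, referenced cam_path, with
 * the path lock held where the target function requires it):
 *
 *	union ccb *ccb;
 *
 *	ccb = xpt_alloc_ccb();
 *	xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NORMAL);
 *	ccb->ccb_h.func_code = XPT_GDEV_TYPE;
 *	xpt_action(ccb);
 *	if (ccb->ccb_h.status == CAM_REQ_CMP)
 *		... consume ccb->cgd ...
 *	xpt_free_ccb(ccb);
 */
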
4673 /* Private XPT functions */
4674
4675 /*
4676  * Get a CAM control block for the caller. Charge the structure to the device
4677  * referenced by the path.  If we don't have sufficient resources to allocate
4678  * more ccbs, we return NULL.
4679  */
4680 static union ccb *
4681 xpt_get_ccb_nowait(struct cam_periph *periph)
4682 {
4683         union ccb *new_ccb;
4684
4685         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4686         if (new_ccb == NULL)
4687                 return (NULL);
4688         periph->periph_allocated++;
4689         cam_ccbq_take_opening(&periph->path->device->ccbq);
4690         return (new_ccb);
4691 }
4692
4693 static union ccb *
4694 xpt_get_ccb(struct cam_periph *periph)
4695 {
4696         union ccb *new_ccb;
4697
4698         cam_periph_unlock(periph);
4699         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4700         cam_periph_lock(periph);
4701         periph->periph_allocated++;
4702         cam_ccbq_take_opening(&periph->path->device->ccbq);
4703         return (new_ccb);
4704 }
4705
4706 union ccb *
4707 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4708 {
4709         struct ccb_hdr *ccb_h;
4710
4711         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4712         cam_periph_assert(periph, MA_OWNED);
4713         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4714             ccb_h->pinfo.priority != priority) {
4715                 if (priority < periph->immediate_priority) {
4716                         periph->immediate_priority = priority;
4717                         xpt_run_allocq(periph, 0);
4718                 } else
4719                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4720                             "cgticb", 0);
4721         }
4722         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4723         return ((union ccb *)ccb_h);
4724 }
4725
4726 static void
4727 xpt_acquire_bus(struct cam_eb *bus)
4728 {
4729
4730         xpt_lock_buses();
4731         bus->refcount++;
4732         xpt_unlock_buses();
4733 }
4734
4735 static void
4736 xpt_release_bus(struct cam_eb *bus)
4737 {
4738
4739         xpt_lock_buses();
4740         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4741         if (--bus->refcount > 0) {
4742                 xpt_unlock_buses();
4743                 return;
4744         }
4745         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4746         xsoftc.bus_generation++;
4747         xpt_unlock_buses();
4748         KASSERT(TAILQ_EMPTY(&bus->et_entries),
4749             ("destroying bus, but target list is not empty"));
4750         cam_sim_release(bus->sim);
4751         mtx_destroy(&bus->eb_mtx);
4752         free(bus, M_CAMXPT);
4753 }
4754
4755 static struct cam_et *
4756 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4757 {
4758         struct cam_et *cur_target, *target;
4759
4760         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4761         mtx_assert(&bus->eb_mtx, MA_OWNED);
4762         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4763                                          M_NOWAIT|M_ZERO);
4764         if (target == NULL)
4765                 return (NULL);
4766
4767         TAILQ_INIT(&target->ed_entries);
4768         target->bus = bus;
4769         target->target_id = target_id;
4770         target->refcount = 1;
4771         target->generation = 0;
4772         target->luns = NULL;
4773         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4774         timevalclear(&target->last_reset);
4775         /*
4776          * Hold a reference to our parent bus so it
4777          * will not go away before we do.
4778          */
4779         bus->refcount++;
4780
4781         /* Insertion sort into our bus's target list */
4782         cur_target = TAILQ_FIRST(&bus->et_entries);
4783         while (cur_target != NULL && cur_target->target_id < target_id)
4784                 cur_target = TAILQ_NEXT(cur_target, links);
4785         if (cur_target != NULL) {
4786                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4787         } else {
4788                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4789         }
4790         bus->generation++;
4791         return (target);
4792 }
4793
4794 static void
4795 xpt_acquire_target(struct cam_et *target)
4796 {
4797         struct cam_eb *bus = target->bus;
4798
4799         mtx_lock(&bus->eb_mtx);
4800         target->refcount++;
4801         mtx_unlock(&bus->eb_mtx);
4802 }
4803
4804 static void
4805 xpt_release_target(struct cam_et *target)
4806 {
4807         struct cam_eb *bus = target->bus;
4808
4809         mtx_lock(&bus->eb_mtx);
4810         if (--target->refcount > 0) {
4811                 mtx_unlock(&bus->eb_mtx);
4812                 return;
4813         }
4814         TAILQ_REMOVE(&bus->et_entries, target, links);
4815         bus->generation++;
4816         mtx_unlock(&bus->eb_mtx);
4817         KASSERT(TAILQ_EMPTY(&target->ed_entries),
4818             ("destroying target, but device list is not empty"));
4819         xpt_release_bus(bus);
4820         mtx_destroy(&target->luns_mtx);
4821         if (target->luns)
4822                 free(target->luns, M_CAMXPT);
4823         free(target, M_CAMXPT);
4824 }
4825
4826 static struct cam_ed *
4827 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4828                          lun_id_t lun_id)
4829 {
4830         struct cam_ed *device;
4831
4832         device = xpt_alloc_device(bus, target, lun_id);
4833         if (device == NULL)
4834                 return (NULL);
4835
4836         device->mintags = 1;
4837         device->maxtags = 1;
4838         return (device);
4839 }
4840
4841 static void
4842 xpt_destroy_device(void *context, int pending)
4843 {
4844         struct cam_ed   *device = context;
4845
4846         mtx_lock(&device->device_mtx);
4847         mtx_destroy(&device->device_mtx);
4848         free(device, M_CAMDEV);
4849 }
4850
4851 struct cam_ed *
4852 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4853 {
4854         struct cam_ed   *cur_device, *device;
4855         struct cam_devq *devq;
4856         cam_status status;
4857
4858         mtx_assert(&bus->eb_mtx, MA_OWNED);
4859         /* Make space for us in the device queue on our bus */
4860         devq = bus->sim->devq;
4861         mtx_lock(&devq->send_mtx);
4862         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4863         mtx_unlock(&devq->send_mtx);
4864         if (status != CAM_REQ_CMP)
4865                 return (NULL);
4866
4867         device = (struct cam_ed *)malloc(sizeof(*device),
4868                                          M_CAMDEV, M_NOWAIT|M_ZERO);
4869         if (device == NULL)
4870                 return (NULL);
4871
4872         cam_init_pinfo(&device->devq_entry);
4873         device->target = target;
4874         device->lun_id = lun_id;
4875         device->sim = bus->sim;
4876         if (cam_ccbq_init(&device->ccbq,
4877                           bus->sim->max_dev_openings) != 0) {
4878                 free(device, M_CAMDEV);
4879                 return (NULL);
4880         }
4881         SLIST_INIT(&device->asyncs);
4882         SLIST_INIT(&device->periphs);
4883         device->generation = 0;
4884         device->flags = CAM_DEV_UNCONFIGURED;
4885         device->tag_delay_count = 0;
4886         device->tag_saved_openings = 0;
4887         device->refcount = 1;
4888         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4889         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4890         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4891         /*
4892          * Hold a reference to our parent bus so it
4893          * will not go away before we do.
4894          */
4895         target->refcount++;
4896
4897         cur_device = TAILQ_FIRST(&target->ed_entries);
4898         while (cur_device != NULL && cur_device->lun_id < lun_id)
4899                 cur_device = TAILQ_NEXT(cur_device, links);
4900         if (cur_device != NULL)
4901                 TAILQ_INSERT_BEFORE(cur_device, device, links);
4902         else
4903                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4904         target->generation++;
4905         return (device);
4906 }
4907
4908 void
4909 xpt_acquire_device(struct cam_ed *device)
4910 {
4911         struct cam_eb *bus = device->target->bus;
4912
4913         mtx_lock(&bus->eb_mtx);
4914         device->refcount++;
4915         mtx_unlock(&bus->eb_mtx);
4916 }
4917
4918 void
4919 xpt_release_device(struct cam_ed *device)
4920 {
4921         struct cam_eb *bus = device->target->bus;
4922         struct cam_devq *devq;
4923
4924         mtx_lock(&bus->eb_mtx);
4925         if (--device->refcount > 0) {
4926                 mtx_unlock(&bus->eb_mtx);
4927                 return;
4928         }
4929
4930         TAILQ_REMOVE(&device->target->ed_entries, device, links);
4931         device->target->generation++;
4932         mtx_unlock(&bus->eb_mtx);
4933
4934         /* Release our slot in the devq */
4935         devq = bus->sim->devq;
4936         mtx_lock(&devq->send_mtx);
4937         cam_devq_resize(devq, devq->send_queue.array_size - 1);
4938
4939         KASSERT(SLIST_EMPTY(&device->periphs),
4940             ("destroying device, but periphs list is not empty"));
4941         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4942             ("destroying device while still queued for ccbs"));
4943
4944         /* The send_mtx must be held when accessing the callout */
4945         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4946                 callout_stop(&device->callout);
4947
4948         mtx_unlock(&devq->send_mtx);
4949
4950         xpt_release_target(device->target);
4951
4952         cam_ccbq_fini(&device->ccbq);
4953         /*
4954          * Free allocated memory.  free(9) does nothing if the
4955          * supplied pointer is NULL, so it is safe to call without
4956          * checking.
4957          */
4958         free(device->supported_vpds, M_CAMXPT);
4959         free(device->device_id, M_CAMXPT);
4960         free(device->ext_inq, M_CAMXPT);
4961         free(device->physpath, M_CAMXPT);
4962         free(device->rcap_buf, M_CAMXPT);
4963         free(device->serial_num, M_CAMXPT);
4964         free(device->nvme_data, M_CAMXPT);
4965         free(device->nvme_cdata, M_CAMXPT);
4966         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4967 }
4968
4969 u_int32_t
4970 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4971 {
4972         int     result;
4973         struct  cam_ed *dev;
4974
4975         dev = path->device;
4976         mtx_lock(&dev->sim->devq->send_mtx);
4977         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4978         mtx_unlock(&dev->sim->devq->send_mtx);
4979         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4980          || (dev->inq_flags & SID_CmdQue) != 0)
4981                 dev->tag_saved_openings = newopenings;
4982         return (result);
4983 }
4984
4985 static struct cam_eb *
4986 xpt_find_bus(path_id_t path_id)
4987 {
4988         struct cam_eb *bus;
4989
4990         xpt_lock_buses();
4991         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4992              bus != NULL;
4993              bus = TAILQ_NEXT(bus, links)) {
4994                 if (bus->path_id == path_id) {
4995                         bus->refcount++;
4996                         break;
4997                 }
4998         }
4999         xpt_unlock_buses();
5000         return (bus);
5001 }
5002
5003 static struct cam_et *
5004 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5005 {
5006         struct cam_et *target;
5007
5008         mtx_assert(&bus->eb_mtx, MA_OWNED);
5009         for (target = TAILQ_FIRST(&bus->et_entries);
5010              target != NULL;
5011              target = TAILQ_NEXT(target, links)) {
5012                 if (target->target_id == target_id) {
5013                         target->refcount++;
5014                         break;
5015                 }
5016         }
5017         return (target);
5018 }
5019
5020 static struct cam_ed *
5021 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5022 {
5023         struct cam_ed *device;
5024
5025         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
5026         for (device = TAILQ_FIRST(&target->ed_entries);
5027              device != NULL;
5028              device = TAILQ_NEXT(device, links)) {
5029                 if (device->lun_id == lun_id) {
5030                         device->refcount++;
5031                         break;
5032                 }
5033         }
5034         return (device);
5035 }
5036
5037 void
5038 xpt_start_tags(struct cam_path *path)
5039 {
5040         struct ccb_relsim crs;
5041         struct cam_ed *device;
5042         struct cam_sim *sim;
5043         int    newopenings;
5044
5045         device = path->device;
5046         sim = path->bus->sim;
5047         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5048         xpt_freeze_devq(path, /*count*/1);
5049         device->inq_flags |= SID_CmdQue;
5050         if (device->tag_saved_openings != 0)
5051                 newopenings = device->tag_saved_openings;
5052         else
5053                 newopenings = min(device->maxtags,
5054                                   sim->max_tagged_dev_openings);
5055         xpt_dev_ccbq_resize(path, newopenings);
5056         xpt_async(AC_GETDEV_CHANGED, path, NULL);
5057         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5058         crs.ccb_h.func_code = XPT_REL_SIMQ;
5059         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5060         crs.openings
5061             = crs.release_timeout
5062             = crs.qfrozen_cnt
5063             = 0;
5064         xpt_action((union ccb *)&crs);
5065 }
5066
5067 void
5068 xpt_stop_tags(struct cam_path *path)
5069 {
5070         struct ccb_relsim crs;
5071         struct cam_ed *device;
5072         struct cam_sim *sim;
5073
5074         device = path->device;
5075         sim = path->bus->sim;
5076         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5077         device->tag_delay_count = 0;
5078         xpt_freeze_devq(path, /*count*/1);
5079         device->inq_flags &= ~SID_CmdQue;
5080         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
5081         xpt_async(AC_GETDEV_CHANGED, path, NULL);
5082         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
5083         crs.ccb_h.func_code = XPT_REL_SIMQ;
5084         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5085         crs.openings
5086             = crs.release_timeout
5087             = crs.qfrozen_cnt
5088             = 0;
5089         xpt_action((union ccb *)&crs);
5090 }
5091
5092 /*
5093  * Assume all possible buses have been detected by this time, so allow
5094  * boot to proceed as soon as they have all been scanned.
5095  */
5096 static void
5097 xpt_boot_delay(void *arg)
5098 {
5099
5100         xpt_release_boot();
5101 }
5102
5103 /*
5104  * Now that all config hooks have completed, start the boot_delay timer,
5105  * waiting for possibly still undetected buses (e.g. USB) to appear.
5106  */
5107 static void
5108 xpt_ch_done(void *arg)
5109 {
5110
5111         callout_init(&xsoftc.boot_callout, 1);
5112         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
5113             xpt_boot_delay, NULL, 0);
5114 }
5115 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5116
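/*
 * The delay comes from xsoftc.boot_delay, in milliseconds, which can
 * be set from the loader; e.g. (an illustrative value)
 *
 *	kern.cam.boot_delay="10000"
 *
 * in /boot/loader.conf waits an extra ten seconds for late-attaching
 * buses.
 */
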
5117 /*
5118  * Now that interrupts are enabled, go find our devices
5119  */
5120 static void
5121 xpt_config(void *arg)
5122 {
5123         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5124                 printf("xpt_config: failed to create taskqueue thread.\n");
5125
5126         /* Set up the debugging path. */
5127         if (cam_dflags != CAM_DEBUG_NONE) {
5128                 if (xpt_create_path(&cam_dpath, NULL,
5129                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5130                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5131                         printf("xpt_config: xpt_create_path() failed for debug"
5132                                " target %d:%d:%d, debugging disabled\n",
5133                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5134                         cam_dflags = CAM_DEBUG_NONE;
5135                 }
5136         } else
5137                 cam_dpath = NULL;
5138
5139         periphdriver_init(1);
5140         xpt_hold_boot();
5141
5142         /* Fire up rescan thread. */
5143         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5144             "cam", "scanner")) {
5145                 printf("xpt_config: failed to create rescan thread.\n");
5146         }
5147 }
5148
5149 void
5150 xpt_hold_boot_locked(void)
5151 {
5152
5153         if (xsoftc.buses_to_config++ == 0)
5154                 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5155 }
5156
5157 void
5158 xpt_hold_boot(void)
5159 {
5160
5161         xpt_lock_buses();
5162         xpt_hold_boot_locked();
5163         xpt_unlock_buses();
5164 }
5165
5166 void
5167 xpt_release_boot(void)
5168 {
5169
5170         xpt_lock_buses();
5171         if (--xsoftc.buses_to_config == 0) {
5172                 if (xsoftc.buses_config_done == 0) {
5173                         xsoftc.buses_config_done = 1;
5174                         xsoftc.buses_to_config++;
5175                         TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5176                             NULL);
5177                         taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5178                 } else
5179                         root_mount_rel(&xsoftc.xpt_rootmount);
5180         }
5181         xpt_unlock_buses();
5182 }
5183
5184 /*
5185  * If the given device only has one peripheral attached to it, and if that
5186  * peripheral is the passthrough driver, announce it.  This ensures that the
5187  * user sees some sort of announcement for every peripheral in their system.
5188  */
5189 static int
5190 xptpassannouncefunc(struct cam_ed *device, void *arg)
5191 {
5192         struct cam_periph *periph;
5193         int i;
5194
             /* Count the peripheral drivers attached to this device. */
5195         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5196              periph = SLIST_NEXT(periph, periph_links), i++);
5197
5198         periph = SLIST_FIRST(&device->periphs);
5199         if ((i == 1)
5200          && (strncmp(periph->periph_name, "pass", 4) == 0))
5201                 xpt_announce_periph(periph, NULL);
5202
5203         return(1);
5204 }
5205
5206 static void
5207 xpt_finishconfig_task(void *context, int pending)
5208 {
5209
5210         periphdriver_init(2);
5211         /*
5212          * Check for devices with no "standard" peripheral driver
5213          * attached.  For any devices like that, announce the
5214          * passthrough driver so the user will see something.
5215          */
5216         if (!bootverbose)
5217                 xpt_for_all_devices(xptpassannouncefunc, NULL);
5218
5219         xpt_release_boot();
5220 }
5221
5222 cam_status
5223 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5224                    struct cam_path *path)
5225 {
5226         struct ccb_setasync csa;
5227         cam_status status;
5228         int xptpath = 0;
5229
5230         if (path == NULL) {
5231                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5232                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5233                 if (status != CAM_REQ_CMP)
5234                         return (status);
5235                 xpt_path_lock(path);
5236                 xptpath = 1;
5237         }
5238
5239         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5240         csa.ccb_h.func_code = XPT_SASYNC_CB;
5241         csa.event_enable = event;
5242         csa.callback = cbfunc;
5243         csa.callback_arg = cbarg;
5244         xpt_action((union ccb *)&csa);
5245         status = csa.ccb_h.status;
5246
5247         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5248             ("xpt_register_async: func %p\n", cbfunc));
5249
5250         if (xptpath) {
5251                 xpt_path_unlock(path);
5252                 xpt_free_path(path);
5253         }
5254
5255         if ((status == CAM_REQ_CMP) &&
5256             (csa.event_enable & AC_FOUND_DEVICE)) {
5257                 /*
5258                  * Get this peripheral up to date with all
5259                  * the currently existing devices.
5260                  */
5261                 xpt_for_all_devices(xptsetasyncfunc, &csa);
5262         }
5263         if ((status == CAM_REQ_CMP) &&
5264             (csa.event_enable & AC_PATH_REGISTERED)) {
5265                 /*
5266                  * Get this peripheral up to date with all
5267                  * the currently existing buses.
5268                  */
5269                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5270         }
5271
5272         return (status);
5273 }
5274
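/*
 * Illustrative registration (a sketch with an assumed callback name):
 * a peripheral driver usually subscribes from its init routine:
 *
 *	status = xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *	    fooasync, NULL, NULL);
 *
 * Passing a NULL path registers against the XPT wildcard path, and
 * because AC_FOUND_DEVICE is set the callback is also replayed for
 * every device that already exists.
 */
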
5275 static void
5276 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5277 {
5278         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5279
5280         switch (work_ccb->ccb_h.func_code) {
5281         /* Common cases first */
5282         case XPT_PATH_INQ:              /* Path routing inquiry */
5283         {
5284                 struct ccb_pathinq *cpi;
5285
5286                 cpi = &work_ccb->cpi;
5287                 cpi->version_num = 1; /* XXX??? */
5288                 cpi->hba_inquiry = 0;
5289                 cpi->target_sprt = 0;
5290                 cpi->hba_misc = 0;
5291                 cpi->hba_eng_cnt = 0;
5292                 cpi->max_target = 0;
5293                 cpi->max_lun = 0;
5294                 cpi->initiator_id = 0;
5295                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5296                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5297                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5298                 cpi->unit_number = sim->unit_number;
5299                 cpi->bus_id = sim->bus_id;
5300                 cpi->base_transfer_speed = 0;
5301                 cpi->protocol = PROTO_UNSPECIFIED;
5302                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5303                 cpi->transport = XPORT_UNSPECIFIED;
5304                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5305                 cpi->ccb_h.status = CAM_REQ_CMP;
5306                 break;
5307         }
5308         default:
5309                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5310                 break;
5311         }
5312         xpt_done(work_ccb);
5313 }
5314
5315 /*
5316  * The xpt as a "controller" has no interrupt sources, so polling
5317  * is a no-op.
5318  */
5319 static void
5320 xptpoll(struct cam_sim *sim)
5321 {
5322 }
5323
5324 void
5325 xpt_lock_buses(void)
5326 {
5327         mtx_lock(&xsoftc.xpt_topo_lock);
5328 }
5329
5330 void
5331 xpt_unlock_buses(void)
5332 {
5333         mtx_unlock(&xsoftc.xpt_topo_lock);
5334 }
5335
5336 struct mtx *
5337 xpt_path_mtx(struct cam_path *path)
5338 {
5339
5340         return (&path->device->device_mtx);
5341 }
5342
5343 static void
5344 xpt_done_process(struct ccb_hdr *ccb_h)
5345 {
5346         struct cam_sim *sim = NULL;
5347         struct cam_devq *devq = NULL;
5348         struct mtx *mtx = NULL;
5349
5350 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5351         struct ccb_scsiio *csio;
5352
5353         if (ccb_h->func_code == XPT_SCSI_IO) {
5354                 csio = &((union ccb *)ccb_h)->csio;
5355                 if (csio->bio != NULL)
5356                         biotrack(csio->bio, __func__);
5357         }
5358 #endif
5359
5360         if (ccb_h->flags & CAM_HIGH_POWER) {
5361                 struct highpowerlist    *hphead;
5362                 struct cam_ed           *device;
5363
5364                 mtx_lock(&xsoftc.xpt_highpower_lock);
5365                 hphead = &xsoftc.highpowerq;
5366
5367                 device = STAILQ_FIRST(hphead);
5368
5369                 /*
5370                  * Increment the count since this command is done.
5371                  */
5372                 xsoftc.num_highpower++;
5373
5374                 /*
5375                  * Any high powered commands queued up?
5376                  */
5377                 if (device != NULL) {
5378                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5379                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5380
5381                         mtx_lock(&device->sim->devq->send_mtx);
5382                         xpt_release_devq_device(device,
5383                                          /*count*/1, /*runqueue*/TRUE);
5384                         mtx_unlock(&device->sim->devq->send_mtx);
5385                 } else
5386                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5387         }
5388
5389         /*
5390          * Insulate against a race where the periph is destroyed but CCBs are
5391          * still not all processed.  This shouldn't happen, but it allows for
5392          * better bug diagnostics when it does.
5393          */
5394         if (ccb_h->path->bus)
5395                 sim = ccb_h->path->bus->sim;
5396
5397         if (ccb_h->status & CAM_RELEASE_SIMQ) {
5398                 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request"));
5399                 xpt_release_simq(sim, /*run_queue*/FALSE);
5400                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
5401         }
5402
5403         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5404          && (ccb_h->status & CAM_DEV_QFRZN)) {
5405                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5406                 ccb_h->status &= ~CAM_DEV_QFRZN;
5407         }
5408
5409         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5410                 struct cam_ed *dev = ccb_h->path->device;
5411
5412                 if (sim)
5413                         devq = sim->devq;
5414                 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.",
5415                         ccb_h, xpt_action_name(ccb_h->func_code)));
5416
5417                 mtx_lock(&devq->send_mtx);
5418                 devq->send_active--;
5419                 devq->send_openings++;
5420                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5421
5422                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5423                   && (dev->ccbq.dev_active == 0))) {
5424                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5425                         xpt_release_devq_device(dev, /*count*/1,
5426                                          /*run_queue*/FALSE);
5427                 }
5428
5429                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5430                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5431                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5432                         xpt_release_devq_device(dev, /*count*/1,
5433                                          /*run_queue*/FALSE);
5434                 }
5435
5436                 if (!device_is_queued(dev))
5437                         (void)xpt_schedule_devq(devq, dev);
5438                 xpt_run_devq(devq);
5439                 mtx_unlock(&devq->send_mtx);
5440
5441                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5442                         mtx = xpt_path_mtx(ccb_h->path);
5443                         mtx_lock(mtx);
5444
5445                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5446                          && (--dev->tag_delay_count == 0))
5447                                 xpt_start_tags(ccb_h->path);
5448                 }
5449         }
5450
5451         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5452                 if (mtx == NULL) {
5453                         mtx = xpt_path_mtx(ccb_h->path);
5454                         mtx_lock(mtx);
5455                 }
5456         } else {
5457                 if (mtx != NULL) {
5458                         mtx_unlock(mtx);
5459                         mtx = NULL;
5460                 }
5461         }
5462
5463         /* Call the peripheral driver's callback */
5464         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5465         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5466         if (mtx != NULL)
5467                 mtx_unlock(mtx);
5468 }
5469
5470 void
5471 xpt_done_td(void *arg)
5472 {
5473         struct cam_doneq *queue = arg;
5474         struct ccb_hdr *ccb_h;
5475         STAILQ_HEAD(, ccb_hdr)  doneq;
5476
5477         STAILQ_INIT(&doneq);
5478         mtx_lock(&queue->cam_doneq_mtx);
5479         while (1) {
5480                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5481                         queue->cam_doneq_sleep = 1;
5482                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5483                             PRIBIO, "-", 0);
5484                         queue->cam_doneq_sleep = 0;
5485                 }
5486                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5487                 mtx_unlock(&queue->cam_doneq_mtx);
5488
5489                 THREAD_NO_SLEEPING();
5490                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5491                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5492                         xpt_done_process(ccb_h);
5493                 }
5494                 THREAD_SLEEPING_OK();
5495
5496                 mtx_lock(&queue->cam_doneq_mtx);
5497         }
5498 }
5499
5500 static void
5501 camisr_runqueue(void)
5502 {
5503         struct  ccb_hdr *ccb_h;
5504         struct cam_doneq *queue;
5505         int i;
5506
5507         /* Process global queues. */
5508         for (i = 0; i < cam_num_doneqs; i++) {
5509                 queue = &cam_doneqs[i];
5510                 mtx_lock(&queue->cam_doneq_mtx);
5511                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5512                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5513                         mtx_unlock(&queue->cam_doneq_mtx);
5514                         xpt_done_process(ccb_h);
5515                         mtx_lock(&queue->cam_doneq_mtx);
5516                 }
5517                 mtx_unlock(&queue->cam_doneq_mtx);
5518         }
5519 }
5520
5521 struct kv {
5523         uint32_t v;
5524         const char *name;
5525 };
5526
5527 static struct kv map[] = {
5528         { XPT_NOOP, "XPT_NOOP" },
5529         { XPT_SCSI_IO, "XPT_SCSI_IO" },
5530         { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5531         { XPT_GDEVLIST, "XPT_GDEVLIST" },
5532         { XPT_PATH_INQ, "XPT_PATH_INQ" },
5533         { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5534         { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5535         { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5536         { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5537         { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5538         { XPT_DEBUG, "XPT_DEBUG" },
5539         { XPT_PATH_STATS, "XPT_PATH_STATS" },
5540         { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5541         { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5542         { XPT_ASYNC, "XPT_ASYNC" },
5543         { XPT_ABORT, "XPT_ABORT" },
5544         { XPT_RESET_BUS, "XPT_RESET_BUS" },
5545         { XPT_RESET_DEV, "XPT_RESET_DEV" },
5546         { XPT_TERM_IO, "XPT_TERM_IO" },
5547         { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5548         { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5549         { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5550         { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5551         { XPT_ATA_IO, "XPT_ATA_IO" },
5552         { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5553         { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5554         { XPT_NVME_IO, "XPT_NVME_IO" },
5555         { XPT_MMC_IO, "XPT_MMC_IO" },
5556         { XPT_SMP_IO, "XPT_SMP_IO" },
5557         { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5558         { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5559         { XPT_ENG_INQ, "XPT_ENG_INQ" },
5560         { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5561         { XPT_EN_LUN, "XPT_EN_LUN" },
5562         { XPT_TARGET_IO, "XPT_TARGET_IO" },
5563         { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5564         { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5565         { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5566         { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5567         { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5568         { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5569         { 0, 0 }
5570 };
5571
5572 const char *
5573 xpt_action_name(uint32_t action) 
5574 {
5575         static char buffer[32]; /* Only for unknown messages -- racy */
5576         struct kv *walker = map;
5577
5578         while (walker->name != NULL) {
5579                 if (walker->v == action)
5580                         return (walker->name);
5581                 walker++;
5582         }
5583
5584         snprintf(buffer, sizeof(buffer), "%#x", action);
5585         return (buffer);
5586 }