]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/cam/cam_xpt.c
Document three open issues affecting 11.3-RELEASE.
[FreeBSD/FreeBSD.git] / sys / cam / cam_xpt.c
1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <sys/proc.h>
43 #include <sys/sbuf.h>
44 #include <sys/smp.h>
45 #include <sys/taskqueue.h>
46
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/sysctl.h>
50 #include <sys/kthread.h>
51
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_queue.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_xpt_internal.h>
61 #include <cam/cam_debug.h>
62 #include <cam/cam_compat.h>
63
64 #include <cam/scsi/scsi_all.h>
65 #include <cam/scsi/scsi_message.h>
66 #include <cam/scsi/scsi_pass.h>
67
68 #include <machine/md_var.h>     /* geometry translation */
69 #include <machine/stdarg.h>     /* for xpt_print below */
70
71 #include "opt_cam.h"
72
73 /*
74  * This is the maximum number of high powered commands (e.g. start unit)
75  * that can be outstanding at a particular time.
76  */
77 #ifndef CAM_MAX_HIGHPOWER
78 #define CAM_MAX_HIGHPOWER  4
79 #endif
80
81 /* Datastructures internal to the xpt layer */
82 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
83 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
84 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
85 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
86
87 /* Object for defering XPT actions to a taskqueue */
88 struct xpt_task {
89         struct task     task;
90         void            *data1;
91         uintptr_t       data2;
92 };
93
94 struct xpt_softc {
95         uint32_t                xpt_generation;
96
97         /* number of high powered commands that can go through right now */
98         struct mtx              xpt_highpower_lock;
99         STAILQ_HEAD(highpowerlist, cam_ed)      highpowerq;
100         int                     num_highpower;
101
102         /* queue for handling async rescan requests. */
103         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
104         int buses_to_config;
105         int buses_config_done;
106
107         /* Registered busses */
108         TAILQ_HEAD(,cam_eb)     xpt_busses;
109         u_int                   bus_generation;
110
111         struct intr_config_hook *xpt_config_hook;
112
113         int                     boot_delay;
114         struct callout          boot_callout;
115
116         struct mtx              xpt_topo_lock;
117         struct mtx              xpt_lock;
118         struct taskqueue        *xpt_taskq;
119 };
120
121 typedef enum {
122         DM_RET_COPY             = 0x01,
123         DM_RET_FLAG_MASK        = 0x0f,
124         DM_RET_NONE             = 0x00,
125         DM_RET_STOP             = 0x10,
126         DM_RET_DESCEND          = 0x20,
127         DM_RET_ERROR            = 0x30,
128         DM_RET_ACTION_MASK      = 0xf0
129 } dev_match_ret;
130
131 typedef enum {
132         XPT_DEPTH_BUS,
133         XPT_DEPTH_TARGET,
134         XPT_DEPTH_DEVICE,
135         XPT_DEPTH_PERIPH
136 } xpt_traverse_depth;
137
138 struct xpt_traverse_config {
139         xpt_traverse_depth      depth;
140         void                    *tr_func;
141         void                    *tr_arg;
142 };
143
144 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
145 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
146 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
147 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
148 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
149
150 /* Transport layer configuration information */
151 static struct xpt_softc xsoftc;
152
153 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
154
155 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
156            &xsoftc.boot_delay, 0, "Bus registration wait time");
157 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
158             &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
159
160 struct cam_doneq {
161         struct mtx_padalign     cam_doneq_mtx;
162         STAILQ_HEAD(, ccb_hdr)  cam_doneq;
163         int                     cam_doneq_sleep;
164 };
165
166 static struct cam_doneq cam_doneqs[MAXCPU];
167 static int cam_num_doneqs;
168 static struct proc *cam_proc;
169
170 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
171            &cam_num_doneqs, 0, "Number of completion queues/threads");
172
173 struct cam_periph *xpt_periph;
174
175 static periph_init_t xpt_periph_init;
176
177 static struct periph_driver xpt_driver =
178 {
179         xpt_periph_init, "xpt",
180         TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
181         CAM_PERIPH_DRV_EARLY
182 };
183
184 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
185
186 static d_open_t xptopen;
187 static d_close_t xptclose;
188 static d_ioctl_t xptioctl;
189 static d_ioctl_t xptdoioctl;
190
191 static struct cdevsw xpt_cdevsw = {
192         .d_version =    D_VERSION,
193         .d_flags =      0,
194         .d_open =       xptopen,
195         .d_close =      xptclose,
196         .d_ioctl =      xptioctl,
197         .d_name =       "xpt",
198 };
199
200 /* Storage for debugging datastructures */
201 struct cam_path *cam_dpath;
202 u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
203 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
204         &cam_dflags, 0, "Enabled debug flags");
205 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
206 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
207         &cam_debug_delay, 0, "Delay in us after each debug message");
208
209 /* Our boot-time initialization hook */
210 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
211
212 static moduledata_t cam_moduledata = {
213         "cam",
214         cam_module_event_handler,
215         NULL
216 };
217
218 static int      xpt_init(void *);
219
220 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
221 MODULE_VERSION(cam, 1);
222
223
224 static void             xpt_async_bcast(struct async_list *async_head,
225                                         u_int32_t async_code,
226                                         struct cam_path *path,
227                                         void *async_arg);
228 static path_id_t xptnextfreepathid(void);
229 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
230 static union ccb *xpt_get_ccb(struct cam_periph *periph);
231 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
232 static void      xpt_run_allocq(struct cam_periph *periph, int sleep);
233 static void      xpt_run_allocq_task(void *context, int pending);
234 static void      xpt_run_devq(struct cam_devq *devq);
235 static timeout_t xpt_release_devq_timeout;
236 static void      xpt_release_simq_timeout(void *arg) __unused;
237 static void      xpt_acquire_bus(struct cam_eb *bus);
238 static void      xpt_release_bus(struct cam_eb *bus);
239 static uint32_t  xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
240 static int       xpt_release_devq_device(struct cam_ed *dev, u_int count,
241                     int run_queue);
242 static struct cam_et*
243                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
244 static void      xpt_acquire_target(struct cam_et *target);
245 static void      xpt_release_target(struct cam_et *target);
246 static struct cam_eb*
247                  xpt_find_bus(path_id_t path_id);
248 static struct cam_et*
249                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
250 static struct cam_ed*
251                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
252 static void      xpt_config(void *arg);
253 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
254                                  u_int32_t new_priority);
255 static xpt_devicefunc_t xptpassannouncefunc;
256 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
257 static void      xptpoll(struct cam_sim *sim);
258 static void      camisr_runqueue(void);
259 static void      xpt_done_process(struct ccb_hdr *ccb_h);
260 static void      xpt_done_td(void *);
261 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
262                                     u_int num_patterns, struct cam_eb *bus);
263 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
264                                        u_int num_patterns,
265                                        struct cam_ed *device);
266 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
267                                        u_int num_patterns,
268                                        struct cam_periph *periph);
269 static xpt_busfunc_t    xptedtbusfunc;
270 static xpt_targetfunc_t xptedttargetfunc;
271 static xpt_devicefunc_t xptedtdevicefunc;
272 static xpt_periphfunc_t xptedtperiphfunc;
273 static xpt_pdrvfunc_t   xptplistpdrvfunc;
274 static xpt_periphfunc_t xptplistperiphfunc;
275 static int              xptedtmatch(struct ccb_dev_match *cdm);
276 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
277 static int              xptbustraverse(struct cam_eb *start_bus,
278                                        xpt_busfunc_t *tr_func, void *arg);
279 static int              xpttargettraverse(struct cam_eb *bus,
280                                           struct cam_et *start_target,
281                                           xpt_targetfunc_t *tr_func, void *arg);
282 static int              xptdevicetraverse(struct cam_et *target,
283                                           struct cam_ed *start_device,
284                                           xpt_devicefunc_t *tr_func, void *arg);
285 static int              xptperiphtraverse(struct cam_ed *device,
286                                           struct cam_periph *start_periph,
287                                           xpt_periphfunc_t *tr_func, void *arg);
288 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
289                                         xpt_pdrvfunc_t *tr_func, void *arg);
290 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
291                                             struct cam_periph *start_periph,
292                                             xpt_periphfunc_t *tr_func,
293                                             void *arg);
294 static xpt_busfunc_t    xptdefbusfunc;
295 static xpt_targetfunc_t xptdeftargetfunc;
296 static xpt_devicefunc_t xptdefdevicefunc;
297 static xpt_periphfunc_t xptdefperiphfunc;
298 static void             xpt_finishconfig_task(void *context, int pending);
299 static void             xpt_dev_async_default(u_int32_t async_code,
300                                               struct cam_eb *bus,
301                                               struct cam_et *target,
302                                               struct cam_ed *device,
303                                               void *async_arg);
304 static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
305                                                  struct cam_et *target,
306                                                  lun_id_t lun_id);
307 static xpt_devicefunc_t xptsetasyncfunc;
308 static xpt_busfunc_t    xptsetasyncbusfunc;
309 static cam_status       xptregister(struct cam_periph *periph,
310                                     void *arg);
311 static const char *     xpt_action_name(uint32_t action);
312 static __inline int device_is_queued(struct cam_ed *device);
313
314 static __inline int
315 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
316 {
317         int     retval;
318
319         mtx_assert(&devq->send_mtx, MA_OWNED);
320         if ((dev->ccbq.queue.entries > 0) &&
321             (dev->ccbq.dev_openings > 0) &&
322             (dev->ccbq.queue.qfrozen_cnt == 0)) {
323                 /*
324                  * The priority of a device waiting for controller
325                  * resources is that of the highest priority CCB
326                  * enqueued.
327                  */
328                 retval =
329                     xpt_schedule_dev(&devq->send_queue,
330                                      &dev->devq_entry,
331                                      CAMQ_GET_PRIO(&dev->ccbq.queue));
332         } else {
333                 retval = 0;
334         }
335         return (retval);
336 }
337
338 static __inline int
339 device_is_queued(struct cam_ed *device)
340 {
341         return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
342 }
343
344 static void
345 xpt_periph_init()
346 {
347         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
348 }
349
350 static int
351 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
352 {
353
354         /*
355          * Only allow read-write access.
356          */
357         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
358                 return(EPERM);
359
360         /*
361          * We don't allow nonblocking access.
362          */
363         if ((flags & O_NONBLOCK) != 0) {
364                 printf("%s: can't do nonblocking access\n", devtoname(dev));
365                 return(ENODEV);
366         }
367
368         return(0);
369 }
370
371 static int
372 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
373 {
374
375         return(0);
376 }
377
378 /*
379  * Don't automatically grab the xpt softc lock here even though this is going
380  * through the xpt device.  The xpt device is really just a back door for
381  * accessing other devices and SIMs, so the right thing to do is to grab
382  * the appropriate SIM lock once the bus/SIM is located.
383  */
384 static int
385 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
386 {
387         int error;
388
389         if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
390                 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
391         }
392         return (error);
393 }
394         
395 static int
396 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
397 {
398         int error;
399
400         error = 0;
401
402         switch(cmd) {
403         /*
404          * For the transport layer CAMIOCOMMAND ioctl, we really only want
405          * to accept CCB types that don't quite make sense to send through a
406          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
407          * in the CAM spec.
408          */
409         case CAMIOCOMMAND: {
410                 union ccb *ccb;
411                 union ccb *inccb;
412                 struct cam_eb *bus;
413
414                 inccb = (union ccb *)addr;
415
416                 if (inccb->ccb_h.flags & CAM_UNLOCKED)
417                         return (EINVAL);
418
419                 bus = xpt_find_bus(inccb->ccb_h.path_id);
420                 if (bus == NULL)
421                         return (EINVAL);
422
423                 switch (inccb->ccb_h.func_code) {
424                 case XPT_SCAN_BUS:
425                 case XPT_RESET_BUS:
426                         if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
427                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
428                                 xpt_release_bus(bus);
429                                 return (EINVAL);
430                         }
431                         break;
432                 case XPT_SCAN_TGT:
433                         if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
434                             inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
435                                 xpt_release_bus(bus);
436                                 return (EINVAL);
437                         }
438                         break;
439                 default:
440                         break;
441                 }
442
443                 switch(inccb->ccb_h.func_code) {
444                 case XPT_SCAN_BUS:
445                 case XPT_RESET_BUS:
446                 case XPT_PATH_INQ:
447                 case XPT_ENG_INQ:
448                 case XPT_SCAN_LUN:
449                 case XPT_SCAN_TGT:
450
451                         ccb = xpt_alloc_ccb();
452
453                         /*
454                          * Create a path using the bus, target, and lun the
455                          * user passed in.
456                          */
457                         if (xpt_create_path(&ccb->ccb_h.path, NULL,
458                                             inccb->ccb_h.path_id,
459                                             inccb->ccb_h.target_id,
460                                             inccb->ccb_h.target_lun) !=
461                                             CAM_REQ_CMP){
462                                 error = EINVAL;
463                                 xpt_free_ccb(ccb);
464                                 break;
465                         }
466                         /* Ensure all of our fields are correct */
467                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
468                                       inccb->ccb_h.pinfo.priority);
469                         xpt_merge_ccb(ccb, inccb);
470                         xpt_path_lock(ccb->ccb_h.path);
471                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
472                         xpt_path_unlock(ccb->ccb_h.path);
473                         bcopy(ccb, inccb, sizeof(union ccb));
474                         xpt_free_path(ccb->ccb_h.path);
475                         xpt_free_ccb(ccb);
476                         break;
477
478                 case XPT_DEBUG: {
479                         union ccb ccb;
480
481                         /*
482                          * This is an immediate CCB, so it's okay to
483                          * allocate it on the stack.
484                          */
485
486                         /*
487                          * Create a path using the bus, target, and lun the
488                          * user passed in.
489                          */
490                         if (xpt_create_path(&ccb.ccb_h.path, NULL,
491                                             inccb->ccb_h.path_id,
492                                             inccb->ccb_h.target_id,
493                                             inccb->ccb_h.target_lun) !=
494                                             CAM_REQ_CMP){
495                                 error = EINVAL;
496                                 break;
497                         }
498                         /* Ensure all of our fields are correct */
499                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
500                                       inccb->ccb_h.pinfo.priority);
501                         xpt_merge_ccb(&ccb, inccb);
502                         xpt_action(&ccb);
503                         bcopy(&ccb, inccb, sizeof(union ccb));
504                         xpt_free_path(ccb.ccb_h.path);
505                         break;
506
507                 }
508                 case XPT_DEV_MATCH: {
509                         struct cam_periph_map_info mapinfo;
510                         struct cam_path *old_path;
511
512                         /*
513                          * We can't deal with physical addresses for this
514                          * type of transaction.
515                          */
516                         if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
517                             CAM_DATA_VADDR) {
518                                 error = EINVAL;
519                                 break;
520                         }
521
522                         /*
523                          * Save this in case the caller had it set to
524                          * something in particular.
525                          */
526                         old_path = inccb->ccb_h.path;
527
528                         /*
529                          * We really don't need a path for the matching
530                          * code.  The path is needed because of the
531                          * debugging statements in xpt_action().  They
532                          * assume that the CCB has a valid path.
533                          */
534                         inccb->ccb_h.path = xpt_periph->path;
535
536                         bzero(&mapinfo, sizeof(mapinfo));
537
538                         /*
539                          * Map the pattern and match buffers into kernel
540                          * virtual address space.
541                          */
542                         error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
543
544                         if (error) {
545                                 inccb->ccb_h.path = old_path;
546                                 break;
547                         }
548
549                         /*
550                          * This is an immediate CCB, we can send it on directly.
551                          */
552                         xpt_action(inccb);
553
554                         /*
555                          * Map the buffers back into user space.
556                          */
557                         cam_periph_unmapmem(inccb, &mapinfo);
558
559                         inccb->ccb_h.path = old_path;
560
561                         error = 0;
562                         break;
563                 }
564                 default:
565                         error = ENOTSUP;
566                         break;
567                 }
568                 xpt_release_bus(bus);
569                 break;
570         }
571         /*
572          * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
573          * with the periphal driver name and unit name filled in.  The other
574          * fields don't really matter as input.  The passthrough driver name
575          * ("pass"), and unit number are passed back in the ccb.  The current
576          * device generation number, and the index into the device peripheral
577          * driver list, and the status are also passed back.  Note that
578          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
579          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
580          * (or rather should be) impossible for the device peripheral driver
581          * list to change since we look at the whole thing in one pass, and
582          * we do it with lock protection.
583          *
584          */
585         case CAMGETPASSTHRU: {
586                 union ccb *ccb;
587                 struct cam_periph *periph;
588                 struct periph_driver **p_drv;
589                 char   *name;
590                 u_int unit;
591                 int base_periph_found;
592
593                 ccb = (union ccb *)addr;
594                 unit = ccb->cgdl.unit_number;
595                 name = ccb->cgdl.periph_name;
596                 base_periph_found = 0;
597
598                 /*
599                  * Sanity check -- make sure we don't get a null peripheral
600                  * driver name.
601                  */
602                 if (*ccb->cgdl.periph_name == '\0') {
603                         error = EINVAL;
604                         break;
605                 }
606
607                 /* Keep the list from changing while we traverse it */
608                 xpt_lock_buses();
609
610                 /* first find our driver in the list of drivers */
611                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
612                         if (strcmp((*p_drv)->driver_name, name) == 0)
613                                 break;
614
615                 if (*p_drv == NULL) {
616                         xpt_unlock_buses();
617                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
618                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
619                         *ccb->cgdl.periph_name = '\0';
620                         ccb->cgdl.unit_number = 0;
621                         error = ENOENT;
622                         break;
623                 }
624
625                 /*
626                  * Run through every peripheral instance of this driver
627                  * and check to see whether it matches the unit passed
628                  * in by the user.  If it does, get out of the loops and
629                  * find the passthrough driver associated with that
630                  * peripheral driver.
631                  */
632                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
633                      periph = TAILQ_NEXT(periph, unit_links)) {
634
635                         if (periph->unit_number == unit)
636                                 break;
637                 }
638                 /*
639                  * If we found the peripheral driver that the user passed
640                  * in, go through all of the peripheral drivers for that
641                  * particular device and look for a passthrough driver.
642                  */
643                 if (periph != NULL) {
644                         struct cam_ed *device;
645                         int i;
646
647                         base_periph_found = 1;
648                         device = periph->path->device;
649                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
650                              periph != NULL;
651                              periph = SLIST_NEXT(periph, periph_links), i++) {
652                                 /*
653                                  * Check to see whether we have a
654                                  * passthrough device or not.
655                                  */
656                                 if (strcmp(periph->periph_name, "pass") == 0) {
657                                         /*
658                                          * Fill in the getdevlist fields.
659                                          */
660                                         strlcpy(ccb->cgdl.periph_name,
661                                                periph->periph_name,
662                                                sizeof(ccb->cgdl.periph_name));
663                                         ccb->cgdl.unit_number =
664                                                 periph->unit_number;
665                                         if (SLIST_NEXT(periph, periph_links))
666                                                 ccb->cgdl.status =
667                                                         CAM_GDEVLIST_MORE_DEVS;
668                                         else
669                                                 ccb->cgdl.status =
670                                                        CAM_GDEVLIST_LAST_DEVICE;
671                                         ccb->cgdl.generation =
672                                                 device->generation;
673                                         ccb->cgdl.index = i;
674                                         /*
675                                          * Fill in some CCB header fields
676                                          * that the user may want.
677                                          */
678                                         ccb->ccb_h.path_id =
679                                                 periph->path->bus->path_id;
680                                         ccb->ccb_h.target_id =
681                                                 periph->path->target->target_id;
682                                         ccb->ccb_h.target_lun =
683                                                 periph->path->device->lun_id;
684                                         ccb->ccb_h.status = CAM_REQ_CMP;
685                                         break;
686                                 }
687                         }
688                 }
689
690                 /*
691                  * If the periph is null here, one of two things has
692                  * happened.  The first possibility is that we couldn't
693                  * find the unit number of the particular peripheral driver
694                  * that the user is asking about.  e.g. the user asks for
695                  * the passthrough driver for "da11".  We find the list of
696                  * "da" peripherals all right, but there is no unit 11.
697                  * The other possibility is that we went through the list
698                  * of peripheral drivers attached to the device structure,
699                  * but didn't find one with the name "pass".  Either way,
700                  * we return ENOENT, since we couldn't find something.
701                  */
702                 if (periph == NULL) {
703                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
704                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
705                         *ccb->cgdl.periph_name = '\0';
706                         ccb->cgdl.unit_number = 0;
707                         error = ENOENT;
708                         /*
709                          * It is unfortunate that this is even necessary,
710                          * but there are many, many clueless users out there.
711                          * If this is true, the user is looking for the
712                          * passthrough driver, but doesn't have one in his
713                          * kernel.
714                          */
715                         if (base_periph_found == 1) {
716                                 printf("xptioctl: pass driver is not in the "
717                                        "kernel\n");
718                                 printf("xptioctl: put \"device pass\" in "
719                                        "your kernel config file\n");
720                         }
721                 }
722                 xpt_unlock_buses();
723                 break;
724                 }
725         default:
726                 error = ENOTTY;
727                 break;
728         }
729
730         return(error);
731 }
732
733 static int
734 cam_module_event_handler(module_t mod, int what, void *arg)
735 {
736         int error;
737
738         switch (what) {
739         case MOD_LOAD:
740                 if ((error = xpt_init(NULL)) != 0)
741                         return (error);
742                 break;
743         case MOD_UNLOAD:
744                 return EBUSY;
745         default:
746                 return EOPNOTSUPP;
747         }
748
749         return 0;
750 }
751
752 static struct xpt_proto *
753 xpt_proto_find(cam_proto proto)
754 {
755         struct xpt_proto **pp;
756
757         SET_FOREACH(pp, cam_xpt_proto_set) {
758                 if ((*pp)->proto == proto)
759                         return *pp;
760         }
761
762         return NULL;
763 }
764
765 static void
766 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
767 {
768
769         if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
770                 xpt_free_path(done_ccb->ccb_h.path);
771                 xpt_free_ccb(done_ccb);
772         } else {
773                 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
774                 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
775         }
776         xpt_release_boot();
777 }
778
779 /* thread to handle bus rescans */
780 static void
781 xpt_scanner_thread(void *dummy)
782 {
783         union ccb       *ccb;
784         struct cam_path  path;
785
786         xpt_lock_buses();
787         for (;;) {
788                 if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
789                         msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
790                                "-", 0);
791                 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
792                         TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
793                         xpt_unlock_buses();
794
795                         /*
796                          * Since lock can be dropped inside and path freed
797                          * by completion callback even before return here,
798                          * take our own path copy for reference.
799                          */
800                         xpt_copy_path(&path, ccb->ccb_h.path);
801                         xpt_path_lock(&path);
802                         xpt_action(ccb);
803                         xpt_path_unlock(&path);
804                         xpt_release_path(&path);
805
806                         xpt_lock_buses();
807                 }
808         }
809 }
810
811 void
812 xpt_rescan(union ccb *ccb)
813 {
814         struct ccb_hdr *hdr;
815
816         /* Prepare request */
817         if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
818             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
819                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
820         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
821             ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
822                 ccb->ccb_h.func_code = XPT_SCAN_TGT;
823         else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
824             ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
825                 ccb->ccb_h.func_code = XPT_SCAN_LUN;
826         else {
827                 xpt_print(ccb->ccb_h.path, "illegal scan path\n");
828                 xpt_free_path(ccb->ccb_h.path);
829                 xpt_free_ccb(ccb);
830                 return;
831         }
832         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
833             ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
834                 xpt_action_name(ccb->ccb_h.func_code)));
835
836         ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
837         ccb->ccb_h.cbfcnp = xpt_rescan_done;
838         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
839         /* Don't make duplicate entries for the same paths. */
840         xpt_lock_buses();
841         if (ccb->ccb_h.ppriv_ptr1 == NULL) {
842                 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
843                         if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
844                                 wakeup(&xsoftc.ccb_scanq);
845                                 xpt_unlock_buses();
846                                 xpt_print(ccb->ccb_h.path, "rescan already queued\n");
847                                 xpt_free_path(ccb->ccb_h.path);
848                                 xpt_free_ccb(ccb);
849                                 return;
850                         }
851                 }
852         }
853         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
854         xsoftc.buses_to_config++;
855         wakeup(&xsoftc.ccb_scanq);
856         xpt_unlock_buses();
857 }
858
859 /* Functions accessed by the peripheral drivers */
860 static int
861 xpt_init(void *dummy)
862 {
863         struct cam_sim *xpt_sim;
864         struct cam_path *path;
865         struct cam_devq *devq;
866         cam_status status;
867         int error, i;
868
869         TAILQ_INIT(&xsoftc.xpt_busses);
870         TAILQ_INIT(&xsoftc.ccb_scanq);
871         STAILQ_INIT(&xsoftc.highpowerq);
872         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
873
874         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
875         mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
876         xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
877             taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
878
879 #ifdef CAM_BOOT_DELAY
880         /*
881          * Override this value at compile time to assist our users
882          * who don't use loader to boot a kernel.
883          */
884         xsoftc.boot_delay = CAM_BOOT_DELAY;
885 #endif
886         /*
887          * The xpt layer is, itself, the equivalent of a SIM.
888          * Allow 16 ccbs in the ccb pool for it.  This should
889          * give decent parallelism when we probe busses and
890          * perform other XPT functions.
891          */
892         devq = cam_simq_alloc(16);
893         xpt_sim = cam_sim_alloc(xptaction,
894                                 xptpoll,
895                                 "xpt",
896                                 /*softc*/NULL,
897                                 /*unit*/0,
898                                 /*mtx*/&xsoftc.xpt_lock,
899                                 /*max_dev_transactions*/0,
900                                 /*max_tagged_dev_transactions*/0,
901                                 devq);
902         if (xpt_sim == NULL)
903                 return (ENOMEM);
904
905         mtx_lock(&xsoftc.xpt_lock);
906         if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
907                 mtx_unlock(&xsoftc.xpt_lock);
908                 printf("xpt_init: xpt_bus_register failed with status %#x,"
909                        " failing attach\n", status);
910                 return (EINVAL);
911         }
912         mtx_unlock(&xsoftc.xpt_lock);
913
914         /*
915          * Looking at the XPT from the SIM layer, the XPT is
916          * the equivalent of a peripheral driver.  Allocate
917          * a peripheral driver entry for us.
918          */
919         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
920                                       CAM_TARGET_WILDCARD,
921                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
922                 printf("xpt_init: xpt_create_path failed with status %#x,"
923                        " failing attach\n", status);
924                 return (EINVAL);
925         }
926         xpt_path_lock(path);
927         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
928                          path, NULL, 0, xpt_sim);
929         xpt_path_unlock(path);
930         xpt_free_path(path);
931
932         if (cam_num_doneqs < 1)
933                 cam_num_doneqs = 1 + mp_ncpus / 6;
934         else if (cam_num_doneqs > MAXCPU)
935                 cam_num_doneqs = MAXCPU;
936         for (i = 0; i < cam_num_doneqs; i++) {
937                 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
938                     MTX_DEF);
939                 STAILQ_INIT(&cam_doneqs[i].cam_doneq);
940                 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
941                     &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
942                 if (error != 0) {
943                         cam_num_doneqs = i;
944                         break;
945                 }
946         }
947         if (cam_num_doneqs < 1) {
948                 printf("xpt_init: Cannot init completion queues "
949                        "- failing attach\n");
950                 return (ENOMEM);
951         }
952         /*
953          * Register a callback for when interrupts are enabled.
954          */
955         xsoftc.xpt_config_hook =
956             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
957                                               M_CAMXPT, M_NOWAIT | M_ZERO);
958         if (xsoftc.xpt_config_hook == NULL) {
959                 printf("xpt_init: Cannot malloc config hook "
960                        "- failing attach\n");
961                 return (ENOMEM);
962         }
963         xsoftc.xpt_config_hook->ich_func = xpt_config;
964         if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
965                 free (xsoftc.xpt_config_hook, M_CAMXPT);
966                 printf("xpt_init: config_intrhook_establish failed "
967                        "- failing attach\n");
968         }
969
970         return (0);
971 }
972
973 static cam_status
974 xptregister(struct cam_periph *periph, void *arg)
975 {
976         struct cam_sim *xpt_sim;
977
978         if (periph == NULL) {
979                 printf("xptregister: periph was NULL!!\n");
980                 return(CAM_REQ_CMP_ERR);
981         }
982
983         xpt_sim = (struct cam_sim *)arg;
984         xpt_sim->softc = periph;
985         xpt_periph = periph;
986         periph->softc = NULL;
987
988         return(CAM_REQ_CMP);
989 }
990
991 int32_t
992 xpt_add_periph(struct cam_periph *periph)
993 {
994         struct cam_ed *device;
995         int32_t  status;
996
997         TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
998         device = periph->path->device;
999         status = CAM_REQ_CMP;
1000         if (device != NULL) {
1001                 mtx_lock(&device->target->bus->eb_mtx);
1002                 device->generation++;
1003                 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1004                 mtx_unlock(&device->target->bus->eb_mtx);
1005                 atomic_add_32(&xsoftc.xpt_generation, 1);
1006         }
1007
1008         return (status);
1009 }
1010
1011 void
1012 xpt_remove_periph(struct cam_periph *periph)
1013 {
1014         struct cam_ed *device;
1015
1016         device = periph->path->device;
1017         if (device != NULL) {
1018                 mtx_lock(&device->target->bus->eb_mtx);
1019                 device->generation++;
1020                 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1021                 mtx_unlock(&device->target->bus->eb_mtx);
1022                 atomic_add_32(&xsoftc.xpt_generation, 1);
1023         }
1024 }
1025
1026
1027 void
1028 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1029 {
1030         struct  cam_path *path = periph->path;
1031         struct  xpt_proto *proto;
1032
1033         cam_periph_assert(periph, MA_OWNED);
1034         periph->flags |= CAM_PERIPH_ANNOUNCED;
1035
1036         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1037                periph->periph_name, periph->unit_number,
1038                path->bus->sim->sim_name,
1039                path->bus->sim->unit_number,
1040                path->bus->sim->bus_id,
1041                path->bus->path_id,
1042                path->target->target_id,
1043                (uintmax_t)path->device->lun_id);
1044         printf("%s%d: ", periph->periph_name, periph->unit_number);
1045         proto = xpt_proto_find(path->device->protocol);
1046         if (proto)
1047                 proto->ops->announce(path->device);
1048         else
1049                 printf("%s%d: Unknown protocol device %d\n",
1050                     periph->periph_name, periph->unit_number,
1051                     path->device->protocol);
1052         if (path->device->serial_num_len > 0) {
1053                 /* Don't wrap the screen  - print only the first 60 chars */
1054                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1055                        periph->unit_number, path->device->serial_num);
1056         }
1057         /* Announce transport details. */
1058         path->bus->xport->ops->announce(periph);
1059         /* Announce command queueing. */
1060         if (path->device->inq_flags & SID_CmdQue
1061          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1062                 printf("%s%d: Command Queueing enabled\n",
1063                        periph->periph_name, periph->unit_number);
1064         }
1065         /* Announce caller's details if they've passed in. */
1066         if (announce_string != NULL)
1067                 printf("%s%d: %s\n", periph->periph_name,
1068                        periph->unit_number, announce_string);
1069 }
1070
1071 void
1072 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1073 {
1074         if (quirks != 0) {
1075                 printf("%s%d: quirks=0x%b\n", periph->periph_name,
1076                     periph->unit_number, quirks, bit_string);
1077         }
1078 }
1079
1080 void
1081 xpt_denounce_periph(struct cam_periph *periph)
1082 {
1083         struct  cam_path *path = periph->path;
1084         struct  xpt_proto *proto;
1085
1086         cam_periph_assert(periph, MA_OWNED);
1087         printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1088                periph->periph_name, periph->unit_number,
1089                path->bus->sim->sim_name,
1090                path->bus->sim->unit_number,
1091                path->bus->sim->bus_id,
1092                path->bus->path_id,
1093                path->target->target_id,
1094                (uintmax_t)path->device->lun_id);
1095         printf("%s%d: ", periph->periph_name, periph->unit_number);
1096         proto = xpt_proto_find(path->device->protocol);
1097         if (proto)
1098                 proto->ops->denounce(path->device);
1099         else
1100                 printf("%s%d: Unknown protocol device %d\n",
1101                     periph->periph_name, periph->unit_number,
1102                     path->device->protocol);
1103         if (path->device->serial_num_len > 0)
1104                 printf(" s/n %.60s", path->device->serial_num);
1105         printf(" detached\n");
1106 }
1107
1108
1109 int
1110 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1111 {
1112         int ret = -1, l, o;
1113         struct ccb_dev_advinfo cdai;
1114         struct scsi_vpd_id_descriptor *idd;
1115
1116         xpt_path_assert(path, MA_OWNED);
1117
1118         memset(&cdai, 0, sizeof(cdai));
1119         xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1120         cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1121         cdai.flags = CDAI_FLAG_NONE;
1122         cdai.bufsiz = len;
1123
1124         if (!strcmp(attr, "GEOM::ident"))
1125                 cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1126         else if (!strcmp(attr, "GEOM::physpath"))
1127                 cdai.buftype = CDAI_TYPE_PHYS_PATH;
1128         else if (strcmp(attr, "GEOM::lunid") == 0 ||
1129                  strcmp(attr, "GEOM::lunname") == 0) {
1130                 cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1131                 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1132         } else
1133                 goto out;
1134
1135         cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
1136         if (cdai.buf == NULL) {
1137                 ret = ENOMEM;
1138                 goto out;
1139         }
1140         xpt_action((union ccb *)&cdai); /* can only be synchronous */
1141         if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1142                 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1143         if (cdai.provsiz == 0)
1144                 goto out;
1145         if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
1146                 if (strcmp(attr, "GEOM::lunid") == 0) {
1147                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1148                             cdai.provsiz, scsi_devid_is_lun_naa);
1149                         if (idd == NULL)
1150                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1151                                     cdai.provsiz, scsi_devid_is_lun_eui64);
1152                         if (idd == NULL)
1153                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1154                                     cdai.provsiz, scsi_devid_is_lun_uuid);
1155                         if (idd == NULL)
1156                                 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1157                                     cdai.provsiz, scsi_devid_is_lun_md5);
1158                 } else
1159                         idd = NULL;
1160                 if (idd == NULL)
1161                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1162                             cdai.provsiz, scsi_devid_is_lun_t10);
1163                 if (idd == NULL)
1164                         idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
1165                             cdai.provsiz, scsi_devid_is_lun_name);
1166                 if (idd == NULL)
1167                         goto out;
1168                 ret = 0;
1169                 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
1170                         if (idd->length < len) {
1171                                 for (l = 0; l < idd->length; l++)
1172                                         buf[l] = idd->identifier[l] ?
1173                                             idd->identifier[l] : ' ';
1174                                 buf[l] = 0;
1175                         } else
1176                                 ret = EFAULT;
1177                 } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
1178                         l = strnlen(idd->identifier, idd->length);
1179                         if (l < len) {
1180                                 bcopy(idd->identifier, buf, l);
1181                                 buf[l] = 0;
1182                         } else
1183                                 ret = EFAULT;
1184                 } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID
1185                     && idd->identifier[0] == 0x10) {
1186                         if ((idd->length - 2) * 2 + 4 < len) {
1187                                 for (l = 2, o = 0; l < idd->length; l++) {
1188                                         if (l == 6 || l == 8 || l == 10 || l == 12)
1189                                             o += sprintf(buf + o, "-");
1190                                         o += sprintf(buf + o, "%02x",
1191                                             idd->identifier[l]);
1192                                 }
1193                         } else
1194                                 ret = EFAULT;
1195                 } else {
1196                         if (idd->length * 2 < len) {
1197                                 for (l = 0; l < idd->length; l++)
1198                                         sprintf(buf + l * 2, "%02x",
1199                                             idd->identifier[l]);
1200                         } else
1201                                 ret = EFAULT;
1202                 }
1203         } else {
1204                 ret = 0;
1205                 if (strlcpy(buf, cdai.buf, len) >= len)
1206                         ret = EFAULT;
1207         }
1208
1209 out:
1210         if (cdai.buf != NULL)
1211                 free(cdai.buf, M_CAMXPT);
1212         return ret;
1213 }
1214
1215 static dev_match_ret
1216 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1217             struct cam_eb *bus)
1218 {
1219         dev_match_ret retval;
1220         u_int i;
1221
1222         retval = DM_RET_NONE;
1223
1224         /*
1225          * If we aren't given something to match against, that's an error.
1226          */
1227         if (bus == NULL)
1228                 return(DM_RET_ERROR);
1229
1230         /*
1231          * If there are no match entries, then this bus matches no
1232          * matter what.
1233          */
1234         if ((patterns == NULL) || (num_patterns == 0))
1235                 return(DM_RET_DESCEND | DM_RET_COPY);
1236
1237         for (i = 0; i < num_patterns; i++) {
1238                 struct bus_match_pattern *cur_pattern;
1239
1240                 /*
1241                  * If the pattern in question isn't for a bus node, we
1242                  * aren't interested.  However, we do indicate to the
1243                  * calling routine that we should continue descending the
1244                  * tree, since the user wants to match against lower-level
1245                  * EDT elements.
1246                  */
1247                 if (patterns[i].type != DEV_MATCH_BUS) {
1248                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1249                                 retval |= DM_RET_DESCEND;
1250                         continue;
1251                 }
1252
1253                 cur_pattern = &patterns[i].pattern.bus_pattern;
1254
1255                 /*
1256                  * If they want to match any bus node, we give them any
1257                  * device node.
1258                  */
1259                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1260                         /* set the copy flag */
1261                         retval |= DM_RET_COPY;
1262
1263                         /*
1264                          * If we've already decided on an action, go ahead
1265                          * and return.
1266                          */
1267                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1268                                 return(retval);
1269                 }
1270
1271                 /*
1272                  * Not sure why someone would do this...
1273                  */
1274                 if (cur_pattern->flags == BUS_MATCH_NONE)
1275                         continue;
1276
1277                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1278                  && (cur_pattern->path_id != bus->path_id))
1279                         continue;
1280
1281                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1282                  && (cur_pattern->bus_id != bus->sim->bus_id))
1283                         continue;
1284
1285                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1286                  && (cur_pattern->unit_number != bus->sim->unit_number))
1287                         continue;
1288
1289                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1290                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1291                              DEV_IDLEN) != 0))
1292                         continue;
1293
1294                 /*
1295                  * If we get to this point, the user definitely wants
1296                  * information on this bus.  So tell the caller to copy the
1297                  * data out.
1298                  */
1299                 retval |= DM_RET_COPY;
1300
1301                 /*
1302                  * If the return action has been set to descend, then we
1303                  * know that we've already seen a non-bus matching
1304                  * expression, therefore we need to further descend the tree.
1305                  * This won't change by continuing around the loop, so we
1306                  * go ahead and return.  If we haven't seen a non-bus
1307                  * matching expression, we keep going around the loop until
1308                  * we exhaust the matching expressions.  We'll set the stop
1309                  * flag once we fall out of the loop.
1310                  */
1311                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1312                         return(retval);
1313         }
1314
1315         /*
1316          * If the return action hasn't been set to descend yet, that means
1317          * we haven't seen anything other than bus matching patterns.  So
1318          * tell the caller to stop descending the tree -- the user doesn't
1319          * want to match against lower level tree elements.
1320          */
1321         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1322                 retval |= DM_RET_STOP;
1323
1324         return(retval);
1325 }
1326
1327 static dev_match_ret
1328 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1329                struct cam_ed *device)
1330 {
1331         dev_match_ret retval;
1332         u_int i;
1333
1334         retval = DM_RET_NONE;
1335
1336         /*
1337          * If we aren't given something to match against, that's an error.
1338          */
1339         if (device == NULL)
1340                 return(DM_RET_ERROR);
1341
1342         /*
1343          * If there are no match entries, then this device matches no
1344          * matter what.
1345          */
1346         if ((patterns == NULL) || (num_patterns == 0))
1347                 return(DM_RET_DESCEND | DM_RET_COPY);
1348
1349         for (i = 0; i < num_patterns; i++) {
1350                 struct device_match_pattern *cur_pattern;
1351                 struct scsi_vpd_device_id *device_id_page;
1352
1353                 /*
1354                  * If the pattern in question isn't for a device node, we
1355                  * aren't interested.
1356                  */
1357                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1358                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1359                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1360                                 retval |= DM_RET_DESCEND;
1361                         continue;
1362                 }
1363
1364                 cur_pattern = &patterns[i].pattern.device_pattern;
1365
1366                 /* Error out if mutually exclusive options are specified. */ 
1367                 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1368                  == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1369                         return(DM_RET_ERROR);
1370
1371                 /*
1372                  * If they want to match any device node, we give them any
1373                  * device node.
1374                  */
1375                 if (cur_pattern->flags == DEV_MATCH_ANY)
1376                         goto copy_dev_node;
1377
1378                 /*
1379                  * Not sure why someone would do this...
1380                  */
1381                 if (cur_pattern->flags == DEV_MATCH_NONE)
1382                         continue;
1383
1384                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1385                  && (cur_pattern->path_id != device->target->bus->path_id))
1386                         continue;
1387
1388                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1389                  && (cur_pattern->target_id != device->target->target_id))
1390                         continue;
1391
1392                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1393                  && (cur_pattern->target_lun != device->lun_id))
1394                         continue;
1395
1396                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1397                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1398                                     (caddr_t)&cur_pattern->data.inq_pat,
1399                                     1, sizeof(cur_pattern->data.inq_pat),
1400                                     scsi_static_inquiry_match) == NULL))
1401                         continue;
1402
1403                 device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1404                 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1405                  && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1406                   || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1407                                       device->device_id_len
1408                                     - SVPD_DEVICE_ID_HDR_LEN,
1409                                       cur_pattern->data.devid_pat.id,
1410                                       cur_pattern->data.devid_pat.id_len) != 0))
1411                         continue;
1412
1413 copy_dev_node:
1414                 /*
1415                  * If we get to this point, the user definitely wants
1416                  * information on this device.  So tell the caller to copy
1417                  * the data out.
1418                  */
1419                 retval |= DM_RET_COPY;
1420
1421                 /*
1422                  * If the return action has been set to descend, then we
1423                  * know that we've already seen a peripheral matching
1424                  * expression, therefore we need to further descend the tree.
1425                  * This won't change by continuing around the loop, so we
1426                  * go ahead and return.  If we haven't seen a peripheral
1427                  * matching expression, we keep going around the loop until
1428                  * we exhaust the matching expressions.  We'll set the stop
1429                  * flag once we fall out of the loop.
1430                  */
1431                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1432                         return(retval);
1433         }
1434
1435         /*
1436          * If the return action hasn't been set to descend yet, that means
1437          * we haven't seen any peripheral matching patterns.  So tell the
1438          * caller to stop descending the tree -- the user doesn't want to
1439          * match against lower level tree elements.
1440          */
1441         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1442                 retval |= DM_RET_STOP;
1443
1444         return(retval);
1445 }
1446
1447 /*
1448  * Match a single peripheral against any number of match patterns.
1449  */
1450 static dev_match_ret
1451 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1452                struct cam_periph *periph)
1453 {
1454         dev_match_ret retval;
1455         u_int i;
1456
1457         /*
1458          * If we aren't given something to match against, that's an error.
1459          */
1460         if (periph == NULL)
1461                 return(DM_RET_ERROR);
1462
1463         /*
1464          * If there are no match entries, then this peripheral matches no
1465          * matter what.
1466          */
1467         if ((patterns == NULL) || (num_patterns == 0))
1468                 return(DM_RET_STOP | DM_RET_COPY);
1469
1470         /*
1471          * There aren't any nodes below a peripheral node, so there's no
1472          * reason to descend the tree any further.
1473          */
1474         retval = DM_RET_STOP;
1475
1476         for (i = 0; i < num_patterns; i++) {
1477                 struct periph_match_pattern *cur_pattern;
1478
1479                 /*
1480                  * If the pattern in question isn't for a peripheral, we
1481                  * aren't interested.
1482                  */
1483                 if (patterns[i].type != DEV_MATCH_PERIPH)
1484                         continue;
1485
1486                 cur_pattern = &patterns[i].pattern.periph_pattern;
1487
1488                 /*
1489                  * If they want to match on anything, then we will do so.
1490                  */
1491                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1492                         /* set the copy flag */
1493                         retval |= DM_RET_COPY;
1494
1495                         /*
1496                          * We've already set the return action to stop,
1497                          * since there are no nodes below peripherals in
1498                          * the tree.
1499                          */
1500                         return(retval);
1501                 }
1502
1503                 /*
1504                  * Not sure why someone would do this...
1505                  */
1506                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
1507                         continue;
1508
1509                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1510                  && (cur_pattern->path_id != periph->path->bus->path_id))
1511                         continue;
1512
1513                 /*
1514                  * For the target and lun id's, we have to make sure the
1515                  * target and lun pointers aren't NULL.  The xpt peripheral
1516                  * has a wildcard target and device.
1517                  */
1518                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1519                  && ((periph->path->target == NULL)
1520                  ||(cur_pattern->target_id != periph->path->target->target_id)))
1521                         continue;
1522
1523                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1524                  && ((periph->path->device == NULL)
1525                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
1526                         continue;
1527
1528                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1529                  && (cur_pattern->unit_number != periph->unit_number))
1530                         continue;
1531
1532                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1533                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
1534                              DEV_IDLEN) != 0))
1535                         continue;
1536
1537                 /*
1538                  * If we get to this point, the user definitely wants
1539                  * information on this peripheral.  So tell the caller to
1540                  * copy the data out.
1541                  */
1542                 retval |= DM_RET_COPY;
1543
1544                 /*
1545                  * The return action has already been set to stop, since
1546                  * peripherals don't have any nodes below them in the EDT.
1547                  */
1548                 return(retval);
1549         }
1550
1551         /*
1552          * If we get to this point, the peripheral that was passed in
1553          * doesn't match any of the patterns.
1554          */
1555         return(retval);
1556 }
1557
1558 static int
1559 xptedtbusfunc(struct cam_eb *bus, void *arg)
1560 {
1561         struct ccb_dev_match *cdm;
1562         struct cam_et *target;
1563         dev_match_ret retval;
1564
1565         cdm = (struct ccb_dev_match *)arg;
1566
1567         /*
1568          * If our position is for something deeper in the tree, that means
1569          * that we've already seen this node.  So, we keep going down.
1570          */
1571         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1572          && (cdm->pos.cookie.bus == bus)
1573          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1574          && (cdm->pos.cookie.target != NULL))
1575                 retval = DM_RET_DESCEND;
1576         else
1577                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1578
1579         /*
1580          * If we got an error, bail out of the search.
1581          */
1582         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1583                 cdm->status = CAM_DEV_MATCH_ERROR;
1584                 return(0);
1585         }
1586
1587         /*
1588          * If the copy flag is set, copy this bus out.
1589          */
1590         if (retval & DM_RET_COPY) {
1591                 int spaceleft, j;
1592
1593                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1594                         sizeof(struct dev_match_result));
1595
1596                 /*
1597                  * If we don't have enough space to put in another
1598                  * match result, save our position and tell the
1599                  * user there are more devices to check.
1600                  */
1601                 if (spaceleft < sizeof(struct dev_match_result)) {
1602                         bzero(&cdm->pos, sizeof(cdm->pos));
1603                         cdm->pos.position_type =
1604                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1605
1606                         cdm->pos.cookie.bus = bus;
1607                         cdm->pos.generations[CAM_BUS_GENERATION]=
1608                                 xsoftc.bus_generation;
1609                         cdm->status = CAM_DEV_MATCH_MORE;
1610                         return(0);
1611                 }
1612                 j = cdm->num_matches;
1613                 cdm->num_matches++;
1614                 cdm->matches[j].type = DEV_MATCH_BUS;
1615                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
1616                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1617                 cdm->matches[j].result.bus_result.unit_number =
1618                         bus->sim->unit_number;
1619                 strlcpy(cdm->matches[j].result.bus_result.dev_name,
1620                         bus->sim->sim_name,
1621                         sizeof(cdm->matches[j].result.bus_result.dev_name));
1622         }
1623
1624         /*
1625          * If the user is only interested in busses, there's no
1626          * reason to descend to the next level in the tree.
1627          */
1628         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1629                 return(1);
1630
1631         /*
1632          * If there is a target generation recorded, check it to
1633          * make sure the target list hasn't changed.
1634          */
1635         mtx_lock(&bus->eb_mtx);
1636         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1637          && (cdm->pos.cookie.bus == bus)
1638          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1639          && (cdm->pos.cookie.target != NULL)) {
1640                 if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1641                     bus->generation)) {
1642                         mtx_unlock(&bus->eb_mtx);
1643                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1644                         return (0);
1645                 }
1646                 target = (struct cam_et *)cdm->pos.cookie.target;
1647                 target->refcount++;
1648         } else
1649                 target = NULL;
1650         mtx_unlock(&bus->eb_mtx);
1651
1652         return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1653 }
1654
1655 static int
1656 xptedttargetfunc(struct cam_et *target, void *arg)
1657 {
1658         struct ccb_dev_match *cdm;
1659         struct cam_eb *bus;
1660         struct cam_ed *device;
1661
1662         cdm = (struct ccb_dev_match *)arg;
1663         bus = target->bus;
1664
1665         /*
1666          * If there is a device list generation recorded, check it to
1667          * make sure the device list hasn't changed.
1668          */
1669         mtx_lock(&bus->eb_mtx);
1670         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1671          && (cdm->pos.cookie.bus == bus)
1672          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1673          && (cdm->pos.cookie.target == target)
1674          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1675          && (cdm->pos.cookie.device != NULL)) {
1676                 if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1677                     target->generation) {
1678                         mtx_unlock(&bus->eb_mtx);
1679                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1680                         return(0);
1681                 }
1682                 device = (struct cam_ed *)cdm->pos.cookie.device;
1683                 device->refcount++;
1684         } else
1685                 device = NULL;
1686         mtx_unlock(&bus->eb_mtx);
1687
1688         return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1689 }
1690
1691 static int
1692 xptedtdevicefunc(struct cam_ed *device, void *arg)
1693 {
1694         struct cam_eb *bus;
1695         struct cam_periph *periph;
1696         struct ccb_dev_match *cdm;
1697         dev_match_ret retval;
1698
1699         cdm = (struct ccb_dev_match *)arg;
1700         bus = device->target->bus;
1701
1702         /*
1703          * If our position is for something deeper in the tree, that means
1704          * that we've already seen this node.  So, we keep going down.
1705          */
1706         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1707          && (cdm->pos.cookie.device == device)
1708          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1709          && (cdm->pos.cookie.periph != NULL))
1710                 retval = DM_RET_DESCEND;
1711         else
1712                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1713                                         device);
1714
1715         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1716                 cdm->status = CAM_DEV_MATCH_ERROR;
1717                 return(0);
1718         }
1719
1720         /*
1721          * If the copy flag is set, copy this device out.
1722          */
1723         if (retval & DM_RET_COPY) {
1724                 int spaceleft, j;
1725
1726                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1727                         sizeof(struct dev_match_result));
1728
1729                 /*
1730                  * If we don't have enough space to put in another
1731                  * match result, save our position and tell the
1732                  * user there are more devices to check.
1733                  */
1734                 if (spaceleft < sizeof(struct dev_match_result)) {
1735                         bzero(&cdm->pos, sizeof(cdm->pos));
1736                         cdm->pos.position_type =
1737                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1738                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1739
1740                         cdm->pos.cookie.bus = device->target->bus;
1741                         cdm->pos.generations[CAM_BUS_GENERATION]=
1742                                 xsoftc.bus_generation;
1743                         cdm->pos.cookie.target = device->target;
1744                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1745                                 device->target->bus->generation;
1746                         cdm->pos.cookie.device = device;
1747                         cdm->pos.generations[CAM_DEV_GENERATION] =
1748                                 device->target->generation;
1749                         cdm->status = CAM_DEV_MATCH_MORE;
1750                         return(0);
1751                 }
1752                 j = cdm->num_matches;
1753                 cdm->num_matches++;
1754                 cdm->matches[j].type = DEV_MATCH_DEVICE;
1755                 cdm->matches[j].result.device_result.path_id =
1756                         device->target->bus->path_id;
1757                 cdm->matches[j].result.device_result.target_id =
1758                         device->target->target_id;
1759                 cdm->matches[j].result.device_result.target_lun =
1760                         device->lun_id;
1761                 cdm->matches[j].result.device_result.protocol =
1762                         device->protocol;
1763                 bcopy(&device->inq_data,
1764                       &cdm->matches[j].result.device_result.inq_data,
1765                       sizeof(struct scsi_inquiry_data));
1766                 bcopy(&device->ident_data,
1767                       &cdm->matches[j].result.device_result.ident_data,
1768                       sizeof(struct ata_params));
1769
1770                 /* Let the user know whether this device is unconfigured */
1771                 if (device->flags & CAM_DEV_UNCONFIGURED)
1772                         cdm->matches[j].result.device_result.flags =
1773                                 DEV_RESULT_UNCONFIGURED;
1774                 else
1775                         cdm->matches[j].result.device_result.flags =
1776                                 DEV_RESULT_NOFLAG;
1777         }
1778
1779         /*
1780          * If the user isn't interested in peripherals, don't descend
1781          * the tree any further.
1782          */
1783         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1784                 return(1);
1785
1786         /*
1787          * If there is a peripheral list generation recorded, make sure
1788          * it hasn't changed.
1789          */
1790         xpt_lock_buses();
1791         mtx_lock(&bus->eb_mtx);
1792         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1793          && (cdm->pos.cookie.bus == bus)
1794          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1795          && (cdm->pos.cookie.target == device->target)
1796          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1797          && (cdm->pos.cookie.device == device)
1798          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1799          && (cdm->pos.cookie.periph != NULL)) {
1800                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1801                     device->generation) {
1802                         mtx_unlock(&bus->eb_mtx);
1803                         xpt_unlock_buses();
1804                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1805                         return(0);
1806                 }
1807                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1808                 periph->refcount++;
1809         } else
1810                 periph = NULL;
1811         mtx_unlock(&bus->eb_mtx);
1812         xpt_unlock_buses();
1813
1814         return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1815 }
1816
1817 static int
1818 xptedtperiphfunc(struct cam_periph *periph, void *arg)
1819 {
1820         struct ccb_dev_match *cdm;
1821         dev_match_ret retval;
1822
1823         cdm = (struct ccb_dev_match *)arg;
1824
1825         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1826
1827         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1828                 cdm->status = CAM_DEV_MATCH_ERROR;
1829                 return(0);
1830         }
1831
1832         /*
1833          * If the copy flag is set, copy this peripheral out.
1834          */
1835         if (retval & DM_RET_COPY) {
1836                 int spaceleft, j;
1837                 size_t l;
1838
1839                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1840                         sizeof(struct dev_match_result));
1841
1842                 /*
1843                  * If we don't have enough space to put in another
1844                  * match result, save our position and tell the
1845                  * user there are more devices to check.
1846                  */
1847                 if (spaceleft < sizeof(struct dev_match_result)) {
1848                         bzero(&cdm->pos, sizeof(cdm->pos));
1849                         cdm->pos.position_type =
1850                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1851                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1852                                 CAM_DEV_POS_PERIPH;
1853
1854                         cdm->pos.cookie.bus = periph->path->bus;
1855                         cdm->pos.generations[CAM_BUS_GENERATION]=
1856                                 xsoftc.bus_generation;
1857                         cdm->pos.cookie.target = periph->path->target;
1858                         cdm->pos.generations[CAM_TARGET_GENERATION] =
1859                                 periph->path->bus->generation;
1860                         cdm->pos.cookie.device = periph->path->device;
1861                         cdm->pos.generations[CAM_DEV_GENERATION] =
1862                                 periph->path->target->generation;
1863                         cdm->pos.cookie.periph = periph;
1864                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
1865                                 periph->path->device->generation;
1866                         cdm->status = CAM_DEV_MATCH_MORE;
1867                         return(0);
1868                 }
1869
1870                 j = cdm->num_matches;
1871                 cdm->num_matches++;
1872                 cdm->matches[j].type = DEV_MATCH_PERIPH;
1873                 cdm->matches[j].result.periph_result.path_id =
1874                         periph->path->bus->path_id;
1875                 cdm->matches[j].result.periph_result.target_id =
1876                         periph->path->target->target_id;
1877                 cdm->matches[j].result.periph_result.target_lun =
1878                         periph->path->device->lun_id;
1879                 cdm->matches[j].result.periph_result.unit_number =
1880                         periph->unit_number;
1881                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
1882                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
1883                         periph->periph_name, l);
1884         }
1885
1886         return(1);
1887 }
1888
1889 static int
1890 xptedtmatch(struct ccb_dev_match *cdm)
1891 {
1892         struct cam_eb *bus;
1893         int ret;
1894
1895         cdm->num_matches = 0;
1896
1897         /*
1898          * Check the bus list generation.  If it has changed, the user
1899          * needs to reset everything and start over.
1900          */
1901         xpt_lock_buses();
1902         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1903          && (cdm->pos.cookie.bus != NULL)) {
1904                 if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1905                     xsoftc.bus_generation) {
1906                         xpt_unlock_buses();
1907                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1908                         return(0);
1909                 }
1910                 bus = (struct cam_eb *)cdm->pos.cookie.bus;
1911                 bus->refcount++;
1912         } else
1913                 bus = NULL;
1914         xpt_unlock_buses();
1915
1916         ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1917
1918         /*
1919          * If we get back 0, that means that we had to stop before fully
1920          * traversing the EDT.  It also means that one of the subroutines
1921          * has set the status field to the proper value.  If we get back 1,
1922          * we've fully traversed the EDT and copied out any matching entries.
1923          */
1924         if (ret == 1)
1925                 cdm->status = CAM_DEV_MATCH_LAST;
1926
1927         return(ret);
1928 }
1929
1930 static int
1931 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1932 {
1933         struct cam_periph *periph;
1934         struct ccb_dev_match *cdm;
1935
1936         cdm = (struct ccb_dev_match *)arg;
1937
1938         xpt_lock_buses();
1939         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1940          && (cdm->pos.cookie.pdrv == pdrv)
1941          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1942          && (cdm->pos.cookie.periph != NULL)) {
1943                 if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1944                     (*pdrv)->generation) {
1945                         xpt_unlock_buses();
1946                         cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1947                         return(0);
1948                 }
1949                 periph = (struct cam_periph *)cdm->pos.cookie.periph;
1950                 periph->refcount++;
1951         } else
1952                 periph = NULL;
1953         xpt_unlock_buses();
1954
1955         return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1956 }
1957
1958 static int
1959 xptplistperiphfunc(struct cam_periph *periph, void *arg)
1960 {
1961         struct ccb_dev_match *cdm;
1962         dev_match_ret retval;
1963
1964         cdm = (struct ccb_dev_match *)arg;
1965
1966         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1967
1968         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1969                 cdm->status = CAM_DEV_MATCH_ERROR;
1970                 return(0);
1971         }
1972
1973         /*
1974          * If the copy flag is set, copy this peripheral out.
1975          */
1976         if (retval & DM_RET_COPY) {
1977                 int spaceleft, j;
1978                 size_t l;
1979
1980                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
1981                         sizeof(struct dev_match_result));
1982
1983                 /*
1984                  * If we don't have enough space to put in another
1985                  * match result, save our position and tell the
1986                  * user there are more devices to check.
1987                  */
1988                 if (spaceleft < sizeof(struct dev_match_result)) {
1989                         struct periph_driver **pdrv;
1990
1991                         pdrv = NULL;
1992                         bzero(&cdm->pos, sizeof(cdm->pos));
1993                         cdm->pos.position_type =
1994                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
1995                                 CAM_DEV_POS_PERIPH;
1996
1997                         /*
1998                          * This may look a bit non-sensical, but it is
1999                          * actually quite logical.  There are very few
2000                          * peripheral drivers, and bloating every peripheral
2001                          * structure with a pointer back to its parent
2002                          * peripheral driver linker set entry would cost
2003                          * more in the long run than doing this quick lookup.
2004                          */
2005                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2006                                 if (strcmp((*pdrv)->driver_name,
2007                                     periph->periph_name) == 0)
2008                                         break;
2009                         }
2010
2011                         if (*pdrv == NULL) {
2012                                 cdm->status = CAM_DEV_MATCH_ERROR;
2013                                 return(0);
2014                         }
2015
2016                         cdm->pos.cookie.pdrv = pdrv;
2017                         /*
2018                          * The periph generation slot does double duty, as
2019                          * does the periph pointer slot.  They are used for
2020                          * both edt and pdrv lookups and positioning.
2021                          */
2022                         cdm->pos.cookie.periph = periph;
2023                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2024                                 (*pdrv)->generation;
2025                         cdm->status = CAM_DEV_MATCH_MORE;
2026                         return(0);
2027                 }
2028
2029                 j = cdm->num_matches;
2030                 cdm->num_matches++;
2031                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2032                 cdm->matches[j].result.periph_result.path_id =
2033                         periph->path->bus->path_id;
2034
2035                 /*
2036                  * The transport layer peripheral doesn't have a target or
2037                  * lun.
2038                  */
2039                 if (periph->path->target)
2040                         cdm->matches[j].result.periph_result.target_id =
2041                                 periph->path->target->target_id;
2042                 else
2043                         cdm->matches[j].result.periph_result.target_id =
2044                                 CAM_TARGET_WILDCARD;
2045
2046                 if (periph->path->device)
2047                         cdm->matches[j].result.periph_result.target_lun =
2048                                 periph->path->device->lun_id;
2049                 else
2050                         cdm->matches[j].result.periph_result.target_lun =
2051                                 CAM_LUN_WILDCARD;
2052
2053                 cdm->matches[j].result.periph_result.unit_number =
2054                         periph->unit_number;
2055                 l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2056                 strlcpy(cdm->matches[j].result.periph_result.periph_name,
2057                         periph->periph_name, l);
2058         }
2059
2060         return(1);
2061 }
2062
2063 static int
2064 xptperiphlistmatch(struct ccb_dev_match *cdm)
2065 {
2066         int ret;
2067
2068         cdm->num_matches = 0;
2069
2070         /*
2071          * At this point in the edt traversal function, we check the bus
2072          * list generation to make sure that no busses have been added or
2073          * removed since the user last sent a XPT_DEV_MATCH ccb through.
2074          * For the peripheral driver list traversal function, however, we
2075          * don't have to worry about new peripheral driver types coming or
2076          * going; they're in a linker set, and therefore can't change
2077          * without a recompile.
2078          */
2079
2080         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2081          && (cdm->pos.cookie.pdrv != NULL))
2082                 ret = xptpdrvtraverse(
2083                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2084                                 xptplistpdrvfunc, cdm);
2085         else
2086                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2087
2088         /*
2089          * If we get back 0, that means that we had to stop before fully
2090          * traversing the peripheral driver tree.  It also means that one of
2091          * the subroutines has set the status field to the proper value.  If
2092          * we get back 1, we've fully traversed the EDT and copied out any
2093          * matching entries.
2094          */
2095         if (ret == 1)
2096                 cdm->status = CAM_DEV_MATCH_LAST;
2097
2098         return(ret);
2099 }
2100
2101 static int
2102 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2103 {
2104         struct cam_eb *bus, *next_bus;
2105         int retval;
2106
2107         retval = 1;
2108         if (start_bus)
2109                 bus = start_bus;
2110         else {
2111                 xpt_lock_buses();
2112                 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2113                 if (bus == NULL) {
2114                         xpt_unlock_buses();
2115                         return (retval);
2116                 }
2117                 bus->refcount++;
2118                 xpt_unlock_buses();
2119         }
2120         for (; bus != NULL; bus = next_bus) {
2121                 retval = tr_func(bus, arg);
2122                 if (retval == 0) {
2123                         xpt_release_bus(bus);
2124                         break;
2125                 }
2126                 xpt_lock_buses();
2127                 next_bus = TAILQ_NEXT(bus, links);
2128                 if (next_bus)
2129                         next_bus->refcount++;
2130                 xpt_unlock_buses();
2131                 xpt_release_bus(bus);
2132         }
2133         return(retval);
2134 }
2135
2136 static int
2137 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2138                   xpt_targetfunc_t *tr_func, void *arg)
2139 {
2140         struct cam_et *target, *next_target;
2141         int retval;
2142
2143         retval = 1;
2144         if (start_target)
2145                 target = start_target;
2146         else {
2147                 mtx_lock(&bus->eb_mtx);
2148                 target = TAILQ_FIRST(&bus->et_entries);
2149                 if (target == NULL) {
2150                         mtx_unlock(&bus->eb_mtx);
2151                         return (retval);
2152                 }
2153                 target->refcount++;
2154                 mtx_unlock(&bus->eb_mtx);
2155         }
2156         for (; target != NULL; target = next_target) {
2157                 retval = tr_func(target, arg);
2158                 if (retval == 0) {
2159                         xpt_release_target(target);
2160                         break;
2161                 }
2162                 mtx_lock(&bus->eb_mtx);
2163                 next_target = TAILQ_NEXT(target, links);
2164                 if (next_target)
2165                         next_target->refcount++;
2166                 mtx_unlock(&bus->eb_mtx);
2167                 xpt_release_target(target);
2168         }
2169         return(retval);
2170 }
2171
2172 static int
2173 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2174                   xpt_devicefunc_t *tr_func, void *arg)
2175 {
2176         struct cam_eb *bus;
2177         struct cam_ed *device, *next_device;
2178         int retval;
2179
2180         retval = 1;
2181         bus = target->bus;
2182         if (start_device)
2183                 device = start_device;
2184         else {
2185                 mtx_lock(&bus->eb_mtx);
2186                 device = TAILQ_FIRST(&target->ed_entries);
2187                 if (device == NULL) {
2188                         mtx_unlock(&bus->eb_mtx);
2189                         return (retval);
2190                 }
2191                 device->refcount++;
2192                 mtx_unlock(&bus->eb_mtx);
2193         }
2194         for (; device != NULL; device = next_device) {
2195                 mtx_lock(&device->device_mtx);
2196                 retval = tr_func(device, arg);
2197                 mtx_unlock(&device->device_mtx);
2198                 if (retval == 0) {
2199                         xpt_release_device(device);
2200                         break;
2201                 }
2202                 mtx_lock(&bus->eb_mtx);
2203                 next_device = TAILQ_NEXT(device, links);
2204                 if (next_device)
2205                         next_device->refcount++;
2206                 mtx_unlock(&bus->eb_mtx);
2207                 xpt_release_device(device);
2208         }
2209         return(retval);
2210 }
2211
2212 static int
2213 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2214                   xpt_periphfunc_t *tr_func, void *arg)
2215 {
2216         struct cam_eb *bus;
2217         struct cam_periph *periph, *next_periph;
2218         int retval;
2219
2220         retval = 1;
2221
2222         bus = device->target->bus;
2223         if (start_periph)
2224                 periph = start_periph;
2225         else {
2226                 xpt_lock_buses();
2227                 mtx_lock(&bus->eb_mtx);
2228                 periph = SLIST_FIRST(&device->periphs);
2229                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2230                         periph = SLIST_NEXT(periph, periph_links);
2231                 if (periph == NULL) {
2232                         mtx_unlock(&bus->eb_mtx);
2233                         xpt_unlock_buses();
2234                         return (retval);
2235                 }
2236                 periph->refcount++;
2237                 mtx_unlock(&bus->eb_mtx);
2238                 xpt_unlock_buses();
2239         }
2240         for (; periph != NULL; periph = next_periph) {
2241                 retval = tr_func(periph, arg);
2242                 if (retval == 0) {
2243                         cam_periph_release_locked(periph);
2244                         break;
2245                 }
2246                 xpt_lock_buses();
2247                 mtx_lock(&bus->eb_mtx);
2248                 next_periph = SLIST_NEXT(periph, periph_links);
2249                 while (next_periph != NULL &&
2250                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2251                         next_periph = SLIST_NEXT(next_periph, periph_links);
2252                 if (next_periph)
2253                         next_periph->refcount++;
2254                 mtx_unlock(&bus->eb_mtx);
2255                 xpt_unlock_buses();
2256                 cam_periph_release_locked(periph);
2257         }
2258         return(retval);
2259 }
2260
2261 static int
2262 xptpdrvtraverse(struct periph_driver **start_pdrv,
2263                 xpt_pdrvfunc_t *tr_func, void *arg)
2264 {
2265         struct periph_driver **pdrv;
2266         int retval;
2267
2268         retval = 1;
2269
2270         /*
2271          * We don't traverse the peripheral driver list like we do the
2272          * other lists, because it is a linker set, and therefore cannot be
2273          * changed during runtime.  If the peripheral driver list is ever
2274          * re-done to be something other than a linker set (i.e. it can
2275          * change while the system is running), the list traversal should
2276          * be modified to work like the other traversal functions.
2277          */
2278         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2279              *pdrv != NULL; pdrv++) {
2280                 retval = tr_func(pdrv, arg);
2281
2282                 if (retval == 0)
2283                         return(retval);
2284         }
2285
2286         return(retval);
2287 }
2288
2289 static int
2290 xptpdperiphtraverse(struct periph_driver **pdrv,
2291                     struct cam_periph *start_periph,
2292                     xpt_periphfunc_t *tr_func, void *arg)
2293 {
2294         struct cam_periph *periph, *next_periph;
2295         int retval;
2296
2297         retval = 1;
2298
2299         if (start_periph)
2300                 periph = start_periph;
2301         else {
2302                 xpt_lock_buses();
2303                 periph = TAILQ_FIRST(&(*pdrv)->units);
2304                 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2305                         periph = TAILQ_NEXT(periph, unit_links);
2306                 if (periph == NULL) {
2307                         xpt_unlock_buses();
2308                         return (retval);
2309                 }
2310                 periph->refcount++;
2311                 xpt_unlock_buses();
2312         }
2313         for (; periph != NULL; periph = next_periph) {
2314                 cam_periph_lock(periph);
2315                 retval = tr_func(periph, arg);
2316                 cam_periph_unlock(periph);
2317                 if (retval == 0) {
2318                         cam_periph_release(periph);
2319                         break;
2320                 }
2321                 xpt_lock_buses();
2322                 next_periph = TAILQ_NEXT(periph, unit_links);
2323                 while (next_periph != NULL &&
2324                     (next_periph->flags & CAM_PERIPH_FREE) != 0)
2325                         next_periph = TAILQ_NEXT(next_periph, unit_links);
2326                 if (next_periph)
2327                         next_periph->refcount++;
2328                 xpt_unlock_buses();
2329                 cam_periph_release(periph);
2330         }
2331         return(retval);
2332 }
2333
2334 static int
2335 xptdefbusfunc(struct cam_eb *bus, void *arg)
2336 {
2337         struct xpt_traverse_config *tr_config;
2338
2339         tr_config = (struct xpt_traverse_config *)arg;
2340
2341         if (tr_config->depth == XPT_DEPTH_BUS) {
2342                 xpt_busfunc_t *tr_func;
2343
2344                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2345
2346                 return(tr_func(bus, tr_config->tr_arg));
2347         } else
2348                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2349 }
2350
2351 static int
2352 xptdeftargetfunc(struct cam_et *target, void *arg)
2353 {
2354         struct xpt_traverse_config *tr_config;
2355
2356         tr_config = (struct xpt_traverse_config *)arg;
2357
2358         if (tr_config->depth == XPT_DEPTH_TARGET) {
2359                 xpt_targetfunc_t *tr_func;
2360
2361                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2362
2363                 return(tr_func(target, tr_config->tr_arg));
2364         } else
2365                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2366 }
2367
2368 static int
2369 xptdefdevicefunc(struct cam_ed *device, void *arg)
2370 {
2371         struct xpt_traverse_config *tr_config;
2372
2373         tr_config = (struct xpt_traverse_config *)arg;
2374
2375         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2376                 xpt_devicefunc_t *tr_func;
2377
2378                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2379
2380                 return(tr_func(device, tr_config->tr_arg));
2381         } else
2382                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2383 }
2384
2385 static int
2386 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2387 {
2388         struct xpt_traverse_config *tr_config;
2389         xpt_periphfunc_t *tr_func;
2390
2391         tr_config = (struct xpt_traverse_config *)arg;
2392
2393         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2394
2395         /*
2396          * Unlike the other default functions, we don't check for depth
2397          * here.  The peripheral driver level is the last level in the EDT,
2398          * so if we're here, we should execute the function in question.
2399          */
2400         return(tr_func(periph, tr_config->tr_arg));
2401 }
2402
2403 /*
2404  * Execute the given function for every bus in the EDT.
2405  */
2406 static int
2407 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2408 {
2409         struct xpt_traverse_config tr_config;
2410
2411         tr_config.depth = XPT_DEPTH_BUS;
2412         tr_config.tr_func = tr_func;
2413         tr_config.tr_arg = arg;
2414
2415         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2416 }
2417
2418 /*
2419  * Execute the given function for every device in the EDT.
2420  */
2421 static int
2422 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2423 {
2424         struct xpt_traverse_config tr_config;
2425
2426         tr_config.depth = XPT_DEPTH_DEVICE;
2427         tr_config.tr_func = tr_func;
2428         tr_config.tr_arg = arg;
2429
2430         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2431 }
2432
2433 static int
2434 xptsetasyncfunc(struct cam_ed *device, void *arg)
2435 {
2436         struct cam_path path;
2437         struct ccb_getdev cgd;
2438         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2439
2440         /*
2441          * Don't report unconfigured devices (Wildcard devs,
2442          * devices only for target mode, device instances
2443          * that have been invalidated but are waiting for
2444          * their last reference count to be released).
2445          */
2446         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2447                 return (1);
2448
2449         xpt_compile_path(&path,
2450                          NULL,
2451                          device->target->bus->path_id,
2452                          device->target->target_id,
2453                          device->lun_id);
2454         xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2455         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2456         xpt_action((union ccb *)&cgd);
2457         csa->callback(csa->callback_arg,
2458                             AC_FOUND_DEVICE,
2459                             &path, &cgd);
2460         xpt_release_path(&path);
2461
2462         return(1);
2463 }
2464
2465 static int
2466 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2467 {
2468         struct cam_path path;
2469         struct ccb_pathinq cpi;
2470         struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2471
2472         xpt_compile_path(&path, /*periph*/NULL,
2473                          bus->path_id,
2474                          CAM_TARGET_WILDCARD,
2475                          CAM_LUN_WILDCARD);
2476         xpt_path_lock(&path);
2477         xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL);
2478         cpi.ccb_h.func_code = XPT_PATH_INQ;
2479         xpt_action((union ccb *)&cpi);
2480         csa->callback(csa->callback_arg,
2481                             AC_PATH_REGISTERED,
2482                             &path, &cpi);
2483         xpt_path_unlock(&path);
2484         xpt_release_path(&path);
2485
2486         return(1);
2487 }
2488
2489 void
2490 xpt_action(union ccb *start_ccb)
2491 {
2492
2493         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2494             ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2495                 xpt_action_name(start_ccb->ccb_h.func_code)));
2496
2497         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2498         (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2499 }
2500
2501 void
2502 xpt_action_default(union ccb *start_ccb)
2503 {
2504         struct cam_path *path;
2505         struct cam_sim *sim;
2506         struct mtx *mtx;
2507
2508         path = start_ccb->ccb_h.path;
2509         CAM_DEBUG(path, CAM_DEBUG_TRACE,
2510             ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2511                 xpt_action_name(start_ccb->ccb_h.func_code)));
2512
2513         switch (start_ccb->ccb_h.func_code) {
2514         case XPT_SCSI_IO:
2515         {
2516                 struct cam_ed *device;
2517
2518                 /*
2519                  * For the sake of compatibility with SCSI-1
2520                  * devices that may not understand the identify
2521                  * message, we include lun information in the
2522                  * second byte of all commands.  SCSI-1 specifies
2523                  * that luns are a 3 bit value and reserves only 3
2524                  * bits for lun information in the CDB.  Later
2525                  * revisions of the SCSI spec allow for more than 8
2526                  * luns, but have deprecated lun information in the
2527                  * CDB.  So, if the lun won't fit, we must omit.
2528                  *
2529                  * Also be aware that during initial probing for devices,
2530                  * the inquiry information is unknown but initialized to 0.
2531                  * This means that this code will be exercised while probing
2532                  * devices with an ANSI revision greater than 2.
2533                  */
2534                 device = path->device;
2535                 if (device->protocol_version <= SCSI_REV_2
2536                  && start_ccb->ccb_h.target_lun < 8
2537                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2538
2539                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
2540                             start_ccb->ccb_h.target_lun << 5;
2541                 }
2542                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2543         }
2544         /* FALLTHROUGH */
2545         case XPT_TARGET_IO:
2546         case XPT_CONT_TARGET_IO:
2547                 start_ccb->csio.sense_resid = 0;
2548                 start_ccb->csio.resid = 0;
2549                 /* FALLTHROUGH */
2550         case XPT_ATA_IO:
2551                 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2552                         start_ccb->ataio.resid = 0;
2553                 /* FALLTHROUGH */
2554         case XPT_NVME_IO:
2555                 /* FALLTHROUGH */
2556         case XPT_NVME_ADMIN:
2557                 /* FALLTHROUGH */
2558         case XPT_RESET_DEV:
2559         case XPT_ENG_EXEC:
2560         case XPT_SMP_IO:
2561         {
2562                 struct cam_devq *devq;
2563
2564                 devq = path->bus->sim->devq;
2565                 mtx_lock(&devq->send_mtx);
2566                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2567                 if (xpt_schedule_devq(devq, path->device) != 0)
2568                         xpt_run_devq(devq);
2569                 mtx_unlock(&devq->send_mtx);
2570                 break;
2571         }
2572         case XPT_CALC_GEOMETRY:
2573                 /* Filter out garbage */
2574                 if (start_ccb->ccg.block_size == 0
2575                  || start_ccb->ccg.volume_size == 0) {
2576                         start_ccb->ccg.cylinders = 0;
2577                         start_ccb->ccg.heads = 0;
2578                         start_ccb->ccg.secs_per_track = 0;
2579                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2580                         break;
2581                 }
2582 #if defined(PC98) || defined(__sparc64__)
2583                 /*
2584                  * In a PC-98 system, geometry translation depens on
2585                  * the "real" device geometry obtained from mode page 4.
2586                  * SCSI geometry translation is performed in the
2587                  * initialization routine of the SCSI BIOS and the result
2588                  * stored in host memory.  If the translation is available
2589                  * in host memory, use it.  If not, rely on the default
2590                  * translation the device driver performs.
2591                  * For sparc64, we may need adjust the geometry of large
2592                  * disks in order to fit the limitations of the 16-bit
2593                  * fields of the VTOC8 disk label.
2594                  */
2595                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2596                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2597                         break;
2598                 }
2599 #endif
2600                 goto call_sim;
2601         case XPT_ABORT:
2602         {
2603                 union ccb* abort_ccb;
2604
2605                 abort_ccb = start_ccb->cab.abort_ccb;
2606                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2607                         struct cam_ed *device;
2608                         struct cam_devq *devq;
2609
2610                         device = abort_ccb->ccb_h.path->device;
2611                         devq = device->sim->devq;
2612
2613                         mtx_lock(&devq->send_mtx);
2614                         if (abort_ccb->ccb_h.pinfo.index > 0) {
2615                                 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2616                                 abort_ccb->ccb_h.status =
2617                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2618                                 xpt_freeze_devq_device(device, 1);
2619                                 mtx_unlock(&devq->send_mtx);
2620                                 xpt_done(abort_ccb);
2621                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2622                                 break;
2623                         }
2624                         mtx_unlock(&devq->send_mtx);
2625
2626                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2627                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2628                                 /*
2629                                  * We've caught this ccb en route to
2630                                  * the SIM.  Flag it for abort and the
2631                                  * SIM will do so just before starting
2632                                  * real work on the CCB.
2633                                  */
2634                                 abort_ccb->ccb_h.status =
2635                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2636                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2637                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2638                                 break;
2639                         }
2640                 }
2641                 if (XPT_FC_IS_QUEUED(abort_ccb)
2642                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2643                         /*
2644                          * It's already completed but waiting
2645                          * for our SWI to get to it.
2646                          */
2647                         start_ccb->ccb_h.status = CAM_UA_ABORT;
2648                         break;
2649                 }
2650                 /*
2651                  * If we weren't able to take care of the abort request
2652                  * in the XPT, pass the request down to the SIM for processing.
2653                  */
2654         }
2655         /* FALLTHROUGH */
2656         case XPT_ACCEPT_TARGET_IO:
2657         case XPT_EN_LUN:
2658         case XPT_IMMED_NOTIFY:
2659         case XPT_NOTIFY_ACK:
2660         case XPT_RESET_BUS:
2661         case XPT_IMMEDIATE_NOTIFY:
2662         case XPT_NOTIFY_ACKNOWLEDGE:
2663         case XPT_GET_SIM_KNOB_OLD:
2664         case XPT_GET_SIM_KNOB:
2665         case XPT_SET_SIM_KNOB:
2666         case XPT_GET_TRAN_SETTINGS:
2667         case XPT_SET_TRAN_SETTINGS:
2668         case XPT_PATH_INQ:
2669 call_sim:
2670                 sim = path->bus->sim;
2671                 mtx = sim->mtx;
2672                 if (mtx && !mtx_owned(mtx))
2673                         mtx_lock(mtx);
2674                 else
2675                         mtx = NULL;
2676                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2677                     ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code));
2678                 (*(sim->sim_action))(sim, start_ccb);
2679                 CAM_DEBUG(path, CAM_DEBUG_TRACE,
2680                     ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status));
2681                 if (mtx)
2682                         mtx_unlock(mtx);
2683                 break;
2684         case XPT_PATH_STATS:
2685                 start_ccb->cpis.last_reset = path->bus->last_reset;
2686                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2687                 break;
2688         case XPT_GDEV_TYPE:
2689         {
2690                 struct cam_ed *dev;
2691
2692                 dev = path->device;
2693                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2694                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2695                 } else {
2696                         struct ccb_getdev *cgd;
2697
2698                         cgd = &start_ccb->cgd;
2699                         cgd->protocol = dev->protocol;
2700                         cgd->inq_data = dev->inq_data;
2701                         cgd->ident_data = dev->ident_data;
2702                         cgd->inq_flags = dev->inq_flags;
2703                         cgd->ccb_h.status = CAM_REQ_CMP;
2704                         cgd->serial_num_len = dev->serial_num_len;
2705                         if ((dev->serial_num_len > 0)
2706                          && (dev->serial_num != NULL))
2707                                 bcopy(dev->serial_num, cgd->serial_num,
2708                                       dev->serial_num_len);
2709                 }
2710                 break;
2711         }
2712         case XPT_GDEV_STATS:
2713         {
2714                 struct ccb_getdevstats *cgds = &start_ccb->cgds;
2715                 struct cam_ed *dev = path->device;
2716                 struct cam_eb *bus = path->bus;
2717                 struct cam_et *tar = path->target;
2718                 struct cam_devq *devq = bus->sim->devq;
2719
2720                 mtx_lock(&devq->send_mtx);
2721                 cgds->dev_openings = dev->ccbq.dev_openings;
2722                 cgds->dev_active = dev->ccbq.dev_active;
2723                 cgds->allocated = dev->ccbq.allocated;
2724                 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2725                 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2726                 cgds->last_reset = tar->last_reset;
2727                 cgds->maxtags = dev->maxtags;
2728                 cgds->mintags = dev->mintags;
2729                 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2730                         cgds->last_reset = bus->last_reset;
2731                 mtx_unlock(&devq->send_mtx);
2732                 cgds->ccb_h.status = CAM_REQ_CMP;
2733                 break;
2734         }
2735         case XPT_GDEVLIST:
2736         {
2737                 struct cam_periph       *nperiph;
2738                 struct periph_list      *periph_head;
2739                 struct ccb_getdevlist   *cgdl;
2740                 u_int                   i;
2741                 struct cam_ed           *device;
2742                 int                     found;
2743
2744
2745                 found = 0;
2746
2747                 /*
2748                  * Don't want anyone mucking with our data.
2749                  */
2750                 device = path->device;
2751                 periph_head = &device->periphs;
2752                 cgdl = &start_ccb->cgdl;
2753
2754                 /*
2755                  * Check and see if the list has changed since the user
2756                  * last requested a list member.  If so, tell them that the
2757                  * list has changed, and therefore they need to start over
2758                  * from the beginning.
2759                  */
2760                 if ((cgdl->index != 0) &&
2761                     (cgdl->generation != device->generation)) {
2762                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2763                         break;
2764                 }
2765
2766                 /*
2767                  * Traverse the list of peripherals and attempt to find
2768                  * the requested peripheral.
2769                  */
2770                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
2771                      (nperiph != NULL) && (i <= cgdl->index);
2772                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2773                         if (i == cgdl->index) {
2774                                 strlcpy(cgdl->periph_name,
2775                                         nperiph->periph_name,
2776                                         sizeof(cgdl->periph_name));
2777                                 cgdl->unit_number = nperiph->unit_number;
2778                                 found = 1;
2779                         }
2780                 }
2781                 if (found == 0) {
2782                         cgdl->status = CAM_GDEVLIST_ERROR;
2783                         break;
2784                 }
2785
2786                 if (nperiph == NULL)
2787                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2788                 else
2789                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2790
2791                 cgdl->index++;
2792                 cgdl->generation = device->generation;
2793
2794                 cgdl->ccb_h.status = CAM_REQ_CMP;
2795                 break;
2796         }
2797         case XPT_DEV_MATCH:
2798         {
2799                 dev_pos_type position_type;
2800                 struct ccb_dev_match *cdm;
2801
2802                 cdm = &start_ccb->cdm;
2803
2804                 /*
2805                  * There are two ways of getting at information in the EDT.
2806                  * The first way is via the primary EDT tree.  It starts
2807                  * with a list of busses, then a list of targets on a bus,
2808                  * then devices/luns on a target, and then peripherals on a
2809                  * device/lun.  The "other" way is by the peripheral driver
2810                  * lists.  The peripheral driver lists are organized by
2811                  * peripheral driver.  (obviously)  So it makes sense to
2812                  * use the peripheral driver list if the user is looking
2813                  * for something like "da1", or all "da" devices.  If the
2814                  * user is looking for something on a particular bus/target
2815                  * or lun, it's generally better to go through the EDT tree.
2816                  */
2817
2818                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2819                         position_type = cdm->pos.position_type;
2820                 else {
2821                         u_int i;
2822
2823                         position_type = CAM_DEV_POS_NONE;
2824
2825                         for (i = 0; i < cdm->num_patterns; i++) {
2826                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2827                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2828                                         position_type = CAM_DEV_POS_EDT;
2829                                         break;
2830                                 }
2831                         }
2832
2833                         if (cdm->num_patterns == 0)
2834                                 position_type = CAM_DEV_POS_EDT;
2835                         else if (position_type == CAM_DEV_POS_NONE)
2836                                 position_type = CAM_DEV_POS_PDRV;
2837                 }
2838
2839                 switch(position_type & CAM_DEV_POS_TYPEMASK) {
2840                 case CAM_DEV_POS_EDT:
2841                         xptedtmatch(cdm);
2842                         break;
2843                 case CAM_DEV_POS_PDRV:
2844                         xptperiphlistmatch(cdm);
2845                         break;
2846                 default:
2847                         cdm->status = CAM_DEV_MATCH_ERROR;
2848                         break;
2849                 }
2850
2851                 if (cdm->status == CAM_DEV_MATCH_ERROR)
2852                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2853                 else
2854                         start_ccb->ccb_h.status = CAM_REQ_CMP;
2855
2856                 break;
2857         }
2858         case XPT_SASYNC_CB:
2859         {
2860                 struct ccb_setasync *csa;
2861                 struct async_node *cur_entry;
2862                 struct async_list *async_head;
2863                 u_int32_t added;
2864
2865                 csa = &start_ccb->csa;
2866                 added = csa->event_enable;
2867                 async_head = &path->device->asyncs;
2868
2869                 /*
2870                  * If there is already an entry for us, simply
2871                  * update it.
2872                  */
2873                 cur_entry = SLIST_FIRST(async_head);
2874                 while (cur_entry != NULL) {
2875                         if ((cur_entry->callback_arg == csa->callback_arg)
2876                          && (cur_entry->callback == csa->callback))
2877                                 break;
2878                         cur_entry = SLIST_NEXT(cur_entry, links);
2879                 }
2880
2881                 if (cur_entry != NULL) {
2882                         /*
2883                          * If the request has no flags set,
2884                          * remove the entry.
2885                          */
2886                         added &= ~cur_entry->event_enable;
2887                         if (csa->event_enable == 0) {
2888                                 SLIST_REMOVE(async_head, cur_entry,
2889                                              async_node, links);
2890                                 xpt_release_device(path->device);
2891                                 free(cur_entry, M_CAMXPT);
2892                         } else {
2893                                 cur_entry->event_enable = csa->event_enable;
2894                         }
2895                         csa->event_enable = added;
2896                 } else {
2897                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2898                                            M_NOWAIT);
2899                         if (cur_entry == NULL) {
2900                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2901                                 break;
2902                         }
2903                         cur_entry->event_enable = csa->event_enable;
2904                         cur_entry->event_lock = (path->bus->sim->mtx &&
2905                             mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
2906                         cur_entry->callback_arg = csa->callback_arg;
2907                         cur_entry->callback = csa->callback;
2908                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
2909                         xpt_acquire_device(path->device);
2910                 }
2911                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2912                 break;
2913         }
2914         case XPT_REL_SIMQ:
2915         {
2916                 struct ccb_relsim *crs;
2917                 struct cam_ed *dev;
2918
2919                 crs = &start_ccb->crs;
2920                 dev = path->device;
2921                 if (dev == NULL) {
2922
2923                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
2924                         break;
2925                 }
2926
2927                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2928
2929                         /* Don't ever go below one opening */
2930                         if (crs->openings > 0) {
2931                                 xpt_dev_ccbq_resize(path, crs->openings);
2932                                 if (bootverbose) {
2933                                         xpt_print(path,
2934                                             "number of openings is now %d\n",
2935                                             crs->openings);
2936                                 }
2937                         }
2938                 }
2939
2940                 mtx_lock(&dev->sim->devq->send_mtx);
2941                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2942
2943                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2944
2945                                 /*
2946                                  * Just extend the old timeout and decrement
2947                                  * the freeze count so that a single timeout
2948                                  * is sufficient for releasing the queue.
2949                                  */
2950                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2951                                 callout_stop(&dev->callout);
2952                         } else {
2953
2954                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2955                         }
2956
2957                         callout_reset_sbt(&dev->callout,
2958                             SBT_1MS * crs->release_timeout, 0,
2959                             xpt_release_devq_timeout, dev, 0);
2960
2961                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2962
2963                 }
2964
2965                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2966
2967                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2968                                 /*
2969                                  * Decrement the freeze count so that a single
2970                                  * completion is still sufficient to unfreeze
2971                                  * the queue.
2972                                  */
2973                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2974                         } else {
2975
2976                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2977                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2978                         }
2979                 }
2980
2981                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2982
2983                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2984                          || (dev->ccbq.dev_active == 0)) {
2985
2986                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2987                         } else {
2988
2989                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2990                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2991                         }
2992                 }
2993                 mtx_unlock(&dev->sim->devq->send_mtx);
2994
2995                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2996                         xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2997                 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2998                 start_ccb->ccb_h.status = CAM_REQ_CMP;
2999                 break;
3000         }
3001         case XPT_DEBUG: {
3002                 struct cam_path *oldpath;
3003
3004                 /* Check that all request bits are supported. */
3005                 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3006                         start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3007                         break;
3008                 }
3009
3010                 cam_dflags = CAM_DEBUG_NONE;
3011                 if (cam_dpath != NULL) {
3012                         oldpath = cam_dpath;
3013                         cam_dpath = NULL;
3014                         xpt_free_path(oldpath);
3015                 }
3016                 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3017                         if (xpt_create_path(&cam_dpath, NULL,
3018                                             start_ccb->ccb_h.path_id,
3019                                             start_ccb->ccb_h.target_id,
3020                                             start_ccb->ccb_h.target_lun) !=
3021                                             CAM_REQ_CMP) {
3022                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3023                         } else {
3024                                 cam_dflags = start_ccb->cdbg.flags;
3025                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3026                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3027                                     cam_dflags);
3028                         }
3029                 } else
3030                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3031                 break;
3032         }
3033         case XPT_NOOP:
3034                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3035                         xpt_freeze_devq(path, 1);
3036                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3037                 break;
3038         case XPT_REPROBE_LUN:
3039                 xpt_async(AC_INQ_CHANGED, path, NULL);
3040                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3041                 xpt_done(start_ccb);
3042                 break;
3043         default:
3044         case XPT_SDEV_TYPE:
3045         case XPT_TERM_IO:
3046         case XPT_ENG_INQ:
3047                 /* XXX Implement */
3048                 xpt_print_path(start_ccb->ccb_h.path);
3049                 printf("%s: CCB type %#x %s not supported\n", __func__,
3050                     start_ccb->ccb_h.func_code,
3051                     xpt_action_name(start_ccb->ccb_h.func_code));
3052                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3053                 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3054                         xpt_done(start_ccb);
3055                 }
3056                 break;
3057         }
3058         CAM_DEBUG(path, CAM_DEBUG_TRACE,
3059             ("xpt_action_default: func= %#x %s status %#x\n",
3060                 start_ccb->ccb_h.func_code,
3061                 xpt_action_name(start_ccb->ccb_h.func_code),
3062                 start_ccb->ccb_h.status));
3063 }
3064
3065 void
3066 xpt_polled_action(union ccb *start_ccb)
3067 {
3068         u_int32_t timeout;
3069         struct    cam_sim *sim;
3070         struct    cam_devq *devq;
3071         struct    cam_ed *dev;
3072         struct mtx *mtx;
3073
3074         timeout = start_ccb->ccb_h.timeout * 10;
3075         sim = start_ccb->ccb_h.path->bus->sim;
3076         devq = sim->devq;
3077         mtx = sim->mtx;
3078         dev = start_ccb->ccb_h.path->device;
3079
3080         mtx_unlock(&dev->device_mtx);
3081
3082         /*
3083          * Steal an opening so that no other queued requests
3084          * can get it before us while we simulate interrupts.
3085          */
3086         mtx_lock(&devq->send_mtx);
3087         dev->ccbq.dev_openings--;
3088         while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3089             (--timeout > 0)) {
3090                 mtx_unlock(&devq->send_mtx);
3091                 DELAY(100);
3092                 if (mtx)
3093                         mtx_lock(mtx);
3094                 (*(sim->sim_poll))(sim);
3095                 if (mtx)
3096                         mtx_unlock(mtx);
3097                 camisr_runqueue();
3098                 mtx_lock(&devq->send_mtx);
3099         }
3100         dev->ccbq.dev_openings++;
3101         mtx_unlock(&devq->send_mtx);
3102
3103         if (timeout != 0) {
3104                 xpt_action(start_ccb);
3105                 while(--timeout > 0) {
3106                         if (mtx)
3107                                 mtx_lock(mtx);
3108                         (*(sim->sim_poll))(sim);
3109                         if (mtx)
3110                                 mtx_unlock(mtx);
3111                         camisr_runqueue();
3112                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3113                             != CAM_REQ_INPROG)
3114                                 break;
3115                         DELAY(100);
3116                 }
3117                 if (timeout == 0) {
3118                         /*
3119                          * XXX Is it worth adding a sim_timeout entry
3120                          * point so we can attempt recovery?  If
3121                          * this is only used for dumps, I don't think
3122                          * it is.
3123                          */
3124                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3125                 }
3126         } else {
3127                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3128         }
3129
3130         mtx_lock(&dev->device_mtx);
3131 }
3132
3133 /*
3134  * Schedule a peripheral driver to receive a ccb when its
3135  * target device has space for more transactions.
3136  */
3137 void
3138 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3139 {
3140
3141         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3142         cam_periph_assert(periph, MA_OWNED);
3143         if (new_priority < periph->scheduled_priority) {
3144                 periph->scheduled_priority = new_priority;
3145                 xpt_run_allocq(periph, 0);
3146         }
3147 }
3148
3149
3150 /*
3151  * Schedule a device to run on a given queue.
3152  * If the device was inserted as a new entry on the queue,
3153  * return 1 meaning the device queue should be run. If we
3154  * were already queued, implying someone else has already
3155  * started the queue, return 0 so the caller doesn't attempt
3156  * to run the queue.
3157  */
3158 static int
3159 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3160                  u_int32_t new_priority)
3161 {
3162         int retval;
3163         u_int32_t old_priority;
3164
3165         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3166
3167         old_priority = pinfo->priority;
3168
3169         /*
3170          * Are we already queued?
3171          */
3172         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3173                 /* Simply reorder based on new priority */
3174                 if (new_priority < old_priority) {
3175                         camq_change_priority(queue, pinfo->index,
3176                                              new_priority);
3177                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3178                                         ("changed priority to %d\n",
3179                                          new_priority));
3180                         retval = 1;
3181                 } else
3182                         retval = 0;
3183         } else {
3184                 /* New entry on the queue */
3185                 if (new_priority < old_priority)
3186                         pinfo->priority = new_priority;
3187
3188                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3189                                 ("Inserting onto queue\n"));
3190                 pinfo->generation = ++queue->generation;
3191                 camq_insert(queue, pinfo);
3192                 retval = 1;
3193         }
3194         return (retval);
3195 }
3196
3197 static void
3198 xpt_run_allocq_task(void *context, int pending)
3199 {
3200         struct cam_periph *periph = context;
3201
3202         cam_periph_lock(periph);
3203         periph->flags &= ~CAM_PERIPH_RUN_TASK;
3204         xpt_run_allocq(periph, 1);
3205         cam_periph_unlock(periph);
3206         cam_periph_release(periph);
3207 }
3208
3209 static void
3210 xpt_run_allocq(struct cam_periph *periph, int sleep)
3211 {
3212         struct cam_ed   *device;
3213         union ccb       *ccb;
3214         uint32_t         prio;
3215
3216         cam_periph_assert(periph, MA_OWNED);
3217         if (periph->periph_allocating)
3218                 return;
3219         periph->periph_allocating = 1;
3220         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3221         device = periph->path->device;
3222         ccb = NULL;
3223 restart:
3224         while ((prio = min(periph->scheduled_priority,
3225             periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3226             (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3227              device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3228
3229                 if (ccb == NULL &&
3230                     (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3231                         if (sleep) {
3232                                 ccb = xpt_get_ccb(periph);
3233                                 goto restart;
3234                         }
3235                         if (periph->flags & CAM_PERIPH_RUN_TASK)
3236                                 break;
3237                         cam_periph_doacquire(periph);
3238                         periph->flags |= CAM_PERIPH_RUN_TASK;
3239                         taskqueue_enqueue(xsoftc.xpt_taskq,
3240                             &periph->periph_run_task);
3241                         break;
3242                 }
3243                 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3244                 if (prio == periph->immediate_priority) {
3245                         periph->immediate_priority = CAM_PRIORITY_NONE;
3246                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3247                                         ("waking cam_periph_getccb()\n"));
3248                         SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3249                                           periph_links.sle);
3250                         wakeup(&periph->ccb_list);
3251                 } else {
3252                         periph->scheduled_priority = CAM_PRIORITY_NONE;
3253                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3254                                         ("calling periph_start()\n"));
3255                         periph->periph_start(periph, ccb);
3256                 }
3257                 ccb = NULL;
3258         }
3259         if (ccb != NULL)
3260                 xpt_release_ccb(ccb);
3261         periph->periph_allocating = 0;
3262 }
3263
3264 static void
3265 xpt_run_devq(struct cam_devq *devq)
3266 {
3267         struct mtx *mtx;
3268
3269         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3270
3271         devq->send_queue.qfrozen_cnt++;
3272         while ((devq->send_queue.entries > 0)
3273             && (devq->send_openings > 0)
3274             && (devq->send_queue.qfrozen_cnt <= 1)) {
3275                 struct  cam_ed *device;
3276                 union ccb *work_ccb;
3277                 struct  cam_sim *sim;
3278                 struct xpt_proto *proto;
3279
3280                 device = (struct cam_ed *)camq_remove(&devq->send_queue,
3281                                                            CAMQ_HEAD);
3282                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3283                                 ("running device %p\n", device));
3284
3285                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3286                 if (work_ccb == NULL) {
3287                         printf("device on run queue with no ccbs???\n");
3288                         continue;
3289                 }
3290
3291                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3292
3293                         mtx_lock(&xsoftc.xpt_highpower_lock);
3294                         if (xsoftc.num_highpower <= 0) {
3295                                 /*
3296                                  * We got a high power command, but we
3297                                  * don't have any available slots.  Freeze
3298                                  * the device queue until we have a slot
3299                                  * available.
3300                                  */
3301                                 xpt_freeze_devq_device(device, 1);
3302                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3303                                                    highpowerq_entry);
3304
3305                                 mtx_unlock(&xsoftc.xpt_highpower_lock);
3306                                 continue;
3307                         } else {
3308                                 /*
3309                                  * Consume a high power slot while
3310                                  * this ccb runs.
3311                                  */
3312                                 xsoftc.num_highpower--;
3313                         }
3314                         mtx_unlock(&xsoftc.xpt_highpower_lock);
3315                 }
3316                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3317                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3318                 devq->send_openings--;
3319                 devq->send_active++;
3320                 xpt_schedule_devq(devq, device);
3321                 mtx_unlock(&devq->send_mtx);
3322
3323                 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3324                         /*
3325                          * The client wants to freeze the queue
3326                          * after this CCB is sent.
3327                          */
3328                         xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3329                 }
3330
3331                 /* In Target mode, the peripheral driver knows best... */
3332                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3333                         if ((device->inq_flags & SID_CmdQue) != 0
3334                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3335                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3336                         else
3337                                 /*
3338                                  * Clear this in case of a retried CCB that
3339                                  * failed due to a rejected tag.
3340                                  */
3341                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3342                 }
3343
3344                 KASSERT(device == work_ccb->ccb_h.path->device,
3345                     ("device (%p) / path->device (%p) mismatch",
3346                         device, work_ccb->ccb_h.path->device));
3347                 proto = xpt_proto_find(device->protocol);
3348                 if (proto && proto->ops->debug_out)
3349                         proto->ops->debug_out(work_ccb);
3350
3351                 /*
3352                  * Device queues can be shared among multiple SIM instances
3353                  * that reside on different busses.  Use the SIM from the
3354                  * queued device, rather than the one from the calling bus.
3355                  */
3356                 sim = device->sim;
3357                 mtx = sim->mtx;
3358                 if (mtx && !mtx_owned(mtx))
3359                         mtx_lock(mtx);
3360                 else
3361                         mtx = NULL;
3362                 work_ccb->ccb_h.qos.sim_data = sbinuptime(); // xxx uintprt_t too small 32bit platforms
3363                 (*(sim->sim_action))(sim, work_ccb);
3364                 if (mtx)
3365                         mtx_unlock(mtx);
3366                 mtx_lock(&devq->send_mtx);
3367         }
3368         devq->send_queue.qfrozen_cnt--;
3369 }
3370
3371 /*
3372  * This function merges stuff from the slave ccb into the master ccb, while
3373  * keeping important fields in the master ccb constant.
3374  */
3375 void
3376 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3377 {
3378
3379         /*
3380          * Pull fields that are valid for peripheral drivers to set
3381          * into the master CCB along with the CCB "payload".
3382          */
3383         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3384         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3385         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3386         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3387         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3388               sizeof(union ccb) - sizeof(struct ccb_hdr));
3389 }
3390
3391 void
3392 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3393                     u_int32_t priority, u_int32_t flags)
3394 {
3395
3396         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3397         ccb_h->pinfo.priority = priority;
3398         ccb_h->path = path;
3399         ccb_h->path_id = path->bus->path_id;
3400         if (path->target)
3401                 ccb_h->target_id = path->target->target_id;
3402         else
3403                 ccb_h->target_id = CAM_TARGET_WILDCARD;
3404         if (path->device) {
3405                 ccb_h->target_lun = path->device->lun_id;
3406                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3407         } else {
3408                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3409         }
3410         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3411         ccb_h->flags = flags;
3412         ccb_h->xflags = 0;
3413 }
3414
3415 void
3416 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3417 {
3418         xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3419 }
3420
3421 /* Path manipulation functions */
3422 cam_status
3423 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3424                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3425 {
3426         struct     cam_path *path;
3427         cam_status status;
3428
3429         path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3430
3431         if (path == NULL) {
3432                 status = CAM_RESRC_UNAVAIL;
3433                 return(status);
3434         }
3435         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3436         if (status != CAM_REQ_CMP) {
3437                 free(path, M_CAMPATH);
3438                 path = NULL;
3439         }
3440         *new_path_ptr = path;
3441         return (status);
3442 }
3443
3444 cam_status
3445 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3446                          struct cam_periph *periph, path_id_t path_id,
3447                          target_id_t target_id, lun_id_t lun_id)
3448 {
3449
3450         return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3451             lun_id));
3452 }
3453
3454 cam_status
3455 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3456                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3457 {
3458         struct       cam_eb *bus;
3459         struct       cam_et *target;
3460         struct       cam_ed *device;
3461         cam_status   status;
3462
3463         status = CAM_REQ_CMP;   /* Completed without error */
3464         target = NULL;          /* Wildcarded */
3465         device = NULL;          /* Wildcarded */
3466
3467         /*
3468          * We will potentially modify the EDT, so block interrupts
3469          * that may attempt to create cam paths.
3470          */
3471         bus = xpt_find_bus(path_id);
3472         if (bus == NULL) {
3473                 status = CAM_PATH_INVALID;
3474         } else {
3475                 xpt_lock_buses();
3476                 mtx_lock(&bus->eb_mtx);
3477                 target = xpt_find_target(bus, target_id);
3478                 if (target == NULL) {
3479                         /* Create one */
3480                         struct cam_et *new_target;
3481
3482                         new_target = xpt_alloc_target(bus, target_id);
3483                         if (new_target == NULL) {
3484                                 status = CAM_RESRC_UNAVAIL;
3485                         } else {
3486                                 target = new_target;
3487                         }
3488                 }
3489                 xpt_unlock_buses();
3490                 if (target != NULL) {
3491                         device = xpt_find_device(target, lun_id);
3492                         if (device == NULL) {
3493                                 /* Create one */
3494                                 struct cam_ed *new_device;
3495
3496                                 new_device =
3497                                     (*(bus->xport->ops->alloc_device))(bus,
3498                                                                        target,
3499                                                                        lun_id);
3500                                 if (new_device == NULL) {
3501                                         status = CAM_RESRC_UNAVAIL;
3502                                 } else {
3503                                         device = new_device;
3504                                 }
3505                         }
3506                 }
3507                 mtx_unlock(&bus->eb_mtx);
3508         }
3509
3510         /*
3511          * Only touch the user's data if we are successful.
3512          */
3513         if (status == CAM_REQ_CMP) {
3514                 new_path->periph = perph;
3515                 new_path->bus = bus;
3516                 new_path->target = target;
3517                 new_path->device = device;
3518                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3519         } else {
3520                 if (device != NULL)
3521                         xpt_release_device(device);
3522                 if (target != NULL)
3523                         xpt_release_target(target);
3524                 if (bus != NULL)
3525                         xpt_release_bus(bus);
3526         }
3527         return (status);
3528 }
3529
3530 cam_status
3531 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3532 {
3533         struct     cam_path *new_path;
3534
3535         new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3536         if (new_path == NULL)
3537                 return(CAM_RESRC_UNAVAIL);
3538         xpt_copy_path(new_path, path);
3539         *new_path_ptr = new_path;
3540         return (CAM_REQ_CMP);
3541 }
3542
3543 void
3544 xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3545 {
3546
3547         *new_path = *path;
3548         if (path->bus != NULL)
3549                 xpt_acquire_bus(path->bus);
3550         if (path->target != NULL)
3551                 xpt_acquire_target(path->target);
3552         if (path->device != NULL)
3553                 xpt_acquire_device(path->device);
3554 }
3555
3556 void
3557 xpt_release_path(struct cam_path *path)
3558 {
3559         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3560         if (path->device != NULL) {
3561                 xpt_release_device(path->device);
3562                 path->device = NULL;
3563         }
3564         if (path->target != NULL) {
3565                 xpt_release_target(path->target);
3566                 path->target = NULL;
3567         }
3568         if (path->bus != NULL) {
3569                 xpt_release_bus(path->bus);
3570                 path->bus = NULL;
3571         }
3572 }
3573
3574 void
3575 xpt_free_path(struct cam_path *path)
3576 {
3577
3578         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3579         xpt_release_path(path);
3580         free(path, M_CAMPATH);
3581 }
3582
3583 void
3584 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3585     uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3586 {
3587
3588         xpt_lock_buses();
3589         if (bus_ref) {
3590                 if (path->bus)
3591                         *bus_ref = path->bus->refcount;
3592                 else
3593                         *bus_ref = 0;
3594         }
3595         if (periph_ref) {
3596                 if (path->periph)
3597                         *periph_ref = path->periph->refcount;
3598                 else
3599                         *periph_ref = 0;
3600         }
3601         xpt_unlock_buses();
3602         if (target_ref) {
3603                 if (path->target)
3604                         *target_ref = path->target->refcount;
3605                 else
3606                         *target_ref = 0;
3607         }
3608         if (device_ref) {
3609                 if (path->device)
3610                         *device_ref = path->device->refcount;
3611                 else
3612                         *device_ref = 0;
3613         }
3614 }
3615
3616 /*
3617  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3618  * in path1, 2 for match with wildcards in path2.
3619  */
3620 int
3621 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3622 {
3623         int retval = 0;
3624
3625         if (path1->bus != path2->bus) {
3626                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
3627                         retval = 1;
3628                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3629                         retval = 2;
3630                 else
3631                         return (-1);
3632         }
3633         if (path1->target != path2->target) {
3634                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3635                         if (retval == 0)
3636                                 retval = 1;
3637                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3638                         retval = 2;
3639                 else
3640                         return (-1);
3641         }
3642         if (path1->device != path2->device) {
3643                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3644                         if (retval == 0)
3645                                 retval = 1;
3646                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3647                         retval = 2;
3648                 else
3649                         return (-1);
3650         }
3651         return (retval);
3652 }
3653
3654 int
3655 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3656 {
3657         int retval = 0;
3658
3659         if (path->bus != dev->target->bus) {
3660                 if (path->bus->path_id == CAM_BUS_WILDCARD)
3661                         retval = 1;
3662                 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3663                         retval = 2;
3664                 else
3665                         return (-1);
3666         }
3667         if (path->target != dev->target) {
3668                 if (path->target->target_id == CAM_TARGET_WILDCARD) {
3669                         if (retval == 0)
3670                                 retval = 1;
3671                 } else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3672                         retval = 2;
3673                 else
3674                         return (-1);
3675         }
3676         if (path->device != dev) {
3677                 if (path->device->lun_id == CAM_LUN_WILDCARD) {
3678                         if (retval == 0)
3679                                 retval = 1;
3680                 } else if (dev->lun_id == CAM_LUN_WILDCARD)
3681                         retval = 2;
3682                 else
3683                         return (-1);
3684         }
3685         return (retval);
3686 }
3687
3688 void
3689 xpt_print_path(struct cam_path *path)
3690 {
3691
3692         if (path == NULL)
3693                 printf("(nopath): ");
3694         else {
3695                 if (path->periph != NULL)
3696                         printf("(%s%d:", path->periph->periph_name,
3697                                path->periph->unit_number);
3698                 else
3699                         printf("(noperiph:");
3700
3701                 if (path->bus != NULL)
3702                         printf("%s%d:%d:", path->bus->sim->sim_name,
3703                                path->bus->sim->unit_number,
3704                                path->bus->sim->bus_id);
3705                 else
3706                         printf("nobus:");
3707
3708                 if (path->target != NULL)
3709                         printf("%d:", path->target->target_id);
3710                 else
3711                         printf("X:");
3712
3713                 if (path->device != NULL)
3714                         printf("%jx): ", (uintmax_t)path->device->lun_id);
3715                 else
3716                         printf("X): ");
3717         }
3718 }
3719
3720 void
3721 xpt_print_device(struct cam_ed *device)
3722 {
3723
3724         if (device == NULL)
3725                 printf("(nopath): ");
3726         else {
3727                 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3728                        device->sim->unit_number,
3729                        device->sim->bus_id,
3730                        device->target->target_id,
3731                        (uintmax_t)device->lun_id);
3732         }
3733 }
3734
3735 void
3736 xpt_print(struct cam_path *path, const char *fmt, ...)
3737 {
3738         va_list ap;
3739         xpt_print_path(path);
3740         va_start(ap, fmt);
3741         vprintf(fmt, ap);
3742         va_end(ap);
3743 }
3744
3745 int
3746 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3747 {
3748         struct sbuf sb;
3749
3750         sbuf_new(&sb, str, str_len, 0);
3751
3752         if (path == NULL)
3753                 sbuf_printf(&sb, "(nopath): ");
3754         else {
3755                 if (path->periph != NULL)
3756                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3757                                     path->periph->unit_number);
3758                 else
3759                         sbuf_printf(&sb, "(noperiph:");
3760
3761                 if (path->bus != NULL)
3762                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3763                                     path->bus->sim->unit_number,
3764                                     path->bus->sim->bus_id);
3765                 else
3766                         sbuf_printf(&sb, "nobus:");
3767
3768                 if (path->target != NULL)
3769                         sbuf_printf(&sb, "%d:", path->target->target_id);
3770                 else
3771                         sbuf_printf(&sb, "X:");
3772
3773                 if (path->device != NULL)
3774                         sbuf_printf(&sb, "%jx): ",
3775                             (uintmax_t)path->device->lun_id);
3776                 else
3777                         sbuf_printf(&sb, "X): ");
3778         }
3779         sbuf_finish(&sb);
3780
3781         return(sbuf_len(&sb));
3782 }
3783
3784 path_id_t
3785 xpt_path_path_id(struct cam_path *path)
3786 {
3787         return(path->bus->path_id);
3788 }
3789
3790 target_id_t
3791 xpt_path_target_id(struct cam_path *path)
3792 {
3793         if (path->target != NULL)
3794                 return (path->target->target_id);
3795         else
3796                 return (CAM_TARGET_WILDCARD);
3797 }
3798
3799 lun_id_t
3800 xpt_path_lun_id(struct cam_path *path)
3801 {
3802         if (path->device != NULL)
3803                 return (path->device->lun_id);
3804         else
3805                 return (CAM_LUN_WILDCARD);
3806 }
3807
3808 struct cam_sim *
3809 xpt_path_sim(struct cam_path *path)
3810 {
3811
3812         return (path->bus->sim);
3813 }
3814
3815 struct cam_periph*
3816 xpt_path_periph(struct cam_path *path)
3817 {
3818
3819         return (path->periph);
3820 }
3821
3822 /*
3823  * Release a CAM control block for the caller.  Remit the cost of the structure
3824  * to the device referenced by the path.  If the this device had no 'credits'
3825  * and peripheral drivers have registered async callbacks for this notification
3826  * call them now.
3827  */
3828 void
3829 xpt_release_ccb(union ccb *free_ccb)
3830 {
3831         struct   cam_ed *device;
3832         struct   cam_periph *periph;
3833
3834         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3835         xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3836         device = free_ccb->ccb_h.path->device;
3837         periph = free_ccb->ccb_h.path->periph;
3838
3839         xpt_free_ccb(free_ccb);
3840         periph->periph_allocated--;
3841         cam_ccbq_release_opening(&device->ccbq);
3842         xpt_run_allocq(periph, 0);
3843 }
3844
3845 /* Functions accessed by SIM drivers */
3846
3847 static struct xpt_xport_ops xport_default_ops = {
3848         .alloc_device = xpt_alloc_device_default,
3849         .action = xpt_action_default,
3850         .async = xpt_dev_async_default,
3851 };
3852 static struct xpt_xport xport_default = {
3853         .xport = XPORT_UNKNOWN,
3854         .name = "unknown",
3855         .ops = &xport_default_ops,
3856 };
3857
3858 CAM_XPT_XPORT(xport_default);
3859
3860 /*
3861  * A sim structure, listing the SIM entry points and instance
3862  * identification info is passed to xpt_bus_register to hook the SIM
3863  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3864  * for this new bus and places it in the array of busses and assigns
3865  * it a path_id.  The path_id may be influenced by "hard wiring"
3866  * information specified by the user.  Once interrupt services are
3867  * available, the bus will be probed.
3868  */
3869 int32_t
3870 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3871 {
3872         struct cam_eb *new_bus;
3873         struct cam_eb *old_bus;
3874         struct ccb_pathinq cpi;
3875         struct cam_path *path;
3876         cam_status status;
3877
3878         sim->bus_id = bus;
3879         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3880                                           M_CAMXPT, M_NOWAIT|M_ZERO);
3881         if (new_bus == NULL) {
3882                 /* Couldn't satisfy request */
3883                 return (CAM_RESRC_UNAVAIL);
3884         }
3885
3886         mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3887         TAILQ_INIT(&new_bus->et_entries);
3888         cam_sim_hold(sim);
3889         new_bus->sim = sim;
3890         timevalclear(&new_bus->last_reset);
3891         new_bus->flags = 0;
3892         new_bus->refcount = 1;  /* Held until a bus_deregister event */
3893         new_bus->generation = 0;
3894
3895         xpt_lock_buses();
3896         sim->path_id = new_bus->path_id =
3897             xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3898         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3899         while (old_bus != NULL
3900             && old_bus->path_id < new_bus->path_id)
3901                 old_bus = TAILQ_NEXT(old_bus, links);
3902         if (old_bus != NULL)
3903                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3904         else
3905                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3906         xsoftc.bus_generation++;
3907         xpt_unlock_buses();
3908
3909         /*
3910          * Set a default transport so that a PATH_INQ can be issued to
3911          * the SIM.  This will then allow for probing and attaching of
3912          * a more appropriate transport.
3913          */
3914         new_bus->xport = &xport_default;
3915
3916         status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3917                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3918         if (status != CAM_REQ_CMP) {
3919                 xpt_release_bus(new_bus);
3920                 return (CAM_RESRC_UNAVAIL);
3921         }
3922
3923         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3924         cpi.ccb_h.func_code = XPT_PATH_INQ;
3925         xpt_action((union ccb *)&cpi);
3926
3927         if (cpi.ccb_h.status == CAM_REQ_CMP) {
3928                 struct xpt_xport **xpt;
3929
3930                 SET_FOREACH(xpt, cam_xpt_xport_set) {
3931                         if ((*xpt)->xport == cpi.transport) {
3932                                 new_bus->xport = *xpt;
3933                                 break;
3934                         }
3935                 }
3936                 if (new_bus->xport == NULL) {
3937                         xpt_print_path(path);
3938                         printf("No transport found for %d\n", cpi.transport);
3939                         xpt_release_bus(new_bus);
3940                         free(path, M_CAMXPT);
3941                         return (CAM_RESRC_UNAVAIL);
3942                 }
3943         }
3944
3945         /* Notify interested parties */
3946         if (sim->path_id != CAM_XPT_PATH_ID) {
3947
3948                 xpt_async(AC_PATH_REGISTERED, path, &cpi);
3949                 if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3950                         union   ccb *scan_ccb;
3951
3952                         /* Initiate bus rescan. */
3953                         scan_ccb = xpt_alloc_ccb_nowait();
3954                         if (scan_ccb != NULL) {
3955                                 scan_ccb->ccb_h.path = path;
3956                                 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3957                                 scan_ccb->crcn.flags = 0;
3958                                 xpt_rescan(scan_ccb);
3959                         } else {
3960                                 xpt_print(path,
3961                                           "Can't allocate CCB to scan bus\n");
3962                                 xpt_free_path(path);
3963                         }
3964                 } else
3965                         xpt_free_path(path);
3966         } else
3967                 xpt_free_path(path);
3968         return (CAM_SUCCESS);
3969 }
3970
3971 int32_t
3972 xpt_bus_deregister(path_id_t pathid)
3973 {
3974         struct cam_path bus_path;
3975         cam_status status;
3976
3977         status = xpt_compile_path(&bus_path, NULL, pathid,
3978                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3979         if (status != CAM_REQ_CMP)
3980                 return (status);
3981
3982         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3983         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3984
3985         /* Release the reference count held while registered. */
3986         xpt_release_bus(bus_path.bus);
3987         xpt_release_path(&bus_path);
3988
3989         return (CAM_REQ_CMP);
3990 }
3991
3992 static path_id_t
3993 xptnextfreepathid(void)
3994 {
3995         struct cam_eb *bus;
3996         path_id_t pathid;
3997         const char *strval;
3998
3999         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4000         pathid = 0;
4001         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4002 retry:
4003         /* Find an unoccupied pathid */
4004         while (bus != NULL && bus->path_id <= pathid) {
4005                 if (bus->path_id == pathid)
4006                         pathid++;
4007                 bus = TAILQ_NEXT(bus, links);
4008         }
4009
4010         /*
4011          * Ensure that this pathid is not reserved for
4012          * a bus that may be registered in the future.
4013          */
4014         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4015                 ++pathid;
4016                 /* Start the search over */
4017                 goto retry;
4018         }
4019         return (pathid);
4020 }
4021
4022 static path_id_t
4023 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4024 {
4025         path_id_t pathid;
4026         int i, dunit, val;
4027         char buf[32];
4028         const char *dname;
4029
4030         pathid = CAM_XPT_PATH_ID;
4031         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4032         if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4033                 return (pathid);
4034         i = 0;
4035         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4036                 if (strcmp(dname, "scbus")) {
4037                         /* Avoid a bit of foot shooting. */
4038                         continue;
4039                 }
4040                 if (dunit < 0)          /* unwired?! */
4041                         continue;
4042                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4043                         if (sim_bus == val) {
4044                                 pathid = dunit;
4045                                 break;
4046                         }
4047                 } else if (sim_bus == 0) {
4048                         /* Unspecified matches bus 0 */
4049                         pathid = dunit;
4050                         break;
4051                 } else {
4052                         printf("Ambiguous scbus configuration for %s%d "
4053                                "bus %d, cannot wire down.  The kernel "
4054                                "config entry for scbus%d should "
4055                                "specify a controller bus.\n"
4056                                "Scbus will be assigned dynamically.\n",
4057                                sim_name, sim_unit, sim_bus, dunit);
4058                         break;
4059                 }
4060         }
4061
4062         if (pathid == CAM_XPT_PATH_ID)
4063                 pathid = xptnextfreepathid();
4064         return (pathid);
4065 }
4066
4067 static const char *
4068 xpt_async_string(u_int32_t async_code)
4069 {
4070
4071         switch (async_code) {
4072         case AC_BUS_RESET: return ("AC_BUS_RESET");
4073         case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4074         case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4075         case AC_SENT_BDR: return ("AC_SENT_BDR");
4076         case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4077         case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4078         case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4079         case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4080         case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4081         case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4082         case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4083         case AC_CONTRACT: return ("AC_CONTRACT");
4084         case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4085         case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4086         }
4087         return ("AC_UNKNOWN");
4088 }
4089
4090 static int
4091 xpt_async_size(u_int32_t async_code)
4092 {
4093
4094         switch (async_code) {
4095         case AC_BUS_RESET: return (0);
4096         case AC_UNSOL_RESEL: return (0);
4097         case AC_SCSI_AEN: return (0);
4098         case AC_SENT_BDR: return (0);
4099         case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4100         case AC_PATH_DEREGISTERED: return (0);
4101         case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4102         case AC_LOST_DEVICE: return (0);
4103         case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4104         case AC_INQ_CHANGED: return (0);
4105         case AC_GETDEV_CHANGED: return (0);
4106         case AC_CONTRACT: return (sizeof(struct ac_contract));
4107         case AC_ADVINFO_CHANGED: return (-1);
4108         case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4109         }
4110         return (0);
4111 }
4112
4113 static int
4114 xpt_async_process_dev(struct cam_ed *device, void *arg)
4115 {
4116         union ccb *ccb = arg;
4117         struct cam_path *path = ccb->ccb_h.path;
4118         void *async_arg = ccb->casync.async_arg_ptr;
4119         u_int32_t async_code = ccb->casync.async_code;
4120         int relock;
4121
4122         if (path->device != device
4123          && path->device->lun_id != CAM_LUN_WILDCARD
4124          && device->lun_id != CAM_LUN_WILDCARD)
4125                 return (1);
4126
4127         /*
4128          * The async callback could free the device.
4129          * If it is a broadcast async, it doesn't hold
4130          * device reference, so take our own reference.
4131          */
4132         xpt_acquire_device(device);
4133
4134         /*
4135          * If async for specific device is to be delivered to
4136          * the wildcard client, take the specific device lock.
4137          * XXX: We may need a way for client to specify it.
4138          */
4139         if ((device->lun_id == CAM_LUN_WILDCARD &&
4140              path->device->lun_id != CAM_LUN_WILDCARD) ||
4141             (device->target->target_id == CAM_TARGET_WILDCARD &&
4142              path->target->target_id != CAM_TARGET_WILDCARD) ||
4143             (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4144              path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4145                 mtx_unlock(&device->device_mtx);
4146                 xpt_path_lock(path);
4147                 relock = 1;
4148         } else
4149                 relock = 0;
4150
4151         (*(device->target->bus->xport->ops->async))(async_code,
4152             device->target->bus, device->target, device, async_arg);
4153         xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4154
4155         if (relock) {
4156                 xpt_path_unlock(path);
4157                 mtx_lock(&device->device_mtx);
4158         }
4159         xpt_release_device(device);
4160         return (1);
4161 }
4162
4163 static int
4164 xpt_async_process_tgt(struct cam_et *target, void *arg)
4165 {
4166         union ccb *ccb = arg;
4167         struct cam_path *path = ccb->ccb_h.path;
4168
4169         if (path->target != target
4170          && path->target->target_id != CAM_TARGET_WILDCARD
4171          && target->target_id != CAM_TARGET_WILDCARD)
4172                 return (1);
4173
4174         if (ccb->casync.async_code == AC_SENT_BDR) {
4175                 /* Update our notion of when the last reset occurred */
4176                 microtime(&target->last_reset);
4177         }
4178
4179         return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4180 }
4181
4182 static void
4183 xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4184 {
4185         struct cam_eb *bus;
4186         struct cam_path *path;
4187         void *async_arg;
4188         u_int32_t async_code;
4189
4190         path = ccb->ccb_h.path;
4191         async_code = ccb->casync.async_code;
4192         async_arg = ccb->casync.async_arg_ptr;
4193         CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4194             ("xpt_async(%s)\n", xpt_async_string(async_code)));
4195         bus = path->bus;
4196
4197         if (async_code == AC_BUS_RESET) {
4198                 /* Update our notion of when the last reset occurred */
4199                 microtime(&bus->last_reset);
4200         }
4201
4202         xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4203
4204         /*
4205          * If this wasn't a fully wildcarded async, tell all
4206          * clients that want all async events.
4207          */
4208         if (bus != xpt_periph->path->bus) {
4209                 xpt_path_lock(xpt_periph->path);
4210                 xpt_async_process_dev(xpt_periph->path->device, ccb);
4211                 xpt_path_unlock(xpt_periph->path);
4212         }
4213
4214         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4215                 xpt_release_devq(path, 1, TRUE);
4216         else
4217                 xpt_release_simq(path->bus->sim, TRUE);
4218         if (ccb->casync.async_arg_size > 0)
4219                 free(async_arg, M_CAMXPT);
4220         xpt_free_path(path);
4221         xpt_free_ccb(ccb);
4222 }
4223
4224 static void
4225 xpt_async_bcast(struct async_list *async_head,
4226                 u_int32_t async_code,
4227                 struct cam_path *path, void *async_arg)
4228 {
4229         struct async_node *cur_entry;
4230         struct mtx *mtx;
4231
4232         cur_entry = SLIST_FIRST(async_head);
4233         while (cur_entry != NULL) {
4234                 struct async_node *next_entry;
4235                 /*
4236                  * Grab the next list entry before we call the current
4237                  * entry's callback.  This is because the callback function
4238                  * can delete its async callback entry.
4239                  */
4240                 next_entry = SLIST_NEXT(cur_entry, links);
4241                 if ((cur_entry->event_enable & async_code) != 0) {
4242                         mtx = cur_entry->event_lock ?
4243                             path->device->sim->mtx : NULL;
4244                         if (mtx)
4245                                 mtx_lock(mtx);
4246                         cur_entry->callback(cur_entry->callback_arg,
4247                                             async_code, path,
4248                                             async_arg);
4249                         if (mtx)
4250                                 mtx_unlock(mtx);
4251                 }
4252                 cur_entry = next_entry;
4253         }
4254 }
4255
4256 void
4257 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4258 {
4259         union ccb *ccb;
4260         int size;
4261
4262         ccb = xpt_alloc_ccb_nowait();
4263         if (ccb == NULL) {
4264                 xpt_print(path, "Can't allocate CCB to send %s\n",
4265                     xpt_async_string(async_code));
4266                 return;
4267         }
4268
4269         if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4270                 xpt_print(path, "Can't allocate path to send %s\n",
4271                     xpt_async_string(async_code));
4272                 xpt_free_ccb(ccb);
4273                 return;
4274         }
4275         ccb->ccb_h.path->periph = NULL;
4276         ccb->ccb_h.func_code = XPT_ASYNC;
4277         ccb->ccb_h.cbfcnp = xpt_async_process;
4278         ccb->ccb_h.flags |= CAM_UNLOCKED;
4279         ccb->casync.async_code = async_code;
4280         ccb->casync.async_arg_size = 0;
4281         size = xpt_async_size(async_code);
4282         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4283             ("xpt_async: func %#x %s aync_code %d %s\n",
4284                 ccb->ccb_h.func_code,
4285                 xpt_action_name(ccb->ccb_h.func_code),
4286                 async_code,
4287                 xpt_async_string(async_code)));
4288         if (size > 0 && async_arg != NULL) {
4289                 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4290                 if (ccb->casync.async_arg_ptr == NULL) {
4291                         xpt_print(path, "Can't allocate argument to send %s\n",
4292                             xpt_async_string(async_code));
4293                         xpt_free_path(ccb->ccb_h.path);
4294                         xpt_free_ccb(ccb);
4295                         return;
4296                 }
4297                 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4298                 ccb->casync.async_arg_size = size;
4299         } else if (size < 0) {
4300                 ccb->casync.async_arg_ptr = async_arg;
4301                 ccb->casync.async_arg_size = size;
4302         }
4303         if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4304                 xpt_freeze_devq(path, 1);
4305         else
4306                 xpt_freeze_simq(path->bus->sim, 1);
4307         xpt_done(ccb);
4308 }
4309
4310 static void
4311 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4312                       struct cam_et *target, struct cam_ed *device,
4313                       void *async_arg)
4314 {
4315
4316         /*
4317          * We only need to handle events for real devices.
4318          */
4319         if (target->target_id == CAM_TARGET_WILDCARD
4320          || device->lun_id == CAM_LUN_WILDCARD)
4321                 return;
4322
4323         printf("%s called\n", __func__);
4324 }
4325
4326 static uint32_t
4327 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4328 {
4329         struct cam_devq *devq;
4330         uint32_t freeze;
4331
4332         devq = dev->sim->devq;
4333         mtx_assert(&devq->send_mtx, MA_OWNED);
4334         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4335             ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4336             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4337         freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4338         /* Remove frozen device from sendq. */
4339         if (device_is_queued(dev))
4340                 camq_remove(&devq->send_queue, dev->devq_entry.index);
4341         return (freeze);
4342 }
4343
4344 u_int32_t
4345 xpt_freeze_devq(struct cam_path *path, u_int count)
4346 {
4347         struct cam_ed   *dev = path->device;
4348         struct cam_devq *devq;
4349         uint32_t         freeze;
4350
4351         devq = dev->sim->devq;
4352         mtx_lock(&devq->send_mtx);
4353         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4354         freeze = xpt_freeze_devq_device(dev, count);
4355         mtx_unlock(&devq->send_mtx);
4356         return (freeze);
4357 }
4358
4359 u_int32_t
4360 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4361 {
4362         struct cam_devq *devq;
4363         uint32_t         freeze;
4364
4365         devq = sim->devq;
4366         mtx_lock(&devq->send_mtx);
4367         freeze = (devq->send_queue.qfrozen_cnt += count);
4368         mtx_unlock(&devq->send_mtx);
4369         return (freeze);
4370 }
4371
4372 static void
4373 xpt_release_devq_timeout(void *arg)
4374 {
4375         struct cam_ed *dev;
4376         struct cam_devq *devq;
4377
4378         dev = (struct cam_ed *)arg;
4379         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4380         devq = dev->sim->devq;
4381         mtx_assert(&devq->send_mtx, MA_OWNED);
4382         if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4383                 xpt_run_devq(devq);
4384 }
4385
4386 void
4387 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4388 {
4389         struct cam_ed *dev;
4390         struct cam_devq *devq;
4391
4392         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4393             count, run_queue));
4394         dev = path->device;
4395         devq = dev->sim->devq;
4396         mtx_lock(&devq->send_mtx);
4397         if (xpt_release_devq_device(dev, count, run_queue))
4398                 xpt_run_devq(dev->sim->devq);
4399         mtx_unlock(&devq->send_mtx);
4400 }
4401
4402 static int
4403 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4404 {
4405
4406         mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4407         CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4408             ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4409             dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4410         if (count > dev->ccbq.queue.qfrozen_cnt) {
4411 #ifdef INVARIANTS
4412                 printf("xpt_release_devq(): requested %u > present %u\n",
4413                     count, dev->ccbq.queue.qfrozen_cnt);
4414 #endif
4415                 count = dev->ccbq.queue.qfrozen_cnt;
4416         }
4417         dev->ccbq.queue.qfrozen_cnt -= count;
4418         if (dev->ccbq.queue.qfrozen_cnt == 0) {
4419                 /*
4420                  * No longer need to wait for a successful
4421                  * command completion.
4422                  */
4423                 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4424                 /*
4425                  * Remove any timeouts that might be scheduled
4426                  * to release this queue.
4427                  */
4428                 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4429                         callout_stop(&dev->callout);
4430                         dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4431                 }
4432                 /*
4433                  * Now that we are unfrozen schedule the
4434                  * device so any pending transactions are
4435                  * run.
4436                  */
4437                 xpt_schedule_devq(dev->sim->devq, dev);
4438         } else
4439                 run_queue = 0;
4440         return (run_queue);
4441 }
4442
4443 void
4444 xpt_release_simq(struct cam_sim *sim, int run_queue)
4445 {
4446         struct cam_devq *devq;
4447
4448         devq = sim->devq;
4449         mtx_lock(&devq->send_mtx);
4450         if (devq->send_queue.qfrozen_cnt <= 0) {
4451 #ifdef INVARIANTS
4452                 printf("xpt_release_simq: requested 1 > present %u\n",
4453                     devq->send_queue.qfrozen_cnt);
4454 #endif
4455         } else
4456                 devq->send_queue.qfrozen_cnt--;
4457         if (devq->send_queue.qfrozen_cnt == 0) {
4458                 /*
4459                  * If there is a timeout scheduled to release this
4460                  * sim queue, remove it.  The queue frozen count is
4461                  * already at 0.
4462                  */
4463                 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4464                         callout_stop(&sim->callout);
4465                         sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4466                 }
4467                 if (run_queue) {
4468                         /*
4469                          * Now that we are unfrozen run the send queue.
4470                          */
4471                         xpt_run_devq(sim->devq);
4472                 }
4473         }
4474         mtx_unlock(&devq->send_mtx);
4475 }
4476
4477 /*
4478  * XXX Appears to be unused.
4479  */
4480 static void
4481 xpt_release_simq_timeout(void *arg)
4482 {
4483         struct cam_sim *sim;
4484
4485         sim = (struct cam_sim *)arg;
4486         xpt_release_simq(sim, /* run_queue */ TRUE);
4487 }
4488
4489 void
4490 xpt_done(union ccb *done_ccb)
4491 {
4492         struct cam_doneq *queue;
4493         int     run, hash;
4494
4495         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4496             ("xpt_done: func= %#x %s status %#x\n",
4497                 done_ccb->ccb_h.func_code,
4498                 xpt_action_name(done_ccb->ccb_h.func_code),
4499                 done_ccb->ccb_h.status));
4500         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4501                 return;
4502
4503         /* Store the time the ccb was in the sim */
4504         done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
4505         hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4506             done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4507         queue = &cam_doneqs[hash];
4508         mtx_lock(&queue->cam_doneq_mtx);
4509         run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4510         STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4511         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4512         mtx_unlock(&queue->cam_doneq_mtx);
4513         if (run)
4514                 wakeup(&queue->cam_doneq);
4515 }
4516
4517 void
4518 xpt_done_direct(union ccb *done_ccb)
4519 {
4520
4521         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4522             ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4523         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4524                 return;
4525
4526         /* Store the time the ccb was in the sim */
4527         done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
4528         xpt_done_process(&done_ccb->ccb_h);
4529 }
4530
4531 union ccb *
4532 xpt_alloc_ccb()
4533 {
4534         union ccb *new_ccb;
4535
4536         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4537         return (new_ccb);
4538 }
4539
4540 union ccb *
4541 xpt_alloc_ccb_nowait()
4542 {
4543         union ccb *new_ccb;
4544
4545         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4546         return (new_ccb);
4547 }
4548
4549 void
4550 xpt_free_ccb(union ccb *free_ccb)
4551 {
4552         free(free_ccb, M_CAMCCB);
4553 }
4554
4555
4556
4557 /* Private XPT functions */
4558
4559 /*
4560  * Get a CAM control block for the caller. Charge the structure to the device
4561  * referenced by the path.  If we don't have sufficient resources to allocate
4562  * more ccbs, we return NULL.
4563  */
4564 static union ccb *
4565 xpt_get_ccb_nowait(struct cam_periph *periph)
4566 {
4567         union ccb *new_ccb;
4568
4569         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4570         if (new_ccb == NULL)
4571                 return (NULL);
4572         periph->periph_allocated++;
4573         cam_ccbq_take_opening(&periph->path->device->ccbq);
4574         return (new_ccb);
4575 }
4576
4577 static union ccb *
4578 xpt_get_ccb(struct cam_periph *periph)
4579 {
4580         union ccb *new_ccb;
4581
4582         cam_periph_unlock(periph);
4583         new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4584         cam_periph_lock(periph);
4585         periph->periph_allocated++;
4586         cam_ccbq_take_opening(&periph->path->device->ccbq);
4587         return (new_ccb);
4588 }
4589
4590 union ccb *
4591 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4592 {
4593         struct ccb_hdr *ccb_h;
4594
4595         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4596         cam_periph_assert(periph, MA_OWNED);
4597         while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4598             ccb_h->pinfo.priority != priority) {
4599                 if (priority < periph->immediate_priority) {
4600                         periph->immediate_priority = priority;
4601                         xpt_run_allocq(periph, 0);
4602                 } else
4603                         cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4604                             "cgticb", 0);
4605         }
4606         SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4607         return ((union ccb *)ccb_h);
4608 }
4609
4610 static void
4611 xpt_acquire_bus(struct cam_eb *bus)
4612 {
4613
4614         xpt_lock_buses();
4615         bus->refcount++;
4616         xpt_unlock_buses();
4617 }
4618
4619 static void
4620 xpt_release_bus(struct cam_eb *bus)
4621 {
4622
4623         xpt_lock_buses();
4624         KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4625         if (--bus->refcount > 0) {
4626                 xpt_unlock_buses();
4627                 return;
4628         }
4629         TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4630         xsoftc.bus_generation++;
4631         xpt_unlock_buses();
4632         KASSERT(TAILQ_EMPTY(&bus->et_entries),
4633             ("destroying bus, but target list is not empty"));
4634         cam_sim_release(bus->sim);
4635         mtx_destroy(&bus->eb_mtx);
4636         free(bus, M_CAMXPT);
4637 }
4638
4639 static struct cam_et *
4640 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4641 {
4642         struct cam_et *cur_target, *target;
4643
4644         mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4645         mtx_assert(&bus->eb_mtx, MA_OWNED);
4646         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4647                                          M_NOWAIT|M_ZERO);
4648         if (target == NULL)
4649                 return (NULL);
4650
4651         TAILQ_INIT(&target->ed_entries);
4652         target->bus = bus;
4653         target->target_id = target_id;
4654         target->refcount = 1;
4655         target->generation = 0;
4656         target->luns = NULL;
4657         mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4658         timevalclear(&target->last_reset);
4659         /*
4660          * Hold a reference to our parent bus so it
4661          * will not go away before we do.
4662          */
4663         bus->refcount++;
4664
4665         /* Insertion sort into our bus's target list */
4666         cur_target = TAILQ_FIRST(&bus->et_entries);
4667         while (cur_target != NULL && cur_target->target_id < target_id)
4668                 cur_target = TAILQ_NEXT(cur_target, links);
4669         if (cur_target != NULL) {
4670                 TAILQ_INSERT_BEFORE(cur_target, target, links);
4671         } else {
4672                 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4673         }
4674         bus->generation++;
4675         return (target);
4676 }
4677
4678 static void
4679 xpt_acquire_target(struct cam_et *target)
4680 {
4681         struct cam_eb *bus = target->bus;
4682
4683         mtx_lock(&bus->eb_mtx);
4684         target->refcount++;
4685         mtx_unlock(&bus->eb_mtx);
4686 }
4687
4688 static void
4689 xpt_release_target(struct cam_et *target)
4690 {
4691         struct cam_eb *bus = target->bus;
4692
4693         mtx_lock(&bus->eb_mtx);
4694         if (--target->refcount > 0) {
4695                 mtx_unlock(&bus->eb_mtx);
4696                 return;
4697         }
4698         TAILQ_REMOVE(&bus->et_entries, target, links);
4699         bus->generation++;
4700         mtx_unlock(&bus->eb_mtx);
4701         KASSERT(TAILQ_EMPTY(&target->ed_entries),
4702             ("destroying target, but device list is not empty"));
4703         xpt_release_bus(bus);
4704         mtx_destroy(&target->luns_mtx);
4705         if (target->luns)
4706                 free(target->luns, M_CAMXPT);
4707         free(target, M_CAMXPT);
4708 }
4709
4710 static struct cam_ed *
4711 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4712                          lun_id_t lun_id)
4713 {
4714         struct cam_ed *device;
4715
4716         device = xpt_alloc_device(bus, target, lun_id);
4717         if (device == NULL)
4718                 return (NULL);
4719
4720         device->mintags = 1;
4721         device->maxtags = 1;
4722         return (device);
4723 }
4724
4725 static void
4726 xpt_destroy_device(void *context, int pending)
4727 {
4728         struct cam_ed   *device = context;
4729
4730         mtx_lock(&device->device_mtx);
4731         mtx_destroy(&device->device_mtx);
4732         free(device, M_CAMDEV);
4733 }
4734
4735 struct cam_ed *
4736 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4737 {
4738         struct cam_ed   *cur_device, *device;
4739         struct cam_devq *devq;
4740         cam_status status;
4741
4742         mtx_assert(&bus->eb_mtx, MA_OWNED);
4743         /* Make space for us in the device queue on our bus */
4744         devq = bus->sim->devq;
4745         mtx_lock(&devq->send_mtx);
4746         status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4747         mtx_unlock(&devq->send_mtx);
4748         if (status != CAM_REQ_CMP)
4749                 return (NULL);
4750
4751         device = (struct cam_ed *)malloc(sizeof(*device),
4752                                          M_CAMDEV, M_NOWAIT|M_ZERO);
4753         if (device == NULL)
4754                 return (NULL);
4755
4756         cam_init_pinfo(&device->devq_entry);
4757         device->target = target;
4758         device->lun_id = lun_id;
4759         device->sim = bus->sim;
4760         if (cam_ccbq_init(&device->ccbq,
4761                           bus->sim->max_dev_openings) != 0) {
4762                 free(device, M_CAMDEV);
4763                 return (NULL);
4764         }
4765         SLIST_INIT(&device->asyncs);
4766         SLIST_INIT(&device->periphs);
4767         device->generation = 0;
4768         device->flags = CAM_DEV_UNCONFIGURED;
4769         device->tag_delay_count = 0;
4770         device->tag_saved_openings = 0;
4771         device->refcount = 1;
4772         mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4773         callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4774         TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4775         /*
4776          * Hold a reference to our parent bus so it
4777          * will not go away before we do.
4778          */
4779         target->refcount++;
4780
4781         cur_device = TAILQ_FIRST(&target->ed_entries);
4782         while (cur_device != NULL && cur_device->lun_id < lun_id)
4783                 cur_device = TAILQ_NEXT(cur_device, links);
4784         if (cur_device != NULL)
4785                 TAILQ_INSERT_BEFORE(cur_device, device, links);
4786         else
4787                 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4788         target->generation++;
4789         return (device);
4790 }
4791
4792 void
4793 xpt_acquire_device(struct cam_ed *device)
4794 {
4795         struct cam_eb *bus = device->target->bus;
4796
4797         mtx_lock(&bus->eb_mtx);
4798         device->refcount++;
4799         mtx_unlock(&bus->eb_mtx);
4800 }
4801
4802 void
4803 xpt_release_device(struct cam_ed *device)
4804 {
4805         struct cam_eb *bus = device->target->bus;
4806         struct cam_devq *devq;
4807
4808         mtx_lock(&bus->eb_mtx);
4809         if (--device->refcount > 0) {
4810                 mtx_unlock(&bus->eb_mtx);
4811                 return;
4812         }
4813
4814         TAILQ_REMOVE(&device->target->ed_entries, device,links);
4815         device->target->generation++;
4816         mtx_unlock(&bus->eb_mtx);
4817
4818         /* Release our slot in the devq */
4819         devq = bus->sim->devq;
4820         mtx_lock(&devq->send_mtx);
4821         cam_devq_resize(devq, devq->send_queue.array_size - 1);
4822         mtx_unlock(&devq->send_mtx);
4823
4824         KASSERT(SLIST_EMPTY(&device->periphs),
4825             ("destroying device, but periphs list is not empty"));
4826         KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4827             ("destroying device while still queued for ccbs"));
4828
4829         if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4830                 callout_stop(&device->callout);
4831
4832         xpt_release_target(device->target);
4833
4834         cam_ccbq_fini(&device->ccbq);
4835         /*
4836          * Free allocated memory.  free(9) does nothing if the
4837          * supplied pointer is NULL, so it is safe to call without
4838          * checking.
4839          */
4840         free(device->supported_vpds, M_CAMXPT);
4841         free(device->device_id, M_CAMXPT);
4842         free(device->ext_inq, M_CAMXPT);
4843         free(device->physpath, M_CAMXPT);
4844         free(device->rcap_buf, M_CAMXPT);
4845         free(device->serial_num, M_CAMXPT);
4846         taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4847 }
4848
4849 u_int32_t
4850 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4851 {
4852         int     result;
4853         struct  cam_ed *dev;
4854
4855         dev = path->device;
4856         mtx_lock(&dev->sim->devq->send_mtx);
4857         result = cam_ccbq_resize(&dev->ccbq, newopenings);
4858         mtx_unlock(&dev->sim->devq->send_mtx);
4859         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4860          || (dev->inq_flags & SID_CmdQue) != 0)
4861                 dev->tag_saved_openings = newopenings;
4862         return (result);
4863 }
4864
4865 static struct cam_eb *
4866 xpt_find_bus(path_id_t path_id)
4867 {
4868         struct cam_eb *bus;
4869
4870         xpt_lock_buses();
4871         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4872              bus != NULL;
4873              bus = TAILQ_NEXT(bus, links)) {
4874                 if (bus->path_id == path_id) {
4875                         bus->refcount++;
4876                         break;
4877                 }
4878         }
4879         xpt_unlock_buses();
4880         return (bus);
4881 }
4882
4883 static struct cam_et *
4884 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4885 {
4886         struct cam_et *target;
4887
4888         mtx_assert(&bus->eb_mtx, MA_OWNED);
4889         for (target = TAILQ_FIRST(&bus->et_entries);
4890              target != NULL;
4891              target = TAILQ_NEXT(target, links)) {
4892                 if (target->target_id == target_id) {
4893                         target->refcount++;
4894                         break;
4895                 }
4896         }
4897         return (target);
4898 }
4899
4900 static struct cam_ed *
4901 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4902 {
4903         struct cam_ed *device;
4904
4905         mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4906         for (device = TAILQ_FIRST(&target->ed_entries);
4907              device != NULL;
4908              device = TAILQ_NEXT(device, links)) {
4909                 if (device->lun_id == lun_id) {
4910                         device->refcount++;
4911                         break;
4912                 }
4913         }
4914         return (device);
4915 }
4916
4917 void
4918 xpt_start_tags(struct cam_path *path)
4919 {
4920         struct ccb_relsim crs;
4921         struct cam_ed *device;
4922         struct cam_sim *sim;
4923         int    newopenings;
4924
4925         device = path->device;
4926         sim = path->bus->sim;
4927         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4928         xpt_freeze_devq(path, /*count*/1);
4929         device->inq_flags |= SID_CmdQue;
4930         if (device->tag_saved_openings != 0)
4931                 newopenings = device->tag_saved_openings;
4932         else
4933                 newopenings = min(device->maxtags,
4934                                   sim->max_tagged_dev_openings);
4935         xpt_dev_ccbq_resize(path, newopenings);
4936         xpt_async(AC_GETDEV_CHANGED, path, NULL);
4937         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4938         crs.ccb_h.func_code = XPT_REL_SIMQ;
4939         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4940         crs.openings
4941             = crs.release_timeout
4942             = crs.qfrozen_cnt
4943             = 0;
4944         xpt_action((union ccb *)&crs);
4945 }
4946
4947 void
4948 xpt_stop_tags(struct cam_path *path)
4949 {
4950         struct ccb_relsim crs;
4951         struct cam_ed *device;
4952         struct cam_sim *sim;
4953
4954         device = path->device;
4955         sim = path->bus->sim;
4956         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4957         device->tag_delay_count = 0;
4958         xpt_freeze_devq(path, /*count*/1);
4959         device->inq_flags &= ~SID_CmdQue;
4960         xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4961         xpt_async(AC_GETDEV_CHANGED, path, NULL);
4962         xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4963         crs.ccb_h.func_code = XPT_REL_SIMQ;
4964         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4965         crs.openings
4966             = crs.release_timeout
4967             = crs.qfrozen_cnt
4968             = 0;
4969         xpt_action((union ccb *)&crs);
4970 }
4971
4972 static void
4973 xpt_boot_delay(void *arg)
4974 {
4975
4976         xpt_release_boot();
4977 }
4978
4979 static void
4980 xpt_config(void *arg)
4981 {
4982         /*
4983          * Now that interrupts are enabled, go find our devices
4984          */
4985         if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
4986                 printf("xpt_config: failed to create taskqueue thread.\n");
4987
4988         /* Setup debugging path */
4989         if (cam_dflags != CAM_DEBUG_NONE) {
4990                 if (xpt_create_path(&cam_dpath, NULL,
4991                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
4992                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
4993                         printf("xpt_config: xpt_create_path() failed for debug"
4994                                " target %d:%d:%d, debugging disabled\n",
4995                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
4996                         cam_dflags = CAM_DEBUG_NONE;
4997                 }
4998         } else
4999                 cam_dpath = NULL;
5000
5001         periphdriver_init(1);
5002         xpt_hold_boot();
5003         callout_init(&xsoftc.boot_callout, 1);
5004         callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
5005             xpt_boot_delay, NULL, 0);
5006         /* Fire up rescan thread. */
5007         if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5008             "cam", "scanner")) {
5009                 printf("xpt_config: failed to create rescan thread.\n");
5010         }
5011 }
5012
5013 void
5014 xpt_hold_boot(void)
5015 {
5016         xpt_lock_buses();
5017         xsoftc.buses_to_config++;
5018         xpt_unlock_buses();
5019 }
5020
5021 void
5022 xpt_release_boot(void)
5023 {
5024         xpt_lock_buses();
5025         xsoftc.buses_to_config--;
5026         if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
5027                 struct  xpt_task *task;
5028
5029                 xsoftc.buses_config_done = 1;
5030                 xpt_unlock_buses();
5031                 /* Call manually because we don't have any busses */
5032                 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
5033                 if (task != NULL) {
5034                         TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
5035                         taskqueue_enqueue(taskqueue_thread, &task->task);
5036                 }
5037         } else
5038                 xpt_unlock_buses();
5039 }
5040
5041 /*
5042  * If the given device only has one peripheral attached to it, and if that
5043  * peripheral is the passthrough driver, announce it.  This insures that the
5044  * user sees some sort of announcement for every peripheral in their system.
5045  */
5046 static int
5047 xptpassannouncefunc(struct cam_ed *device, void *arg)
5048 {
5049         struct cam_periph *periph;
5050         int i;
5051
5052         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5053              periph = SLIST_NEXT(periph, periph_links), i++);
5054
5055         periph = SLIST_FIRST(&device->periphs);
5056         if ((i == 1)
5057          && (strncmp(periph->periph_name, "pass", 4) == 0))
5058                 xpt_announce_periph(periph, NULL);
5059
5060         return(1);
5061 }
5062
5063 static void
5064 xpt_finishconfig_task(void *context, int pending)
5065 {
5066
5067         periphdriver_init(2);
5068         /*
5069          * Check for devices with no "standard" peripheral driver
5070          * attached.  For any devices like that, announce the
5071          * passthrough driver so the user will see something.
5072          */
5073         if (!bootverbose)
5074                 xpt_for_all_devices(xptpassannouncefunc, NULL);
5075
5076         /* Release our hook so that the boot can continue. */
5077         config_intrhook_disestablish(xsoftc.xpt_config_hook);
5078         free(xsoftc.xpt_config_hook, M_CAMXPT);
5079         xsoftc.xpt_config_hook = NULL;
5080
5081         free(context, M_CAMXPT);
5082 }
5083
5084 cam_status
5085 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5086                    struct cam_path *path)
5087 {
5088         struct ccb_setasync csa;
5089         cam_status status;
5090         int xptpath = 0;
5091
5092         if (path == NULL) {
5093                 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5094                                          CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5095                 if (status != CAM_REQ_CMP)
5096                         return (status);
5097                 xpt_path_lock(path);
5098                 xptpath = 1;
5099         }
5100
5101         xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5102         csa.ccb_h.func_code = XPT_SASYNC_CB;
5103         csa.event_enable = event;
5104         csa.callback = cbfunc;
5105         csa.callback_arg = cbarg;
5106         xpt_action((union ccb *)&csa);
5107         status = csa.ccb_h.status;
5108
5109         CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5110             ("xpt_register_async: func %p\n", cbfunc));
5111
5112         if (xptpath) {
5113                 xpt_path_unlock(path);
5114                 xpt_free_path(path);
5115         }
5116
5117         if ((status == CAM_REQ_CMP) &&
5118             (csa.event_enable & AC_FOUND_DEVICE)) {
5119                 /*
5120                  * Get this peripheral up to date with all
5121                  * the currently existing devices.
5122                  */
5123                 xpt_for_all_devices(xptsetasyncfunc, &csa);
5124         }
5125         if ((status == CAM_REQ_CMP) &&
5126             (csa.event_enable & AC_PATH_REGISTERED)) {
5127                 /*
5128                  * Get this peripheral up to date with all
5129                  * the currently existing busses.
5130                  */
5131                 xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5132         }
5133
5134         return (status);
5135 }
5136
5137 static void
5138 xptaction(struct cam_sim *sim, union ccb *work_ccb)
5139 {
5140         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5141
5142         switch (work_ccb->ccb_h.func_code) {
5143         /* Common cases first */
5144         case XPT_PATH_INQ:              /* Path routing inquiry */
5145         {
5146                 struct ccb_pathinq *cpi;
5147
5148                 cpi = &work_ccb->cpi;
5149                 cpi->version_num = 1; /* XXX??? */
5150                 cpi->hba_inquiry = 0;
5151                 cpi->target_sprt = 0;
5152                 cpi->hba_misc = 0;
5153                 cpi->hba_eng_cnt = 0;
5154                 cpi->max_target = 0;
5155                 cpi->max_lun = 0;
5156                 cpi->initiator_id = 0;
5157                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5158                 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5159                 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5160                 cpi->unit_number = sim->unit_number;
5161                 cpi->bus_id = sim->bus_id;
5162                 cpi->base_transfer_speed = 0;
5163                 cpi->protocol = PROTO_UNSPECIFIED;
5164                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5165                 cpi->transport = XPORT_UNSPECIFIED;
5166                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5167                 cpi->ccb_h.status = CAM_REQ_CMP;
5168                 xpt_done(work_ccb);
5169                 break;
5170         }
5171         default:
5172                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5173                 xpt_done(work_ccb);
5174                 break;
5175         }
5176 }
5177
5178 /*
5179  * The xpt as a "controller" has no interrupt sources, so polling
5180  * is a no-op.
5181  */
5182 static void
5183 xptpoll(struct cam_sim *sim)
5184 {
5185 }
5186
5187 void
5188 xpt_lock_buses(void)
5189 {
5190         mtx_lock(&xsoftc.xpt_topo_lock);
5191 }
5192
5193 void
5194 xpt_unlock_buses(void)
5195 {
5196         mtx_unlock(&xsoftc.xpt_topo_lock);
5197 }
5198
5199 struct mtx *
5200 xpt_path_mtx(struct cam_path *path)
5201 {
5202
5203         return (&path->device->device_mtx);
5204 }
5205
5206 static void
5207 xpt_done_process(struct ccb_hdr *ccb_h)
5208 {
5209         struct cam_sim *sim;
5210         struct cam_devq *devq;
5211         struct mtx *mtx = NULL;
5212
5213         if (ccb_h->flags & CAM_HIGH_POWER) {
5214                 struct highpowerlist    *hphead;
5215                 struct cam_ed           *device;
5216
5217                 mtx_lock(&xsoftc.xpt_highpower_lock);
5218                 hphead = &xsoftc.highpowerq;
5219
5220                 device = STAILQ_FIRST(hphead);
5221
5222                 /*
5223                  * Increment the count since this command is done.
5224                  */
5225                 xsoftc.num_highpower++;
5226
5227                 /*
5228                  * Any high powered commands queued up?
5229                  */
5230                 if (device != NULL) {
5231
5232                         STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5233                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5234
5235                         mtx_lock(&device->sim->devq->send_mtx);
5236                         xpt_release_devq_device(device,
5237                                          /*count*/1, /*runqueue*/TRUE);
5238                         mtx_unlock(&device->sim->devq->send_mtx);
5239                 } else
5240                         mtx_unlock(&xsoftc.xpt_highpower_lock);
5241         }
5242
5243         sim = ccb_h->path->bus->sim;
5244
5245         if (ccb_h->status & CAM_RELEASE_SIMQ) {
5246                 xpt_release_simq(sim, /*run_queue*/FALSE);
5247                 ccb_h->status &= ~CAM_RELEASE_SIMQ;
5248         }
5249
5250         if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5251          && (ccb_h->status & CAM_DEV_QFRZN)) {
5252                 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5253                 ccb_h->status &= ~CAM_DEV_QFRZN;
5254         }
5255
5256         devq = sim->devq;
5257         if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5258                 struct cam_ed *dev = ccb_h->path->device;
5259
5260                 mtx_lock(&devq->send_mtx);
5261                 devq->send_active--;
5262                 devq->send_openings++;
5263                 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5264
5265                 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5266                   && (dev->ccbq.dev_active == 0))) {
5267                         dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5268                         xpt_release_devq_device(dev, /*count*/1,
5269                                          /*run_queue*/FALSE);
5270                 }
5271
5272                 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5273                   && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5274                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5275                         xpt_release_devq_device(dev, /*count*/1,
5276                                          /*run_queue*/FALSE);
5277                 }
5278
5279                 if (!device_is_queued(dev))
5280                         (void)xpt_schedule_devq(devq, dev);
5281                 xpt_run_devq(devq);
5282                 mtx_unlock(&devq->send_mtx);
5283
5284                 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5285                         mtx = xpt_path_mtx(ccb_h->path);
5286                         mtx_lock(mtx);
5287
5288                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5289                          && (--dev->tag_delay_count == 0))
5290                                 xpt_start_tags(ccb_h->path);
5291                 }
5292         }
5293
5294         if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5295                 if (mtx == NULL) {
5296                         mtx = xpt_path_mtx(ccb_h->path);
5297                         mtx_lock(mtx);
5298                 }
5299         } else {
5300                 if (mtx != NULL) {
5301                         mtx_unlock(mtx);
5302                         mtx = NULL;
5303                 }
5304         }
5305
5306         /* Call the peripheral driver's callback */
5307         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5308         (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5309         if (mtx != NULL)
5310                 mtx_unlock(mtx);
5311 }
5312
5313 void
5314 xpt_done_td(void *arg)
5315 {
5316         struct cam_doneq *queue = arg;
5317         struct ccb_hdr *ccb_h;
5318         STAILQ_HEAD(, ccb_hdr)  doneq;
5319
5320         STAILQ_INIT(&doneq);
5321         mtx_lock(&queue->cam_doneq_mtx);
5322         while (1) {
5323                 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5324                         queue->cam_doneq_sleep = 1;
5325                         msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5326                             PRIBIO, "-", 0);
5327                         queue->cam_doneq_sleep = 0;
5328                 }
5329                 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5330                 mtx_unlock(&queue->cam_doneq_mtx);
5331
5332                 THREAD_NO_SLEEPING();
5333                 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5334                         STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5335                         xpt_done_process(ccb_h);
5336                 }
5337                 THREAD_SLEEPING_OK();
5338
5339                 mtx_lock(&queue->cam_doneq_mtx);
5340         }
5341 }
5342
5343 static void
5344 camisr_runqueue(void)
5345 {
5346         struct  ccb_hdr *ccb_h;
5347         struct cam_doneq *queue;
5348         int i;
5349
5350         /* Process global queues. */
5351         for (i = 0; i < cam_num_doneqs; i++) {
5352                 queue = &cam_doneqs[i];
5353                 mtx_lock(&queue->cam_doneq_mtx);
5354                 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5355                         STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5356                         mtx_unlock(&queue->cam_doneq_mtx);
5357                         xpt_done_process(ccb_h);
5358                         mtx_lock(&queue->cam_doneq_mtx);
5359                 }
5360                 mtx_unlock(&queue->cam_doneq_mtx);
5361         }
5362 }
5363
5364 struct kv 
5365 {
5366         uint32_t v;
5367         const char *name;
5368 };
5369
5370 static struct kv map[] = {
5371         { XPT_NOOP, "XPT_NOOP" },
5372         { XPT_SCSI_IO, "XPT_SCSI_IO" },
5373         { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5374         { XPT_GDEVLIST, "XPT_GDEVLIST" },
5375         { XPT_PATH_INQ, "XPT_PATH_INQ" },
5376         { XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5377         { XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5378         { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5379         { XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5380         { XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5381         { XPT_DEBUG, "XPT_DEBUG" },
5382         { XPT_PATH_STATS, "XPT_PATH_STATS" },
5383         { XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5384         { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5385         { XPT_ASYNC, "XPT_ASYNC" },
5386         { XPT_ABORT, "XPT_ABORT" },
5387         { XPT_RESET_BUS, "XPT_RESET_BUS" },
5388         { XPT_RESET_DEV, "XPT_RESET_DEV" },
5389         { XPT_TERM_IO, "XPT_TERM_IO" },
5390         { XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5391         { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5392         { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5393         { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5394         { XPT_ATA_IO, "XPT_ATA_IO" },
5395         { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5396         { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5397         { XPT_NVME_IO, "XPT_NVME_IO" },
5398         { XPT_MMCSD_IO, "XPT_MMCSD_IO" },
5399         { XPT_SMP_IO, "XPT_SMP_IO" },
5400         { XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5401         { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5402         { XPT_ENG_INQ, "XPT_ENG_INQ" },
5403         { XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5404         { XPT_EN_LUN, "XPT_EN_LUN" },
5405         { XPT_TARGET_IO, "XPT_TARGET_IO" },
5406         { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5407         { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5408         { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5409         { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5410         { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5411         { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5412         { 0, 0 }
5413 };
5414
5415 static const char *
5416 xpt_action_name(uint32_t action) 
5417 {
5418         static char buffer[32]; /* Only for unknown messages -- racy */
5419         struct kv *walker = map;
5420
5421         while (walker->name != NULL) {
5422                 if (walker->v == action)
5423                         return (walker->name);
5424                 walker++;
5425         }
5426
5427         snprintf(buffer, sizeof(buffer), "%#x", action);
5428         return (buffer);
5429 }