1 /*-
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <sys/md5.h>
43 #include <sys/interrupt.h>
44 #include <sys/sbuf.h>
45 #include <sys/taskqueue.h>
46
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/sysctl.h>
50 #include <sys/kthread.h>
51
52 #ifdef PC98
53 #include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
54 #endif
55
56 #include <cam/cam.h>
57 #include <cam/cam_ccb.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_sim.h>
60 #include <cam/cam_xpt.h>
61 #include <cam/cam_xpt_sim.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_debug.h>
64
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #include <cam/scsi/scsi_pass.h>
68 #include <machine/stdarg.h>     /* for xpt_print below */
69 #include "opt_cam.h"
70
71 /* Data structures internal to the xpt layer */
72 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
73
74 /* Object for deferring XPT actions to a taskqueue */
75 struct xpt_task {
76         struct task     task;
77         void            *data1;
78         uintptr_t       data2;
79 };
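/*
 * Illustrative sketch (not code from this file; the handler name
 * "xpt_example_task" is hypothetical): a deferred action is queued by
 * allocating an xpt_task, initializing the embedded task, and handing
 * it to the thread taskqueue:
 *
 *	struct xpt_task *task;
 *
 *	task = malloc(sizeof(*task), M_CAMXPT, M_NOWAIT);
 *	if (task != NULL) {
 *		TASK_INIT(&task->task, 0, xpt_example_task, task);
 *		task->data1 = some_pointer;
 *		task->data2 = some_integer;
 *		taskqueue_enqueue(taskqueue_thread, &task->task);
 *	}
 */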
80
81 /*
82  * Definition of an async handler callback block.  These are used to add
83  * SIMs and peripherals to the async callback lists.
84  */
85 struct async_node {
86         SLIST_ENTRY(async_node) links;
87         u_int32_t       event_enable;   /* Async Event enables */
88         void            (*callback)(void *arg, u_int32_t code,
89                                     struct cam_path *path, void *args);
90         void            *callback_arg;
91 };
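/*
 * Usage sketch (illustrative, not code from this file; the handler
 * name "mydriver_async" is hypothetical): a peripheral driver
 * registers an async callback by filling in a ccb_setasync and
 * passing it to xpt_action(), which hangs an async_node off the
 * appropriate list:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 */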
92
93 SLIST_HEAD(async_list, async_node);
94 SLIST_HEAD(periph_list, cam_periph);
95
96 /*
97  * This is the maximum number of high powered commands (e.g. start unit)
98  * that can be outstanding at a particular time.
99  */
100 #ifndef CAM_MAX_HIGHPOWER
101 #define CAM_MAX_HIGHPOWER  4
102 #endif
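/*
 * The default above can be overridden from the kernel configuration
 * file; the value 8 is only an example:
 *
 *	options CAM_MAX_HIGHPOWER=8
 */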
103
104 /*
105  * Structure for queueing a device in a run queue.
106  * There is one run queue for allocating new ccbs,
107  * and another for sending ccbs to the controller.
108  */
109 struct cam_ed_qinfo {
110         cam_pinfo pinfo;
111         struct    cam_ed *device;
112 };
113
114 /*
115  * The CAM EDT (Existing Device Table) contains the device information for
116  * all devices for all busses in the system.  The table contains a
117  * cam_ed structure for each device on each bus.
118  */
119 struct cam_ed {
120         TAILQ_ENTRY(cam_ed) links;
121         struct  cam_ed_qinfo alloc_ccb_entry;
122         struct  cam_ed_qinfo send_ccb_entry;
123         struct  cam_et   *target;
124         struct  cam_sim  *sim;
125         lun_id_t         lun_id;
126         struct  camq drvq;              /*
127                                          * Queue of type drivers wanting to do
128                                          * work on this device.
129                                          */
130         struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
131         struct  async_list asyncs;      /* Async callback info for this B/T/L */
132         struct  periph_list periphs;    /* All attached devices */
133         u_int   generation;             /* Generation number */
134         struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
135         struct  xpt_quirk_entry *quirk; /* Oddities about this device */
136                                         /* Storage for the inquiry data */
137         cam_proto        protocol;
138         u_int            protocol_version;
139         cam_xport        transport;
140         u_int            transport_version;
141         struct           scsi_inquiry_data inq_data;
142         u_int8_t         inq_flags;     /*
143                                          * Current settings for inquiry flags.
144                                          * This allows us to override settings
145                                          * like disconnection and tagged
146                                          * queuing for a device.
147                                          */
148         u_int8_t         queue_flags;   /* Queue flags from the control page */
149         u_int8_t         serial_num_len;
150         u_int8_t        *serial_num;
151         u_int32_t        qfrozen_cnt;
152         u_int32_t        flags;
153 #define CAM_DEV_UNCONFIGURED            0x01
154 #define CAM_DEV_REL_TIMEOUT_PENDING     0x02
155 #define CAM_DEV_REL_ON_COMPLETE         0x04
156 #define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
157 #define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
158 #define CAM_DEV_TAG_AFTER_COUNT         0x20
159 #define CAM_DEV_INQUIRY_DATA_VALID      0x40
160 #define CAM_DEV_IN_DV                   0x80
161 #define CAM_DEV_DV_HIT_BOTTOM           0x100
162         u_int32_t        tag_delay_count;
163 #define CAM_TAG_DELAY_COUNT             5
164         u_int32_t        tag_saved_openings;
165         u_int32_t        refcount;
166         struct callout   callout;
167 };
168
169 /*
170  * Each target is represented by an ET (Existing Target).  These
171  * entries are created when a target is successfully probed with an
172  * identify, and removed when a device fails to respond after a number
173  * of retries, or a bus rescan finds the device missing.
174  */
175 struct cam_et { 
176         TAILQ_HEAD(, cam_ed) ed_entries;
177         TAILQ_ENTRY(cam_et) links;
178         struct  cam_eb  *bus;   
179         target_id_t     target_id;
180         u_int32_t       refcount;       
181         u_int           generation;
182         struct          timeval last_reset;
183 };
184
185 /*
186  * Each bus is represented by an EB (Existing Bus).  These entries
187  * are created by calls to xpt_bus_register and deleted by calls to
188  * xpt_bus_deregister.
189  */
190 struct cam_eb { 
191         TAILQ_HEAD(, cam_et) et_entries;
192         TAILQ_ENTRY(cam_eb)  links;
193         path_id_t            path_id;
194         struct cam_sim       *sim;
195         struct timeval       last_reset;
196         u_int32_t            flags;
197 #define CAM_EB_RUNQ_SCHEDULED   0x01
198         u_int32_t            refcount;
199         u_int                generation;
200 };
201
202 struct cam_path {
203         struct cam_periph *periph;
204         struct cam_eb     *bus;
205         struct cam_et     *target;
206         struct cam_ed     *device;
207 };
208
209 struct xpt_quirk_entry {
210         struct scsi_inquiry_pattern inq_pat;
211         u_int8_t quirks;
212 #define CAM_QUIRK_NOLUNS        0x01
213 #define CAM_QUIRK_NOSERIAL      0x02
214 #define CAM_QUIRK_HILUNS        0x04
215 #define CAM_QUIRK_NOHILUNS      0x08
216         u_int mintags;
217         u_int maxtags;
218 };
219
220 static int cam_srch_hi = 0;
221 TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
222 static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
223 SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
224     sysctl_cam_search_luns, "I",
225     "allow search above LUN 7 for SCSI3 and greater devices");
226
227 #define CAM_SCSI2_MAXLUN        8
228 /*
229  * If we're not quirked to search only the first 8 luns,
230  * and we are either quirked to search above lun 8,
231  * or we're > SCSI-2 and we've enabled hilun searching,
232  * or we're > SCSI-2 and the last lun was a success,
233  * we can look for luns above lun 8.
234  */
235 #define CAN_SRCH_HI_SPARSE(dv)                          \
236   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
237   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
238   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
239
240 #define CAN_SRCH_HI_DENSE(dv)                           \
241   (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
242   && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
243   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
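/*
 * Worked example: an unquirked SCSI-3 disk (quirks == 0 and
 * SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2) makes CAN_SRCH_HI_DENSE()
 * true, so probing may continue past lun 7 whenever the previous lun
 * responded; CAN_SRCH_HI_SPARSE() additionally requires the
 * kern.cam.cam_srch_hi knob to be set.
 */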
244
245 typedef enum {
246         XPT_FLAG_OPEN           = 0x01
247 } xpt_flags;
248
249 struct xpt_softc {
250         xpt_flags               flags;
251         u_int32_t               xpt_generation;
252
253         /* number of high powered commands that can go through right now */
254         STAILQ_HEAD(highpowerlist, ccb_hdr)     highpowerq;
255         int                     num_highpower;
256
257         /* queue for handling async rescan requests. */
258         TAILQ_HEAD(, ccb_hdr) ccb_scanq;
259
260         /* Registered busses */
261         TAILQ_HEAD(,cam_eb)     xpt_busses;
262         u_int                   bus_generation;
263
264         struct intr_config_hook *xpt_config_hook;
265
266         struct mtx              xpt_topo_lock;
267         struct mtx              xpt_lock;
268 };
269
270 static const char quantum[] = "QUANTUM";
271 static const char sony[] = "SONY";
272 static const char west_digital[] = "WDIGTL";
273 static const char samsung[] = "SAMSUNG";
274 static const char seagate[] = "SEAGATE";
275 static const char microp[] = "MICROP";
276
277 static struct xpt_quirk_entry xpt_quirk_table[] = 
278 {
279         {
280                 /* Reports QUEUE FULL for temporary resource shortages */
281                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
282                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
283         },
284         {
285                 /* Reports QUEUE FULL for temporary resource shortages */
286                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
287                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
288         },
289         {
290                 /* Reports QUEUE FULL for temporary resource shortages */
291                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
292                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
293         },
294         {
295                 /* Broken tagged queuing drive */
296                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
297                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
298         },
299         {
300                 /* Broken tagged queuing drive */
301                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
302                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
303         },
304         {
305                 /* Broken tagged queuing drive */
306                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
307                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
308         },
309         {
310                 /*
311                  * Unfortunately, the Quantum Atlas III has the same
312                  * problem as the Atlas II drives above.
313                  * Reported by: "Johan Granlund" <johan@granlund.nu>
314                  *
315                  * For future reference, the drive with the problem was:
316                  * QUANTUM QM39100TD-SW N1B0
317                  * 
318                  * It's possible that Quantum will fix the problem in later
319                  * firmware revisions.  If that happens, the quirk entry
320                  * will need to be made specific to the firmware revisions
321                  * with the problem.
322                  * 
323                  */
324                 /* Reports QUEUE FULL for temporary resource shortages */
325                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
326                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
327         },
328         {
329                 /*
330                  * 18 Gig Atlas III, same problem as the 9G version.
331                  * Reported by: Andre Albsmeier
332                  *              <andre.albsmeier@mchp.siemens.de>
333                  *
334                  * For future reference, the drive with the problem was:
335                  * QUANTUM QM318000TD-S N491
336                  */
337                 /* Reports QUEUE FULL for temporary resource shortages */
338                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
339                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
340         },
341         {
342                 /*
343                  * Broken tagged queuing drive
344                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
345                  *         and: Martin Renters <martin@tdc.on.ca>
346                  */
347                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
348                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
349         },
350                 /*
351                  * The Seagate Medalist Pro drives have very poor write
352                  * performance with anything more than 2 tags.
353                  * 
354                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
355                  * Drive:  <SEAGATE ST36530N 1444>
356                  *
357                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
358                  * Drive:  <SEAGATE ST34520W 1281>
359                  *
360                  * No one has actually reported that the 9G version
361                  * (ST39140*) of the Medalist Pro has the same problem, but
362                  * we're assuming that it does because the 4G and 6.5G
363                  * versions of the drive are broken.
364                  */
365         {
366                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
367                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
368         },
369         {
370                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
371                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
372         },
373         {
374                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
375                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
376         },
377         {
378                 /*
379                  * Slow when tagged queueing is enabled.  Write performance
380                  * steadily drops off with more and more concurrent
381                  * transactions.  Best sequential write performance with
382                  * tagged queueing turned off and write caching turned on.
383                  *
384                  * PR:  kern/10398
385                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
386                  * Drive:  DCAS-34330 w/ "S65A" firmware.
387                  *
388                  * The drive with the problem had the "S65A" firmware
389                  * revision, and has also been reported (by Stephen J.
390                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
391                  * firmware revision.
392                  *
393                  * Although no one has reported problems with the 2 gig
394                  * version of the DCAS drive, the assumption is that it
395                  * has the same problems as the 4 gig version.  Therefore
396                  * this quirk entry disables tagged queueing for all
397                  * DCAS drives.
398                  */
399                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
400                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
401         },
402         {
403                 /* Broken tagged queuing drive */
404                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
405                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
406         },
407         {
408                 /* Broken tagged queuing drive */ 
409                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
410                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
411         },
412         {
413                 /* Does not support any LUN other than LUN 0 */
414                 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
415                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
416         },
417         {
418                 /*
419                  * Broken tagged queuing drive.
420                  * Submitted by:
421                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
422                  * in PR kern/9535
423                  */
424                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
425                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
426         },
427         {
428                 /*
429                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
430                  * 8MB/sec.)
431                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
432                  * Best performance with these drives is achieved with
433                  * tagged queueing turned off, and write caching turned on.
434                  */
435                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
436                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
437         },
438         {
439                 /*
440                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
441                  * 8MB/sec.)
442                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
443                  * Best performance with these drives is achieved with
444                  * tagged queueing turned off, and write caching turned on.
445                  */
446                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
447                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
448         },
449         {
450                 /*
451                  * Doesn't handle queue full condition correctly,
452                  * so we need to limit maxtags to what the device
453                  * can handle instead of determining this automatically.
454                  */
455                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
456                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
457         },
458         {
459                 /* Really only one LUN */
460                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
461                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
462         },
463         {
464                 /* I can't believe we need a quirk for DPT volumes. */
465                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
466                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
467                 /*mintags*/0, /*maxtags*/255
468         },
469         {
470                 /*
471                  * Many Sony CDROM drives don't like multi-LUN probing.
472                  */
473                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
474                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
475         },
476         {
477                 /*
478                  * This drive doesn't like multiple LUN probing.
479                  * Submitted by:  Parag Patel <parag@cgt.com>
480                  */
481                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
482                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
483         },
484         {
485                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
486                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
487         },
488         {
489                 /*
490                  * The 8200 doesn't like multi-lun probing, and probably
491                  * doesn't like serial number requests either.
492                  */
493                 {
494                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
495                         "EXB-8200*", "*"
496                 },
497                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
498         },
499         {
500                 /*
501                  * Let's try the same as above, but for a drive that says
502                  * it's an IPL-6860 but is actually an EXB 8200.
503                  */
504                 {
505                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
506                         "IPL-6860*", "*"
507                 },
508                 CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
509         },
510         {
511                 /*
512                  * These Hitachi drives don't like multi-lun probing.
513                  * The PR submitter has a DK319H, but says that the Linux
514                  * kernel has a similar work-around for the DK312 and DK314,
515                  * so all DK31* drives are quirked here.
516                  * PR:            misc/18793
517                  * Submitted by:  Paul Haddad <paul@pth.com>
518                  */
519                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
520                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
521         },
522         {
523                 /*
524                  * The Hitachi CJ series with J8A8 firmware apparently has
525                  * problems with tagged commands.
526                  * PR: 23536
527                  * Reported by: amagai@nue.org
528                  */
529                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
530                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
531         },
532         {
533                 /*
534                  * These are the large storage arrays.
535                  * Submitted by:  William Carrel <william.carrel@infospace.com>
536                  */
537                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
538                 CAM_QUIRK_HILUNS, 2, 1024
539         },
540         {
541                 /*
542                  * This old revision of the TDC3600 is also SCSI-1, and
543                  * hangs upon serial number probing.
544                  */
545                 {
546                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
547                         " TDC 3600", "U07:"
548                 },
549                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
550         },
551         {
552                 /*
553                  * Maxtor Personal Storage 3000XT (Firewire)
554                  * hangs upon serial number probing.
555                  */
556                 {
557                         T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
558                         "1394 storage", "*"
559                 },
560                 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
561         },
562         {
563                 /*
564                  * Would respond to all LUNs if asked for.
565                  */
566                 {
567                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
568                         "CP150", "*"
569                 },
570                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
571         },
572         {
573                 /*
574                  * Would respond to all LUNs if asked for.
575                  */
576                 {
577                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
578                         "96X2*", "*"
579                 },
580                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
581         },
582         {
583                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
584                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
585                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
586         },
587         {
588                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
589                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
590                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
591         },
592         {
593                 /* TeraSolutions special settings for TRC-22 RAID */
594                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
595                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
596         },
597         {
598                 /* Veritas Storage Appliance */
599                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
600                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
601         },
602         {
603                 /*
604                  * Would respond to all LUNs.  Device type and removable
605                  * flag are jumper-selectable.
606                  */
607                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
608                   "Tahiti 1", "*"
609                 },
610                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
611         },
612         {
613                 /* EasyRAID E5A aka. areca ARC-6010 */
614                 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
615                   CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
616         },
617         {
618                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
619                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
620         },
621         {
622                 /* Default tagged queuing parameters for all devices */
623                 {
624                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
625                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
626                 },
627                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
628         },
629 };
630
631 static const int xpt_quirk_table_size =
632         sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
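/*
 * The table above is searched in order and the first matching entry
 * wins, so a new quirk belongs before the catch-all default at the
 * end.  A sketch of a hypothetical entry (quirks, mintags, maxtags):
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "PRODUCT*", "*" },
 *		CAM_QUIRK_NOLUNS, 0, 0
 *	},
 */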
633
634 typedef enum {
635         DM_RET_COPY             = 0x01,
636         DM_RET_FLAG_MASK        = 0x0f,
637         DM_RET_NONE             = 0x00,
638         DM_RET_STOP             = 0x10,
639         DM_RET_DESCEND          = 0x20,
640         DM_RET_ERROR            = 0x30,
641         DM_RET_ACTION_MASK      = 0xf0
642 } dev_match_ret;
643
644 typedef enum {
645         XPT_DEPTH_BUS,
646         XPT_DEPTH_TARGET,
647         XPT_DEPTH_DEVICE,
648         XPT_DEPTH_PERIPH
649 } xpt_traverse_depth;
650
651 struct xpt_traverse_config {
652         xpt_traverse_depth      depth;
653         void                    *tr_func;
654         void                    *tr_arg;
655 };
656
657 typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
658 typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
659 typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
660 typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
661 typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
662
663 /* Transport layer configuration information */
664 static struct xpt_softc xsoftc;
665
666 /* Queues for our software interrupt handler */
667 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
668 typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
669 static cam_simq_t cam_simq;
670 static struct mtx cam_simq_lock;
671
672 /* Pointers to software interrupt handlers */
673 static void *cambio_ih;
674
675 struct cam_periph *xpt_periph;
676
677 static periph_init_t xpt_periph_init;
678
679 static periph_init_t probe_periph_init;
680
681 static struct periph_driver xpt_driver =
682 {
683         xpt_periph_init, "xpt",
684         TAILQ_HEAD_INITIALIZER(xpt_driver.units)
685 };
686
687 static struct periph_driver probe_driver =
688 {
689         probe_periph_init, "probe",
690         TAILQ_HEAD_INITIALIZER(probe_driver.units)
691 };
692
693 PERIPHDRIVER_DECLARE(xpt, xpt_driver);
694 PERIPHDRIVER_DECLARE(probe, probe_driver);
695
696
697 static d_open_t xptopen;
698 static d_close_t xptclose;
699 static d_ioctl_t xptioctl;
700
701 static struct cdevsw xpt_cdevsw = {
702         .d_version =    D_VERSION,
703         .d_flags =      0,
704         .d_open =       xptopen,
705         .d_close =      xptclose,
706         .d_ioctl =      xptioctl,
707         .d_name =       "xpt",
708 };
709
710
711 static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
712 static void dead_sim_poll(struct cam_sim *sim);
713
714 /* Dummy SIM that is used when the real one has gone. */
715 static struct cam_sim cam_dead_sim = {
716         .sim_action =   dead_sim_action,
717         .sim_poll =     dead_sim_poll,
718         .sim_name =     "dead_sim",
719 };
720
721 #define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)
722
723
724 /* Storage for debugging data structures */
725 #ifdef  CAMDEBUG
726 struct cam_path *cam_dpath;
727 u_int32_t cam_dflags;
728 u_int32_t cam_debug_delay;
729 #endif
730
731 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
732 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
733 #endif
734
735 /*
736  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
737  * enabled.  Also, the user must have either none or all of CAM_DEBUG_BUS,
738  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
739  */
740 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
741     || defined(CAM_DEBUG_LUN)
742 #ifdef CAMDEBUG
743 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
744     || !defined(CAM_DEBUG_LUN)
745 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
746         and CAM_DEBUG_LUN"
747 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
748 #else /* !CAMDEBUG */
749 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
750 #endif /* CAMDEBUG */
751 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
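/*
 * An illustrative kernel configuration satisfying the rules above,
 * enabling CDB tracing for the device at bus 0, target 1, lun 0 (the
 * exact values are only an example):
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */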
752
753 /* Our boot-time initialization hook */
754 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
755
756 static moduledata_t cam_moduledata = {
757         "cam",
758         cam_module_event_handler,
759         NULL
760 };
761
762 static int      xpt_init(void *);
763
764 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
765 MODULE_VERSION(cam, 1);
766
767
768 static cam_status       xpt_compile_path(struct cam_path *new_path,
769                                          struct cam_periph *perph,
770                                          path_id_t path_id,
771                                          target_id_t target_id,
772                                          lun_id_t lun_id);
773
774 static void             xpt_release_path(struct cam_path *path);
775
776 static void             xpt_async_bcast(struct async_list *async_head,
777                                         u_int32_t async_code,
778                                         struct cam_path *path,
779                                         void *async_arg);
780 static void             xpt_dev_async(u_int32_t async_code,
781                                       struct cam_eb *bus,
782                                       struct cam_et *target,
783                                       struct cam_ed *device,
784                                       void *async_arg);
785 static path_id_t xptnextfreepathid(void);
786 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
787 static union ccb *xpt_get_ccb(struct cam_ed *device);
788 static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
789                                   u_int32_t new_priority);
790 static void      xpt_run_dev_allocq(struct cam_eb *bus);
791 static void      xpt_run_dev_sendq(struct cam_eb *bus);
792 static timeout_t xpt_release_devq_timeout;
793 static void      xpt_release_simq_timeout(void *arg) __unused;
794 static void      xpt_release_bus(struct cam_eb *bus);
795 static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
796                                          int run_queue);
797 static struct cam_et*
798                  xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
799 static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
800 static struct cam_ed*
801                  xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
802                                   lun_id_t lun_id);
803 static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
804                                     struct cam_ed *device);
805 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
806 static struct cam_eb*
807                  xpt_find_bus(path_id_t path_id);
808 static struct cam_et*
809                  xpt_find_target(struct cam_eb *bus, target_id_t target_id);
810 static struct cam_ed*
811                  xpt_find_device(struct cam_et *target, lun_id_t lun_id);
812 static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
813 static void      xpt_scan_lun(struct cam_periph *periph,
814                               struct cam_path *path, cam_flags flags,
815                               union ccb *ccb);
816 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
817 static xpt_busfunc_t    xptconfigbuscountfunc;
818 static xpt_busfunc_t    xptconfigfunc;
819 static void      xpt_config(void *arg);
820 static xpt_devicefunc_t xptpassannouncefunc;
821 static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
822 static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
823 static void      xptpoll(struct cam_sim *sim);
824 static void      camisr(void *);
825 static void      camisr_runqueue(void *);
826 static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
827                                     u_int num_patterns, struct cam_eb *bus);
828 static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
829                                        u_int num_patterns,
830                                        struct cam_ed *device);
831 static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
832                                        u_int num_patterns,
833                                        struct cam_periph *periph);
834 static xpt_busfunc_t    xptedtbusfunc;
835 static xpt_targetfunc_t xptedttargetfunc;
836 static xpt_devicefunc_t xptedtdevicefunc;
837 static xpt_periphfunc_t xptedtperiphfunc;
838 static xpt_pdrvfunc_t   xptplistpdrvfunc;
839 static xpt_periphfunc_t xptplistperiphfunc;
840 static int              xptedtmatch(struct ccb_dev_match *cdm);
841 static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
842 static int              xptbustraverse(struct cam_eb *start_bus,
843                                        xpt_busfunc_t *tr_func, void *arg);
844 static int              xpttargettraverse(struct cam_eb *bus,
845                                           struct cam_et *start_target,
846                                           xpt_targetfunc_t *tr_func, void *arg);
847 static int              xptdevicetraverse(struct cam_et *target,
848                                           struct cam_ed *start_device,
849                                           xpt_devicefunc_t *tr_func, void *arg);
850 static int              xptperiphtraverse(struct cam_ed *device,
851                                           struct cam_periph *start_periph,
852                                           xpt_periphfunc_t *tr_func, void *arg);
853 static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
854                                         xpt_pdrvfunc_t *tr_func, void *arg);
855 static int              xptpdperiphtraverse(struct periph_driver **pdrv,
856                                             struct cam_periph *start_periph,
857                                             xpt_periphfunc_t *tr_func,
858                                             void *arg);
859 static xpt_busfunc_t    xptdefbusfunc;
860 static xpt_targetfunc_t xptdeftargetfunc;
861 static xpt_devicefunc_t xptdefdevicefunc;
862 static xpt_periphfunc_t xptdefperiphfunc;
863 static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
864 static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
865                                             void *arg);
866 static xpt_devicefunc_t xptsetasyncfunc;
867 static xpt_busfunc_t    xptsetasyncbusfunc;
868 static cam_status       xptregister(struct cam_periph *periph,
869                                     void *arg);
870 static cam_status       proberegister(struct cam_periph *periph,
871                                       void *arg);
872 static void      probeschedule(struct cam_periph *probe_periph);
873 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
874 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
875 static int       proberequestbackoff(struct cam_periph *periph,
876                                      struct cam_ed *device);
877 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
878 static void      probecleanup(struct cam_periph *periph);
879 static void      xpt_find_quirk(struct cam_ed *device);
880 static void      xpt_devise_transport(struct cam_path *path);
881 static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
882                                            struct cam_ed *device,
883                                            int async_update);
884 static void      xpt_toggle_tags(struct cam_path *path);
885 static void      xpt_start_tags(struct cam_path *path);
886 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
887                                             struct cam_ed *dev);
888 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
889                                            struct cam_ed *dev);
890 static __inline int periph_is_queued(struct cam_periph *periph);
891 static __inline int device_is_alloc_queued(struct cam_ed *device);
892 static __inline int device_is_send_queued(struct cam_ed *device);
893 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
894
895 static __inline int
896 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
897 {
898         int retval;
899
900         if (dev->ccbq.devq_openings > 0) {
901                 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
902                         cam_ccbq_resize(&dev->ccbq,
903                                         dev->ccbq.dev_openings
904                                         + dev->ccbq.dev_active);
905                         dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
906                 }
907                 /*
908                  * The priority of a device waiting for CCB resources
909                  * is that of the highest priority peripheral driver
910                  * enqueued.
911                  */
912                 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
913                                           &dev->alloc_ccb_entry.pinfo,
914                                           CAMQ_GET_HEAD(&dev->drvq)->priority); 
915         } else {
916                 retval = 0;
917         }
918
919         return (retval);
920 }
921
922 static __inline int
923 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
924 {
925         int     retval;
926
927         if (dev->ccbq.dev_openings > 0) {
928                 /*
929                  * The priority of a device waiting for controller
930                  * resources is that of the highest priority CCB
931                  * enqueued.
932                  */
933                 retval =
934                     xpt_schedule_dev(&bus->sim->devq->send_queue,
935                                      &dev->send_ccb_entry.pinfo,
936                                      CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
937         } else {
938                 retval = 0;
939         }
940         return (retval);
941 }
942
943 static __inline int
944 periph_is_queued(struct cam_periph *periph)
945 {
946         return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
947 }
948
949 static __inline int
950 device_is_alloc_queued(struct cam_ed *device)
951 {
952         return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
953 }
954
955 static __inline int
956 device_is_send_queued(struct cam_ed *device)
957 {
958         return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
959 }
960
961 static __inline int
962 dev_allocq_is_runnable(struct cam_devq *devq)
963 {
964         /*
965          * Have work to do.
966          * Have space to do more work.
967          * Allowed to do work.
968          */
969         return ((devq->alloc_queue.qfrozen_cnt == 0)
970              && (devq->alloc_queue.entries > 0)
971              && (devq->alloc_openings > 0));
972 }
973
974 static void
975 xpt_periph_init()
976 {
977         make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
978 }
979
980 static void
981 probe_periph_init()
982 {
983 }
984
985
986 static void
987 xptdone(struct cam_periph *periph, union ccb *done_ccb)
988 {
989         /* Caller will release the CCB */
990         wakeup(&done_ccb->ccb_h.cbfcnp);
991 }
992
993 static int
994 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
995 {
996
997         /*
998          * Only allow read-write access.
999          */
1000         if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
1001                 return(EPERM);
1002
1003         /*
1004          * We don't allow nonblocking access.
1005          */
1006         if ((flags & O_NONBLOCK) != 0) {
1007                 printf("%s: can't do nonblocking access\n", devtoname(dev));
1008                 return(ENODEV);
1009         }
1010
1011         /* Mark ourselves open */
1012         mtx_lock(&xsoftc.xpt_lock);
1013         xsoftc.flags |= XPT_FLAG_OPEN;
1014         mtx_unlock(&xsoftc.xpt_lock);
1015         
1016         return(0);
1017 }
1018
1019 static int
1020 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
1021 {
1022
1023         /* Mark ourselves closed */
1024         mtx_lock(&xsoftc.xpt_lock);
1025         xsoftc.flags &= ~XPT_FLAG_OPEN;
1026         mtx_unlock(&xsoftc.xpt_lock);
1027
1028         return(0);
1029 }
1030
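/*
 * Rough user-space sketch of reaching xptioctl() below through
 * /dev/xpt0 (illustrative only; error handling omitted):
 *
 *	union ccb ccb;
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */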
1031 /*
1032  * Don't automatically grab the xpt softc lock here even though this is going
1033  * through the xpt device.  The xpt device is really just a back door for
1034  * accessing other devices and SIMs, so the right thing to do is to grab
1035  * the appropriate SIM lock once the bus/SIM is located.
1036  */
1037 static int
1038 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1039 {
1040         int error;
1041
1042         error = 0;
1043
1044         switch(cmd) {
1045         /*
1046          * For the transport layer CAMIOCOMMAND ioctl, we really only want
1047          * to accept CCB types that don't quite make sense to send through a
1048          * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
1049          * in the CAM spec.
1050          */
1051         case CAMIOCOMMAND: {
1052                 union ccb *ccb;
1053                 union ccb *inccb;
1054                 struct cam_eb *bus;
1055
1056                 inccb = (union ccb *)addr;
1057
1058                 bus = xpt_find_bus(inccb->ccb_h.path_id);
1059                 if (bus == NULL) {
1060                         error = EINVAL;
1061                         break;
1062                 }
1063
1064                 switch(inccb->ccb_h.func_code) {
1065                 case XPT_SCAN_BUS:
1066                 case XPT_RESET_BUS:
1067                         if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1068                          || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1069                                 error = EINVAL;
1070                                 break;
1071                         }
1072                         /* FALLTHROUGH */
1073                 case XPT_PATH_INQ:
1074                 case XPT_ENG_INQ:
1075                 case XPT_SCAN_LUN:
1076
1077                         ccb = xpt_alloc_ccb();
1078
1079                         CAM_SIM_LOCK(bus->sim);
1080
1081                         /*
1082                          * Create a path using the bus, target, and lun the
1083                          * user passed in.
1084                          */
1085                         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1086                                             inccb->ccb_h.path_id,
1087                                             inccb->ccb_h.target_id,
1088                                             inccb->ccb_h.target_lun) !=
1089                                             CAM_REQ_CMP){
1090                                 error = EINVAL;
1091                                 CAM_SIM_UNLOCK(bus->sim);
1092                                 xpt_free_ccb(ccb);
1093                                 break;
1094                         }
1095                         /* Ensure all of our fields are correct */
1096                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1097                                       inccb->ccb_h.pinfo.priority);
1098                         xpt_merge_ccb(ccb, inccb);
1099                         ccb->ccb_h.cbfcnp = xptdone;
1100                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1101                         bcopy(ccb, inccb, sizeof(union ccb));
1102                         xpt_free_path(ccb->ccb_h.path);
1103                         xpt_free_ccb(ccb);
1104                         CAM_SIM_UNLOCK(bus->sim);
1105                         break;
1106
1107                 case XPT_DEBUG: {
1108                         union ccb ccb;
1109
1110                         /*
1111                          * This is an immediate CCB, so it's okay to
1112                          * allocate it on the stack.
1113                          */
1114
1115                         CAM_SIM_LOCK(bus->sim);
1116
1117                         /*
1118                          * Create a path using the bus, target, and lun the
1119                          * user passed in.
1120                          */
1121                         if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1122                                             inccb->ccb_h.path_id,
1123                                             inccb->ccb_h.target_id,
1124                                             inccb->ccb_h.target_lun) !=
1125                                             CAM_REQ_CMP){
1126                                 error = EINVAL;
                                 /* Drop the SIM lock taken above. */
                                 CAM_SIM_UNLOCK(bus->sim);
1127                                 break;
1128                         }
1129                         /* Ensure all of our fields are correct */
1130                         xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1131                                       inccb->ccb_h.pinfo.priority);
1132                         xpt_merge_ccb(&ccb, inccb);
1133                         ccb.ccb_h.cbfcnp = xptdone;
1134                         xpt_action(&ccb);
1135                         CAM_SIM_UNLOCK(bus->sim);
1136                         bcopy(&ccb, inccb, sizeof(union ccb));
1137                         xpt_free_path(ccb.ccb_h.path);
1138                         break;
1139
1140                 }
1141                 case XPT_DEV_MATCH: {
1142                         struct cam_periph_map_info mapinfo;
1143                         struct cam_path *old_path;
1144
1145                         /*
1146                          * We can't deal with physical addresses for this
1147                          * type of transaction.
1148                          */
1149                         if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1150                                 error = EINVAL;
1151                                 break;
1152                         }
1153
1154                         /*
1155                          * Save this in case the caller had it set to
1156                          * something in particular.
1157                          */
1158                         old_path = inccb->ccb_h.path;
1159
1160                         /*
1161                          * We really don't need a path for the matching
1162                          * code.  The path is needed because of the
1163                          * debugging statements in xpt_action().  They
1164                          * assume that the CCB has a valid path.
1165                          */
1166                         inccb->ccb_h.path = xpt_periph->path;
1167
1168                         bzero(&mapinfo, sizeof(mapinfo));
1169
1170                         /*
1171                          * Map the pattern and match buffers into kernel
1172                          * virtual address space.
1173                          */
1174                         error = cam_periph_mapmem(inccb, &mapinfo);
1175
1176                         if (error) {
1177                                 inccb->ccb_h.path = old_path;
1178                                 break;
1179                         }
1180
1181                         /*
1182                          * This is an immediate CCB, so we can send it on directly.
1183                          */
1184                         xpt_action(inccb);
1185
1186                         /*
1187                          * Map the buffers back into user space.
1188                          */
1189                         cam_periph_unmapmem(inccb, &mapinfo);
1190
1191                         inccb->ccb_h.path = old_path;
1192
1193                         error = 0;
1194                         break;
1195                 }
1196                 default:
1197                         error = ENOTSUP;
1198                         break;
1199                 }
1200                 break;
1201         }
1202         /*
1203   * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1204   * with the peripheral driver name and unit number filled in.  The other
1205   * fields don't really matter as input.  The passthrough driver name
1206   * ("pass") and unit number are passed back in the ccb.  The current
1207   * device generation number, the index into the device peripheral
1208   * driver list, and the status are also passed back.  Note that
1209          * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1210          * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
1211          * (or rather should be) impossible for the device peripheral driver
1212          * list to change since we look at the whole thing in one pass, and
1213          * we do it with splcam protection.
1214          * 
1215          */
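        /*
         * User-space sketch (hypothetical values): find the passthrough
         * instance sitting on top of "da0":
         *
         *	union ccb ccb;
         *
         *	bzero(&ccb, sizeof(ccb));
         *	strlcpy(ccb.cgdl.periph_name, "da",
         *	    sizeof(ccb.cgdl.periph_name));
         *	ccb.cgdl.unit_number = 0;
         *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
         *	    ccb.ccb_h.status == CAM_REQ_CMP)
         *		printf("%s%d\n", ccb.cgdl.periph_name,
         *		    ccb.cgdl.unit_number);
         */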
1216         case CAMGETPASSTHRU: {
1217                 union ccb *ccb;
1218                 struct cam_periph *periph;
1219                 struct periph_driver **p_drv;
1220                 char   *name;
1221                 u_int unit;
1222                 u_int cur_generation;
1223                 int base_periph_found;
1224                 int splbreaknum;
1225                 int s;
1226
1227                 ccb = (union ccb *)addr;
1228                 unit = ccb->cgdl.unit_number;
1229                 name = ccb->cgdl.periph_name;
1230                 /*
1231                  * Every 100 devices, we want to drop our spl protection to
1232                  * give the software interrupt handler a chance to run.
1233                  * Most systems won't run into this check, but this should
1234                  * avoid starvation in the software interrupt handler in
1235                  * large systems.
1236                  */
1237                 splbreaknum = 100;
1238
1239                 ccb = (union ccb *)addr;
1240
1241                 base_periph_found = 0;
1242
1243                 /*
1244                  * Sanity check -- make sure we don't get a null peripheral
1245                  * driver name.
1246                  */
1247                 if (*ccb->cgdl.periph_name == '\0') {
1248                         error = EINVAL;
1249                         break;
1250                 }
1251
1252                 /* Keep the list from changing while we traverse it */
1253                 s = splcam();
1254 ptstartover:
1255                 cur_generation = xsoftc.xpt_generation;
1256
1257                 /* first find our driver in the list of drivers */
1258                 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
1259                         if (strcmp((*p_drv)->driver_name, name) == 0)
1260                                 break;
1261
1262                 if (*p_drv == NULL) {
1263                         splx(s);
1264                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1265                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1266                         *ccb->cgdl.periph_name = '\0';
1267                         ccb->cgdl.unit_number = 0;
1268                         error = ENOENT;
1269                         break;
1270                 }       
1271
1272                 /*
1273                  * Run through every peripheral instance of this driver
1274                  * and check to see whether it matches the unit passed
1275                  * in by the user.  If it does, get out of the loops and
1276                  * find the passthrough driver associated with that
1277                  * peripheral driver.
1278                  */
1279                 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1280                      periph = TAILQ_NEXT(periph, unit_links)) {
1281
1282                         if (periph->unit_number == unit) {
1283                                 break;
1284                         } else if (--splbreaknum == 0) {
1285                                 splx(s);
1286                                 s = splcam();
1287                                 splbreaknum = 100;
1288                                 if (cur_generation != xsoftc.xpt_generation)
1289                                        goto ptstartover;
1290                         }
1291                 }
1292                 /*
1293                  * If we found the peripheral driver that the user passed
1294                  * in, go through all of the peripheral drivers for that
1295                  * particular device and look for a passthrough driver.
1296                  */
1297                 if (periph != NULL) {
1298                         struct cam_ed *device;
1299                         int i;
1300
1301                         base_periph_found = 1;
1302                         device = periph->path->device;
1303                         for (i = 0, periph = SLIST_FIRST(&device->periphs);
1304                              periph != NULL;
1305                              periph = SLIST_NEXT(periph, periph_links), i++) {
1306                                 /*
1307                                  * Check to see whether we have a
1308                                  * passthrough device or not. 
1309                                  */
1310                                 if (strcmp(periph->periph_name, "pass") == 0) {
1311                                         /*
1312                                          * Fill in the getdevlist fields.
1313                                          */
1314                                         strcpy(ccb->cgdl.periph_name,
1315                                                periph->periph_name);
1316                                         ccb->cgdl.unit_number =
1317                                                 periph->unit_number;
1318                                         if (SLIST_NEXT(periph, periph_links))
1319                                                 ccb->cgdl.status =
1320                                                         CAM_GDEVLIST_MORE_DEVS;
1321                                         else
1322                                                 ccb->cgdl.status =
1323                                                        CAM_GDEVLIST_LAST_DEVICE;
1324                                         ccb->cgdl.generation =
1325                                                 device->generation;
1326                                         ccb->cgdl.index = i;
1327                                         /*
1328                                          * Fill in some CCB header fields
1329                                          * that the user may want.
1330                                          */
1331                                         ccb->ccb_h.path_id =
1332                                                 periph->path->bus->path_id;
1333                                         ccb->ccb_h.target_id =
1334                                                 periph->path->target->target_id;
1335                                         ccb->ccb_h.target_lun =
1336                                                 periph->path->device->lun_id;
1337                                         ccb->ccb_h.status = CAM_REQ_CMP;
1338                                         break;
1339                                 }
1340                         }
1341                 }
1342
1343                 /*
1344                  * If the periph is null here, one of two things has
1345                  * happened.  The first possibility is that we couldn't
1346                  * find the unit number of the particular peripheral driver
                 * that the user is asking about.  For example, the user asks for
1348                  * the passthrough driver for "da11".  We find the list of
1349                  * "da" peripherals all right, but there is no unit 11.
1350                  * The other possibility is that we went through the list
1351                  * of peripheral drivers attached to the device structure,
1352                  * but didn't find one with the name "pass".  Either way,
1353                  * we return ENOENT, since we couldn't find something.
1354                  */
1355                 if (periph == NULL) {
1356                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1357                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1358                         *ccb->cgdl.periph_name = '\0';
1359                         ccb->cgdl.unit_number = 0;
1360                         error = ENOENT;
                        /*
                         * This comes up often enough to deserve a hint:
                         * since we found the base peripheral above, the
                         * user is looking for the passthrough driver but
                         * doesn't have it configured in the kernel.
                         */
1368                         if (base_periph_found == 1) {
1369                                 printf("xptioctl: pass driver is not in the "
1370                                        "kernel\n");
1371                                 printf("xptioctl: put \"device pass0\" in "
1372                                        "your kernel config file\n");
1373                         }
1374                 }
1375                 splx(s);
1376                 break;
1377                 }
1378         default:
1379                 error = ENOTTY;
1380                 break;
1381         }
1382
1383         return(error);
1384 }
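
/*
 * Example (sketch, not compiled in): the CAMGETPASSTHRU handling above
 * is what userland hits when it asks which pass(4) instance sits on
 * top of a peripheral; libcam does roughly the following to map "da1"
 * to its passthrough device.  "fd" is assumed to be an open /dev/xpt0.
 * Note that periph_name is both input (the base peripheral name) and
 * output (the pass(4) instance name).
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strlcpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 1;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1 ||
 *	    (ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		return (-1);
 *	printf("%s%d\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */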
1385
1386 static int
1387 cam_module_event_handler(module_t mod, int what, void *arg)
1388 {
1389         int error;
1390
1391         switch (what) {
1392         case MOD_LOAD:
1393                 if ((error = xpt_init(NULL)) != 0)
1394                         return (error);
1395                 break;
        case MOD_UNLOAD:
                return (EBUSY);
        default:
                return (EOPNOTSUPP);
        }

        return (0);
1403 }
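
/*
 * The handler above is wired into the kernel's module system with a
 * moduledata_t; the registration looks roughly like this sketch:
 *
 *	static moduledata_t cam_moduledata = {
 *		"cam",
 *		cam_module_event_handler,
 *		NULL
 *	};
 *
 *	DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
 */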
1404
1405 /* thread to handle bus rescans */
1406 static void
1407 xpt_scanner_thread(void *dummy)
1408 {
1409         cam_isrq_t      queue;
1410         union ccb       *ccb;
1411         struct cam_sim  *sim;
1412
1413         for (;;) {
1414                 /*
1415                  * Wait for a rescan request to come in.  When it does, splice
1416                  * it onto a queue from local storage so that the xpt lock
1417                  * doesn't need to be held while the requests are being
1418                  * processed.
1419                  */
1420                 xpt_lock_buses();
1421                 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
1422                     "ccb_scanq", 0);
1423                 TAILQ_INIT(&queue);
1424                 TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
1425                 xpt_unlock_buses();
1426
1427                 while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
1428                         TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);
1429
1430                         sim = ccb->ccb_h.path->bus->sim;
1431                         CAM_SIM_LOCK(sim);
1432
1433                         ccb->ccb_h.func_code = XPT_SCAN_BUS;
1434                         ccb->ccb_h.cbfcnp = xptdone;
1435                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
1436                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1437                         xpt_free_path(ccb->ccb_h.path);
1438                         xpt_free_ccb(ccb);
1439                         CAM_SIM_UNLOCK(sim);
1440                 }
1441         }
1442 }
1443
1444 void
1445 xpt_rescan(union ccb *ccb)
1446 {
1447         struct ccb_hdr *hdr;
1448
1449         /*
1450          * Don't make duplicate entries for the same paths.
1451          */
1452         xpt_lock_buses();
1453         TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
1454                 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
1455                         xpt_unlock_buses();
1456                         xpt_print(ccb->ccb_h.path, "rescan already queued\n");
1457                         xpt_free_path(ccb->ccb_h.path);
1458                         xpt_free_ccb(ccb);
1459                         return;
1460                 }
1461         }
1462         TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1463         wakeup(&xsoftc.ccb_scanq);
1464         xpt_unlock_buses();
1465 }
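
/*
 * Example (sketch, not compiled in): a SIM driver that detects a
 * topology change can queue a full bus rescan as below; the ccb and
 * its path are consumed and freed by xpt_scanner_thread() above, and
 * duplicate requests for the same path are weeded out.  "sim" is
 * assumed to be the caller's struct cam_sim.
 *
 *	union ccb *ccb;
 *
 *	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
 *		return;
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
 *	    cam_sim_path(sim), CAM_TARGET_WILDCARD,
 *	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 */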
1466
1467 /* Functions accessed by the peripheral drivers */
1468 static int
1469 xpt_init(void *dummy)
1470 {
1471         struct cam_sim *xpt_sim;
1472         struct cam_path *path;
1473         struct cam_devq *devq;
1474         cam_status status;
1475
1476         TAILQ_INIT(&xsoftc.xpt_busses);
1477         TAILQ_INIT(&cam_simq);
1478         TAILQ_INIT(&xsoftc.ccb_scanq);
1479         STAILQ_INIT(&xsoftc.highpowerq);
1480         xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
1481
1482         mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
1483         mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
1484         mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);
1485
1486         /*
         * The xpt layer is, itself, the equivalent of a SIM.
1488          * Allow 16 ccbs in the ccb pool for it.  This should
1489          * give decent parallelism when we probe busses and
1490          * perform other XPT functions.
1491          */
1492         devq = cam_simq_alloc(16);
1493         xpt_sim = cam_sim_alloc(xptaction,
1494                                 xptpoll,
1495                                 "xpt",
1496                                 /*softc*/NULL,
1497                                 /*unit*/0,
1498                                 /*mtx*/&xsoftc.xpt_lock,
1499                                 /*max_dev_transactions*/0,
1500                                 /*max_tagged_dev_transactions*/0,
1501                                 devq);
1502         if (xpt_sim == NULL)
1503                 return (ENOMEM);
1504
1505         xpt_sim->max_ccbs = 16;
1506
1507         mtx_lock(&xsoftc.xpt_lock);
        if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
                printf("xpt_init: xpt_bus_register failed with status %#x,"
                       " failing attach\n", status);
                /* Don't return with the XPT lock held. */
                mtx_unlock(&xsoftc.xpt_lock);
                return (EINVAL);
        }
1513
1514         /*
1515          * Looking at the XPT from the SIM layer, the XPT is
         * the equivalent of a peripheral driver.  Allocate
1517          * a peripheral driver entry for us.
1518          */
        if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                      CAM_TARGET_WILDCARD,
                                      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
                printf("xpt_init: xpt_create_path failed with status %#x,"
                       " failing attach\n", status);
                /* Don't return with the XPT lock held. */
                mtx_unlock(&xsoftc.xpt_lock);
                return (EINVAL);
        }
1526
1527         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1528                          path, NULL, 0, xpt_sim);
1529         xpt_free_path(path);
1530         mtx_unlock(&xsoftc.xpt_lock);
1531
1532         /*
1533          * Register a callback for when interrupts are enabled.
1534          */
1535         xsoftc.xpt_config_hook =
1536             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1537                                               M_TEMP, M_NOWAIT | M_ZERO);
1538         if (xsoftc.xpt_config_hook == NULL) {
1539                 printf("xpt_init: Cannot malloc config hook "
1540                        "- failing attach\n");
1541                 return (ENOMEM);
1542         }
1543
        xsoftc.xpt_config_hook->ich_func = xpt_config;
        if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
                free(xsoftc.xpt_config_hook, M_TEMP);
                printf("xpt_init: config_intrhook_establish failed "
                       "- failing attach\n");
                /* Actually fail the attach, as the message says we do. */
                return (ENOMEM);
        }
1550
1551         /* fire up rescan thread */
1552         if (kthread_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
1553                 printf("xpt_init: failed to create rescan thread\n");
1554         }
1555         /* Install our software interrupt handlers */
        swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE,
            &cambio_ih);
1557
1558         return (0);
1559 }
1560
1561 static cam_status
1562 xptregister(struct cam_periph *periph, void *arg)
1563 {
1564         struct cam_sim *xpt_sim;
1565
1566         if (periph == NULL) {
1567                 printf("xptregister: periph was NULL!!\n");
1568                 return(CAM_REQ_CMP_ERR);
1569         }
1570
1571         xpt_sim = (struct cam_sim *)arg;
1572         xpt_sim->softc = periph;
1573         xpt_periph = periph;
1574         periph->softc = NULL;
1575
1576         return(CAM_REQ_CMP);
1577 }
1578
1579 int32_t
1580 xpt_add_periph(struct cam_periph *periph)
1581 {
1582         struct cam_ed *device;
1583         int32_t  status;
1584         struct periph_list *periph_head;
1585
1586         mtx_assert(periph->sim->mtx, MA_OWNED);
1587
1588         device = periph->path->device;
1589
        status = CAM_REQ_CMP;

        if (device != NULL) {
                int s;

                /* Only dereference the device after the NULL check. */
                periph_head = &device->periphs;

1597                 /*
1598                  * Make room for this peripheral
1599                  * so it will fit in the queue
1600                  * when it's scheduled to run
1601                  */
1602                 s = splsoftcam();
1603                 status = camq_resize(&device->drvq,
1604                                      device->drvq.array_size + 1);
1605
1606                 device->generation++;
1607
1608                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1609
1610                 splx(s);
1611         }
1612
1613         atomic_add_int(&xsoftc.xpt_generation, 1);
1614
1615         return (status);
1616 }
1617
1618 void
1619 xpt_remove_periph(struct cam_periph *periph)
1620 {
1621         struct cam_ed *device;
1622
1623         mtx_assert(periph->sim->mtx, MA_OWNED);
1624
1625         device = periph->path->device;
1626
1627         if (device != NULL) {
1628                 int s;
1629                 struct periph_list *periph_head;
1630
1631                 periph_head = &device->periphs;
1632                 
1633                 /* Release the slot for this peripheral */
1634                 s = splsoftcam();
1635                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1636
1637                 device->generation++;
1638
1639                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1640
1641                 splx(s);
1642         }
1643
        atomic_add_int(&xsoftc.xpt_generation, 1);
}

1649 void
1650 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1651 {
1652         struct  ccb_pathinq cpi;
1653         struct  ccb_trans_settings cts;
1654         struct  cam_path *path;
1655         u_int   speed;
1656         u_int   freq;
1657         u_int   mb;
1658         int     s;
1659
1660         mtx_assert(periph->sim->mtx, MA_OWNED);
1661
1662         path = periph->path;
1663         /*
1664          * To ensure that this is printed in one piece,
1665          * mask out CAM interrupts.
1666          */
1667         s = splsoftcam();
1668         printf("%s%d at %s%d bus %d target %d lun %d\n",
1669                periph->periph_name, periph->unit_number,
1670                path->bus->sim->sim_name,
1671                path->bus->sim->unit_number,
1672                path->bus->sim->bus_id,
1673                path->target->target_id,
1674                path->device->lun_id);
1675         printf("%s%d: ", periph->periph_name, periph->unit_number);
1676         scsi_print_inquiry(&path->device->inq_data);
1677         if (bootverbose && path->device->serial_num_len > 0) {
1678                 /* Don't wrap the screen  - print only the first 60 chars */
1679                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1680                        periph->unit_number, path->device->serial_num);
1681         }
1682         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1683         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1684         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1685         xpt_action((union ccb*)&cts);
        if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
                /* Drop the spl we took above before bailing out. */
                splx(s);
                return;
        }
1689
1690         /* Ask the SIM for its base transfer speed */
1691         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1692         cpi.ccb_h.func_code = XPT_PATH_INQ;
1693         xpt_action((union ccb *)&cpi);
1694
1695         speed = cpi.base_transfer_speed;
1696         freq = 0;
1697         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1698                 struct  ccb_trans_settings_spi *spi;
1699
1700                 spi = &cts.xport_specific.spi;
1701                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1702                   && spi->sync_offset != 0) {
1703                         freq = scsi_calc_syncsrate(spi->sync_period);
1704                         speed = freq;
1705                 }
1706
1707                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1708                         speed *= (0x01 << spi->bus_width);
1709         }
1710
1711         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1712                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1713                 if (fc->valid & CTS_FC_VALID_SPEED) {
1714                         speed = fc->bitrate;
1715                 }
1716         }
1717
1718         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1719                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1720                 if (sas->valid & CTS_SAS_VALID_SPEED) {
1721                         speed = sas->bitrate;
1722                 }
1723         }
1724
1725         mb = speed / 1000;
1726         if (mb > 0)
1727                 printf("%s%d: %d.%03dMB/s transfers",
1728                        periph->periph_name, periph->unit_number,
1729                        mb, speed % 1000);
1730         else
1731                 printf("%s%d: %dKB/s transfers", periph->periph_name,
1732                        periph->unit_number, speed);
1733         /* Report additional information about SPI connections */
1734         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1735                 struct  ccb_trans_settings_spi *spi;
1736
1737                 spi = &cts.xport_specific.spi;
1738                 if (freq != 0) {
1739                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1740                                freq % 1000,
1741                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1742                              ? " DT" : "",
1743                                spi->sync_offset);
1744                 }
1745                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1746                  && spi->bus_width > 0) {
1747                         if (freq != 0) {
1748                                 printf(", ");
1749                         } else {
1750                                 printf(" (");
1751                         }
1752                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
1753                 } else if (freq != 0) {
1754                         printf(")");
1755                 }
1756         }
1757         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1758                 struct  ccb_trans_settings_fc *fc;
1759
1760                 fc = &cts.xport_specific.fc;
1761                 if (fc->valid & CTS_FC_VALID_WWNN)
1762                         printf(" WWNN 0x%llx", (long long) fc->wwnn);
1763                 if (fc->valid & CTS_FC_VALID_WWPN)
1764                         printf(" WWPN 0x%llx", (long long) fc->wwpn);
1765                 if (fc->valid & CTS_FC_VALID_PORT)
1766                         printf(" PortID 0x%x", fc->port);
1767         }
1768
1769         if (path->device->inq_flags & SID_CmdQue
1770          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1771                 printf("\n%s%d: Command Queueing Enabled",
1772                        periph->periph_name, periph->unit_number);
1773         }
1774         printf("\n");
1775
1776         /*
1777          * We only want to print the caller's announce string if they've
1778          * passed one in..
1779          */
1780         if (announce_string != NULL)
1781                 printf("%s%d: %s\n", periph->periph_name,
1782                        periph->unit_number, announce_string);
1783         splx(s);
1784 }
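
/*
 * Taken together, the printfs above produce boot-time announcements of
 * this general shape (illustrative only; the parenthesized SPI detail
 * varies with the negotiated transfer settings):
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <SEAGATE ST318405LW 0002> Fixed Direct Access SCSI-3 device
 *	da0: 160.000MB/s transfers (80.000MHz DT, offset 63, 16bit)
 *	da0: Command Queueing Enabled
 */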
1785
1786 static dev_match_ret
1787 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1788             struct cam_eb *bus)
1789 {
1790         dev_match_ret retval;
1791         int i;
1792
1793         retval = DM_RET_NONE;
1794
1795         /*
1796          * If we aren't given something to match against, that's an error.
1797          */
1798         if (bus == NULL)
1799                 return(DM_RET_ERROR);
1800
1801         /*
1802          * If there are no match entries, then this bus matches no
1803          * matter what.
1804          */
1805         if ((patterns == NULL) || (num_patterns == 0))
1806                 return(DM_RET_DESCEND | DM_RET_COPY);
1807
1808         for (i = 0; i < num_patterns; i++) {
1809                 struct bus_match_pattern *cur_pattern;
1810
1811                 /*
1812                  * If the pattern in question isn't for a bus node, we
1813                  * aren't interested.  However, we do indicate to the
1814                  * calling routine that we should continue descending the
1815                  * tree, since the user wants to match against lower-level
1816                  * EDT elements.
1817                  */
1818                 if (patterns[i].type != DEV_MATCH_BUS) {
1819                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1820                                 retval |= DM_RET_DESCEND;
1821                         continue;
1822                 }
1823
1824                 cur_pattern = &patterns[i].pattern.bus_pattern;
1825
1826                 /*
1827                  * If they want to match any bus node, we give them any
                 * bus node.
1829                  */
1830                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1831                         /* set the copy flag */
1832                         retval |= DM_RET_COPY;
1833
1834                         /*
1835                          * If we've already decided on an action, go ahead
1836                          * and return.
1837                          */
1838                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1839                                 return(retval);
1840                 }
1841
                /*
                 * A pattern that explicitly matches nothing can never
                 * select this bus, so just skip it.
                 */
1845                 if (cur_pattern->flags == BUS_MATCH_NONE)
1846                         continue;
1847
1848                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1849                  && (cur_pattern->path_id != bus->path_id))
1850                         continue;
1851
1852                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1853                  && (cur_pattern->bus_id != bus->sim->bus_id))
1854                         continue;
1855
1856                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1857                  && (cur_pattern->unit_number != bus->sim->unit_number))
1858                         continue;
1859
1860                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1861                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1862                              DEV_IDLEN) != 0))
1863                         continue;
1864
1865                 /*
1866                  * If we get to this point, the user definitely wants 
1867                  * information on this bus.  So tell the caller to copy the
1868                  * data out.
1869                  */
1870                 retval |= DM_RET_COPY;
1871
1872                 /*
1873                  * If the return action has been set to descend, then we
1874                  * know that we've already seen a non-bus matching
1875                  * expression, therefore we need to further descend the tree.
1876                  * This won't change by continuing around the loop, so we
1877                  * go ahead and return.  If we haven't seen a non-bus
1878                  * matching expression, we keep going around the loop until
1879                  * we exhaust the matching expressions.  We'll set the stop
1880                  * flag once we fall out of the loop.
1881                  */
1882                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1883                         return(retval);
1884         }
1885
1886         /*
1887          * If the return action hasn't been set to descend yet, that means
1888          * we haven't seen anything other than bus matching patterns.  So
1889          * tell the caller to stop descending the tree -- the user doesn't
1890          * want to match against lower level tree elements.
1891          */
1892         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1893                 retval |= DM_RET_STOP;
1894
1895         return(retval);
1896 }
1897
1898 static dev_match_ret
1899 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1900                struct cam_ed *device)
1901 {
1902         dev_match_ret retval;
1903         int i;
1904
1905         retval = DM_RET_NONE;
1906
1907         /*
1908          * If we aren't given something to match against, that's an error.
1909          */
1910         if (device == NULL)
1911                 return(DM_RET_ERROR);
1912
1913         /*
1914          * If there are no match entries, then this device matches no
1915          * matter what.
1916          */
1917         if ((patterns == NULL) || (num_patterns == 0))
1918                 return(DM_RET_DESCEND | DM_RET_COPY);
1919
1920         for (i = 0; i < num_patterns; i++) {
1921                 struct device_match_pattern *cur_pattern;
1922
1923                 /*
1924                  * If the pattern in question isn't for a device node, we
1925                  * aren't interested.
1926                  */
1927                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1928                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1929                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1930                                 retval |= DM_RET_DESCEND;
1931                         continue;
1932                 }
1933
1934                 cur_pattern = &patterns[i].pattern.device_pattern;
1935
1936                 /*
1937                  * If they want to match any device node, we give them any
1938                  * device node.
1939                  */
1940                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1941                         /* set the copy flag */
1942                         retval |= DM_RET_COPY;
1943
1945                         /*
1946                          * If we've already decided on an action, go ahead
1947                          * and return.
1948                          */
1949                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1950                                 return(retval);
1951                 }
1952
                /*
                 * A pattern that explicitly matches nothing can never
                 * select this device, so just skip it.
                 */
1956                 if (cur_pattern->flags == DEV_MATCH_NONE)
1957                         continue;
1958
1959                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1960                  && (cur_pattern->path_id != device->target->bus->path_id))
1961                         continue;
1962
1963                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1964                  && (cur_pattern->target_id != device->target->target_id))
1965                         continue;
1966
1967                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1968                  && (cur_pattern->target_lun != device->lun_id))
1969                         continue;
1970
1971                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1972                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1973                                     (caddr_t)&cur_pattern->inq_pat,
1974                                     1, sizeof(cur_pattern->inq_pat),
1975                                     scsi_static_inquiry_match) == NULL))
1976                         continue;
1977
1978                 /*
1979                  * If we get to this point, the user definitely wants 
1980                  * information on this device.  So tell the caller to copy
1981                  * the data out.
1982                  */
1983                 retval |= DM_RET_COPY;
1984
1985                 /*
1986                  * If the return action has been set to descend, then we
1987                  * know that we've already seen a peripheral matching
1988                  * expression, therefore we need to further descend the tree.
1989                  * This won't change by continuing around the loop, so we
1990                  * go ahead and return.  If we haven't seen a peripheral
1991                  * matching expression, we keep going around the loop until
1992                  * we exhaust the matching expressions.  We'll set the stop
1993                  * flag once we fall out of the loop.
1994                  */
1995                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1996                         return(retval);
1997         }
1998
1999         /*
2000          * If the return action hasn't been set to descend yet, that means
2001          * we haven't seen any peripheral matching patterns.  So tell the
2002          * caller to stop descending the tree -- the user doesn't want to
2003          * match against lower level tree elements.
2004          */
2005         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
2006                 retval |= DM_RET_STOP;
2007
2008         return(retval);
2009 }
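
/*
 * Example (sketch): a DEV_MATCH_DEVICE pattern that selects fixed
 * direct-access devices from a single vendor via the inquiry
 * comparison above.  cam_strmatch() treats '*' as a wildcard, so the
 * product and revision fields below match anything.
 *
 *	struct dev_match_pattern p;
 *
 *	bzero(&p, sizeof(p));
 *	p.type = DEV_MATCH_DEVICE;
 *	p.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
 *	p.pattern.device_pattern.inq_pat.type = T_DIRECT;
 *	p.pattern.device_pattern.inq_pat.media_type = SIP_MEDIA_FIXED;
 *	strcpy(p.pattern.device_pattern.inq_pat.vendor, "SEAGATE");
 *	strcpy(p.pattern.device_pattern.inq_pat.product, "*");
 *	strcpy(p.pattern.device_pattern.inq_pat.revision, "*");
 */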
2010
2011 /*
2012  * Match a single peripheral against any number of match patterns.
2013  */
2014 static dev_match_ret
2015 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
2016                struct cam_periph *periph)
2017 {
2018         dev_match_ret retval;
2019         int i;
2020
2021         /*
2022          * If we aren't given something to match against, that's an error.
2023          */
2024         if (periph == NULL)
2025                 return(DM_RET_ERROR);
2026
2027         /*
2028          * If there are no match entries, then this peripheral matches no
2029          * matter what.
2030          */
2031         if ((patterns == NULL) || (num_patterns == 0))
2032                 return(DM_RET_STOP | DM_RET_COPY);
2033
2034         /*
2035          * There aren't any nodes below a peripheral node, so there's no
2036          * reason to descend the tree any further.
2037          */
2038         retval = DM_RET_STOP;
2039
2040         for (i = 0; i < num_patterns; i++) {
2041                 struct periph_match_pattern *cur_pattern;
2042
2043                 /*
2044                  * If the pattern in question isn't for a peripheral, we
2045                  * aren't interested.
2046                  */
2047                 if (patterns[i].type != DEV_MATCH_PERIPH)
2048                         continue;
2049
2050                 cur_pattern = &patterns[i].pattern.periph_pattern;
2051
2052                 /*
2053                  * If they want to match on anything, then we will do so.
2054                  */
2055                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2056                         /* set the copy flag */
2057                         retval |= DM_RET_COPY;
2058
2059                         /*
2060                          * We've already set the return action to stop,
2061                          * since there are no nodes below peripherals in
2062                          * the tree.
2063                          */
2064                         return(retval);
2065                 }
2066
                /*
                 * A pattern that explicitly matches nothing can never
                 * select this peripheral, so just skip it.
                 */
2070                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2071                         continue;
2072
2073                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2074                  && (cur_pattern->path_id != periph->path->bus->path_id))
2075                         continue;
2076
2077                 /*
                 * For the target and lun IDs, we have to make sure the
2079                  * target and lun pointers aren't NULL.  The xpt peripheral
2080                  * has a wildcard target and device.
2081                  */
2082                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2083                  && ((periph->path->target == NULL)
2084                  ||(cur_pattern->target_id != periph->path->target->target_id)))
2085                         continue;
2086
2087                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2088                  && ((periph->path->device == NULL)
2089                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2090                         continue;
2091
2092                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2093                  && (cur_pattern->unit_number != periph->unit_number))
2094                         continue;
2095
2096                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2097                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2098                              DEV_IDLEN) != 0))
2099                         continue;
2100
2101                 /*
2102                  * If we get to this point, the user definitely wants 
2103                  * information on this peripheral.  So tell the caller to
2104                  * copy the data out.
2105                  */
2106                 retval |= DM_RET_COPY;
2107
2108                 /*
2109                  * The return action has already been set to stop, since
2110                  * peripherals don't have any nodes below them in the EDT.
2111                  */
2112                 return(retval);
2113         }
2114
2115         /*
2116          * If we get to this point, the peripheral that was passed in
2117          * doesn't match any of the patterns.
2118          */
2119         return(retval);
2120 }
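
/*
 * Example (sketch): a pattern that matches every instance of one
 * peripheral driver, say every "da" device, no matter where it sits
 * in the topology:
 *
 *	struct dev_match_pattern p;
 *
 *	bzero(&p, sizeof(p));
 *	p.type = DEV_MATCH_PERIPH;
 *	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strlcpy(p.pattern.periph_pattern.periph_name, "da",
 *	    sizeof(p.pattern.periph_pattern.periph_name));
 */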
2121
2122 static int
2123 xptedtbusfunc(struct cam_eb *bus, void *arg)
2124 {
2125         struct ccb_dev_match *cdm;
2126         dev_match_ret retval;
2127
2128         cdm = (struct ccb_dev_match *)arg;
2129
2130         /*
2131          * If our position is for something deeper in the tree, that means
2132          * that we've already seen this node.  So, we keep going down.
2133          */
2134         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2135          && (cdm->pos.cookie.bus == bus)
2136          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2137          && (cdm->pos.cookie.target != NULL))
2138                 retval = DM_RET_DESCEND;
2139         else
2140                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2141
2142         /*
2143          * If we got an error, bail out of the search.
2144          */
2145         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2146                 cdm->status = CAM_DEV_MATCH_ERROR;
2147                 return(0);
2148         }
2149
2150         /*
2151          * If the copy flag is set, copy this bus out.
2152          */
2153         if (retval & DM_RET_COPY) {
2154                 int spaceleft, j;
2155
2156                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2157                         sizeof(struct dev_match_result));
2158
2159                 /*
2160                  * If we don't have enough space to put in another
2161                  * match result, save our position and tell the
2162                  * user there are more devices to check.
2163                  */
2164                 if (spaceleft < sizeof(struct dev_match_result)) {
2165                         bzero(&cdm->pos, sizeof(cdm->pos));
2166                         cdm->pos.position_type = 
2167                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2168
2169                         cdm->pos.cookie.bus = bus;
2170                         cdm->pos.generations[CAM_BUS_GENERATION]=
2171                                 xsoftc.bus_generation;
2172                         cdm->status = CAM_DEV_MATCH_MORE;
2173                         return(0);
2174                 }
2175                 j = cdm->num_matches;
2176                 cdm->num_matches++;
2177                 cdm->matches[j].type = DEV_MATCH_BUS;
2178                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2179                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2180                 cdm->matches[j].result.bus_result.unit_number =
2181                         bus->sim->unit_number;
2182                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2183                         bus->sim->sim_name, DEV_IDLEN);
2184         }
2185
2186         /*
2187          * If the user is only interested in busses, there's no
2188          * reason to descend to the next level in the tree.
2189          */
2190         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2191                 return(1);
2192
2193         /*
2194          * If there is a target generation recorded, check it to
2195          * make sure the target list hasn't changed.
2196          */
2197         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2198          && (bus == cdm->pos.cookie.bus)
2199          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2200          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2201          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2202              bus->generation)) {
2203                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2204                 return(0);
2205         }
2206
2207         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2208          && (cdm->pos.cookie.bus == bus)
2209          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2210          && (cdm->pos.cookie.target != NULL))
2211                 return(xpttargettraverse(bus,
2212                                         (struct cam_et *)cdm->pos.cookie.target,
2213                                          xptedttargetfunc, arg));
2214         else
2215                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2216 }
2217
2218 static int
2219 xptedttargetfunc(struct cam_et *target, void *arg)
2220 {
2221         struct ccb_dev_match *cdm;
2222
2223         cdm = (struct ccb_dev_match *)arg;
2224
2225         /*
2226          * If there is a device list generation recorded, check it to
2227          * make sure the device list hasn't changed.
2228          */
2229         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2230          && (cdm->pos.cookie.bus == target->bus)
2231          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2232          && (cdm->pos.cookie.target == target)
2233          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2234          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2235          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2236              target->generation)) {
2237                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2238                 return(0);
2239         }
2240
2241         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2242          && (cdm->pos.cookie.bus == target->bus)
2243          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2244          && (cdm->pos.cookie.target == target)
2245          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2246          && (cdm->pos.cookie.device != NULL))
2247                 return(xptdevicetraverse(target,
2248                                         (struct cam_ed *)cdm->pos.cookie.device,
2249                                          xptedtdevicefunc, arg));
2250         else
2251                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2252 }
2253
2254 static int
2255 xptedtdevicefunc(struct cam_ed *device, void *arg)
2256 {
2258         struct ccb_dev_match *cdm;
2259         dev_match_ret retval;
2260
2261         cdm = (struct ccb_dev_match *)arg;
2262
2263         /*
2264          * If our position is for something deeper in the tree, that means
2265          * that we've already seen this node.  So, we keep going down.
2266          */
2267         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2268          && (cdm->pos.cookie.device == device)
2269          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2270          && (cdm->pos.cookie.periph != NULL))
2271                 retval = DM_RET_DESCEND;
2272         else
2273                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2274                                         device);
2275
2276         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2277                 cdm->status = CAM_DEV_MATCH_ERROR;
2278                 return(0);
2279         }
2280
2281         /*
2282          * If the copy flag is set, copy this device out.
2283          */
2284         if (retval & DM_RET_COPY) {
2285                 int spaceleft, j;
2286
2287                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2288                         sizeof(struct dev_match_result));
2289
2290                 /*
2291                  * If we don't have enough space to put in another
2292                  * match result, save our position and tell the
2293                  * user there are more devices to check.
2294                  */
2295                 if (spaceleft < sizeof(struct dev_match_result)) {
2296                         bzero(&cdm->pos, sizeof(cdm->pos));
2297                         cdm->pos.position_type = 
2298                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2299                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2300
2301                         cdm->pos.cookie.bus = device->target->bus;
2302                         cdm->pos.generations[CAM_BUS_GENERATION]=
2303                                 xsoftc.bus_generation;
2304                         cdm->pos.cookie.target = device->target;
2305                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2306                                 device->target->bus->generation;
2307                         cdm->pos.cookie.device = device;
2308                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2309                                 device->target->generation;
2310                         cdm->status = CAM_DEV_MATCH_MORE;
2311                         return(0);
2312                 }
2313                 j = cdm->num_matches;
2314                 cdm->num_matches++;
2315                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2316                 cdm->matches[j].result.device_result.path_id =
2317                         device->target->bus->path_id;
2318                 cdm->matches[j].result.device_result.target_id =
2319                         device->target->target_id;
2320                 cdm->matches[j].result.device_result.target_lun =
2321                         device->lun_id;
2322                 bcopy(&device->inq_data,
2323                       &cdm->matches[j].result.device_result.inq_data,
2324                       sizeof(struct scsi_inquiry_data));
2325
2326                 /* Let the user know whether this device is unconfigured */
2327                 if (device->flags & CAM_DEV_UNCONFIGURED)
2328                         cdm->matches[j].result.device_result.flags =
2329                                 DEV_RESULT_UNCONFIGURED;
2330                 else
2331                         cdm->matches[j].result.device_result.flags =
2332                                 DEV_RESULT_NOFLAG;
2333         }
2334
2335         /*
2336          * If the user isn't interested in peripherals, don't descend
2337          * the tree any further.
2338          */
2339         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2340                 return(1);
2341
2342         /*
2343          * If there is a peripheral list generation recorded, make sure
2344          * it hasn't changed.
2345          */
2346         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2347          && (device->target->bus == cdm->pos.cookie.bus)
2348          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2349          && (device->target == cdm->pos.cookie.target)
2350          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2351          && (device == cdm->pos.cookie.device)
2352          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2353          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2354          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
             device->generation)) {
2356                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2357                 return(0);
2358         }
2359
2360         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2361          && (cdm->pos.cookie.bus == device->target->bus)
2362          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2363          && (cdm->pos.cookie.target == device->target)
2364          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2365          && (cdm->pos.cookie.device == device)
2366          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2367          && (cdm->pos.cookie.periph != NULL))
2368                 return(xptperiphtraverse(device,
2369                                 (struct cam_periph *)cdm->pos.cookie.periph,
2370                                 xptedtperiphfunc, arg));
2371         else
2372                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2373 }
2374
2375 static int
2376 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2377 {
2378         struct ccb_dev_match *cdm;
2379         dev_match_ret retval;
2380
2381         cdm = (struct ccb_dev_match *)arg;
2382
2383         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2384
2385         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2386                 cdm->status = CAM_DEV_MATCH_ERROR;
2387                 return(0);
2388         }
2389
2390         /*
2391          * If the copy flag is set, copy this peripheral out.
2392          */
2393         if (retval & DM_RET_COPY) {
2394                 int spaceleft, j;
2395
2396                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2397                         sizeof(struct dev_match_result));
2398
2399                 /*
2400                  * If we don't have enough space to put in another
2401                  * match result, save our position and tell the
2402                  * user there are more devices to check.
2403                  */
2404                 if (spaceleft < sizeof(struct dev_match_result)) {
2405                         bzero(&cdm->pos, sizeof(cdm->pos));
2406                         cdm->pos.position_type = 
2407                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2408                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2409                                 CAM_DEV_POS_PERIPH;
2410
2411                         cdm->pos.cookie.bus = periph->path->bus;
2412                         cdm->pos.generations[CAM_BUS_GENERATION]=
2413                                 xsoftc.bus_generation;
2414                         cdm->pos.cookie.target = periph->path->target;
2415                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2416                                 periph->path->bus->generation;
2417                         cdm->pos.cookie.device = periph->path->device;
2418                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2419                                 periph->path->target->generation;
2420                         cdm->pos.cookie.periph = periph;
2421                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2422                                 periph->path->device->generation;
2423                         cdm->status = CAM_DEV_MATCH_MORE;
2424                         return(0);
2425                 }
2426
2427                 j = cdm->num_matches;
2428                 cdm->num_matches++;
2429                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2430                 cdm->matches[j].result.periph_result.path_id =
2431                         periph->path->bus->path_id;
2432                 cdm->matches[j].result.periph_result.target_id =
2433                         periph->path->target->target_id;
2434                 cdm->matches[j].result.periph_result.target_lun =
2435                         periph->path->device->lun_id;
2436                 cdm->matches[j].result.periph_result.unit_number =
2437                         periph->unit_number;
2438                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2439                         periph->periph_name, DEV_IDLEN);
2440         }
2441
2442         return(1);
2443 }
2444
2445 static int
2446 xptedtmatch(struct ccb_dev_match *cdm)
2447 {
2448         int ret;
2449
2450         cdm->num_matches = 0;
2451
2452         /*
2453          * Check the bus list generation.  If it has changed, the user
2454          * needs to reset everything and start over.
2455          */
2456         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2457          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
         && (cdm->pos.generations[CAM_BUS_GENERATION] !=
             xsoftc.bus_generation)) {
2459                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2460                 return(0);
2461         }
2462
2463         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2464          && (cdm->pos.cookie.bus != NULL))
2465                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2466                                      xptedtbusfunc, cdm);
2467         else
2468                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2469
2470         /*
2471          * If we get back 0, that means that we had to stop before fully
2472          * traversing the EDT.  It also means that one of the subroutines
2473          * has set the status field to the proper value.  If we get back 1,
2474          * we've fully traversed the EDT and copied out any matching entries.
2475          */
2476         if (ret == 1)
2477                 cdm->status = CAM_DEV_MATCH_LAST;
2478
2479         return(ret);
2480 }
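
/*
 * Example (sketch, userland): because cdm->pos is preserved in the ccb
 * between calls, a caller with a small match buffer just reissues the
 * ccb until the status stops being CAM_DEV_MATCH_MORE; this is
 * essentially what camcontrol(8) does for its device list.  "fd" is
 * assumed to be an open /dev/xpt0 and "bufsize" the buffer size.
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.cdm.match_buf_len = bufsize;
 *	ccb.cdm.matches = malloc(bufsize);
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		// consume ccb.cdm.matches[0 .. ccb.cdm.num_matches)
 *	} while ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
 *	    ccb.cdm.status == CAM_DEV_MATCH_MORE);
 */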
2481
2482 static int
2483 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2484 {
2485         struct ccb_dev_match *cdm;
2486
2487         cdm = (struct ccb_dev_match *)arg;
2488
2489         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2490          && (cdm->pos.cookie.pdrv == pdrv)
2491          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2492          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2493          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2494              (*pdrv)->generation)) {
2495                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2496                 return(0);
2497         }
2498
2499         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2500          && (cdm->pos.cookie.pdrv == pdrv)
2501          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2502          && (cdm->pos.cookie.periph != NULL))
2503                 return(xptpdperiphtraverse(pdrv,
2504                                 (struct cam_periph *)cdm->pos.cookie.periph,
2505                                 xptplistperiphfunc, arg));
2506         else
                return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2508 }
2509
2510 static int
2511 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2512 {
2513         struct ccb_dev_match *cdm;
2514         dev_match_ret retval;
2515
2516         cdm = (struct ccb_dev_match *)arg;
2517
2518         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2519
2520         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2521                 cdm->status = CAM_DEV_MATCH_ERROR;
2522                 return(0);
2523         }
2524
2525         /*
2526          * If the copy flag is set, copy this peripheral out.
2527          */
2528         if (retval & DM_RET_COPY) {
2529                 int spaceleft, j;
2530
2531                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2532                         sizeof(struct dev_match_result));
2533
2534                 /*
2535                  * If we don't have enough space to put in another
2536                  * match result, save our position and tell the
2537                  * user there are more devices to check.
2538                  */
2539                 if (spaceleft < sizeof(struct dev_match_result)) {
2540                         struct periph_driver **pdrv;
2541
2542                         pdrv = NULL;
2543                         bzero(&cdm->pos, sizeof(cdm->pos));
2544                         cdm->pos.position_type = 
2545                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2546                                 CAM_DEV_POS_PERIPH;
2547
2548                         /*
                         * This may look a bit nonsensical, but it is
2550                          * actually quite logical.  There are very few
2551                          * peripheral drivers, and bloating every peripheral
2552                          * structure with a pointer back to its parent
2553                          * peripheral driver linker set entry would cost
2554                          * more in the long run than doing this quick lookup.
2555                          */
2556                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2557                                 if (strcmp((*pdrv)->driver_name,
2558                                     periph->periph_name) == 0)
2559                                         break;
2560                         }
2561
2562                         if (*pdrv == NULL) {
2563                                 cdm->status = CAM_DEV_MATCH_ERROR;
2564                                 return(0);
2565                         }
2566
2567                         cdm->pos.cookie.pdrv = pdrv;
2568                         /*
2569                          * The periph generation slot does double duty, as
2570                          * does the periph pointer slot.  They are used for
2571                          * both edt and pdrv lookups and positioning.
2572                          */
2573                         cdm->pos.cookie.periph = periph;
2574                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2575                                 (*pdrv)->generation;
2576                         cdm->status = CAM_DEV_MATCH_MORE;
2577                         return(0);
2578                 }
2579
2580                 j = cdm->num_matches;
2581                 cdm->num_matches++;
2582                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2583                 cdm->matches[j].result.periph_result.path_id =
2584                         periph->path->bus->path_id;
2585
2586                 /*
2587                  * The transport layer peripheral doesn't have a target or
2588                  * lun.
2589                  */
2590                 if (periph->path->target)
2591                         cdm->matches[j].result.periph_result.target_id =
2592                                 periph->path->target->target_id;
2593                 else
2594                         cdm->matches[j].result.periph_result.target_id = -1;
2595
2596                 if (periph->path->device)
2597                         cdm->matches[j].result.periph_result.target_lun =
2598                                 periph->path->device->lun_id;
2599                 else
2600                         cdm->matches[j].result.periph_result.target_lun = -1;
2601
2602                 cdm->matches[j].result.periph_result.unit_number =
2603                         periph->unit_number;
2604                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2605                         periph->periph_name, DEV_IDLEN);
2606         }
2607
2608         return(1);
2609 }
2610
2611 static int
2612 xptperiphlistmatch(struct ccb_dev_match *cdm)
2613 {
2614         int ret;
2615
2616         cdm->num_matches = 0;
2617
2618         /*
2619          * At this point in the edt traversal function, we check the bus
2620          * list generation to make sure that no busses have been added or
2621          * removed since the user last sent a XPT_DEV_MATCH ccb through.
2622          * For the peripheral driver list traversal function, however, we
2623          * don't have to worry about new peripheral driver types coming or
2624          * going; they're in a linker set, and therefore can't change
2625          * without a recompile.
2626          */
2627
2628         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2629          && (cdm->pos.cookie.pdrv != NULL))
2630                 ret = xptpdrvtraverse(
2631                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2632                                 xptplistpdrvfunc, cdm);
2633         else
2634                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2635
2636         /*
2637          * If we get back 0, that means that we had to stop before fully
2638          * traversing the peripheral driver list.  It also means that one
2639          * of the subroutines has set the status field to the proper value.
2640          * If we get back 1, we've fully traversed the peripheral driver
2641          * list and copied out any matching entries.
2642          */
2643         if (ret == 1)
2644                 cdm->status = CAM_DEV_MATCH_LAST;
2645
2646         return(ret);
2647 }
2648
2649 static int
2650 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2651 {
2652         struct cam_eb *bus, *next_bus;
2653         int retval;
2654
2655         retval = 1;
2656
2657         mtx_lock(&xsoftc.xpt_topo_lock);
2658         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2659              bus != NULL;
2660              bus = next_bus) {
2661                 next_bus = TAILQ_NEXT(bus, links);
2662
2663                 mtx_unlock(&xsoftc.xpt_topo_lock);
2664                 CAM_SIM_LOCK(bus->sim);
2665                 retval = tr_func(bus, arg);
2666                 CAM_SIM_UNLOCK(bus->sim);
2667                 if (retval == 0)
2668                         return(retval);
2669                 mtx_lock(&xsoftc.xpt_topo_lock);
2670         }
2671         mtx_unlock(&xsoftc.xpt_topo_lock);
2672
2673         return(retval);
2674 }
2675
2676 static int
2677 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2678                   xpt_targetfunc_t *tr_func, void *arg)
2679 {
2680         struct cam_et *target, *next_target;
2681         int retval;
2682
2683         retval = 1;
2684         for (target = (start_target ? start_target :
2685                        TAILQ_FIRST(&bus->et_entries));
2686              target != NULL; target = next_target) {
2687
2688                 next_target = TAILQ_NEXT(target, links);
2689
2690                 retval = tr_func(target, arg);
2691
2692                 if (retval == 0)
2693                         return(retval);
2694         }
2695
2696         return(retval);
2697 }
2698
2699 static int
2700 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2701                   xpt_devicefunc_t *tr_func, void *arg)
2702 {
2703         struct cam_ed *device, *next_device;
2704         int retval;
2705
2706         retval = 1;
2707         for (device = (start_device ? start_device :
2708                        TAILQ_FIRST(&target->ed_entries));
2709              device != NULL;
2710              device = next_device) {
2711
2712                 next_device = TAILQ_NEXT(device, links);
2713
2714                 retval = tr_func(device, arg);
2715
2716                 if (retval == 0)
2717                         return(retval);
2718         }
2719
2720         return(retval);
2721 }
2722
2723 static int
2724 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2725                   xpt_periphfunc_t *tr_func, void *arg)
2726 {
2727         struct cam_periph *periph, *next_periph;
2728         int retval;
2729
2730         retval = 1;
2731
2732         for (periph = (start_periph ? start_periph :
2733                        SLIST_FIRST(&device->periphs));
2734              periph != NULL;
2735              periph = next_periph) {
2736
2737                 next_periph = SLIST_NEXT(periph, periph_links);
2738
2739                 retval = tr_func(periph, arg);
2740                 if (retval == 0)
2741                         return(retval);
2742         }
2743
2744         return(retval);
2745 }
2746
2747 static int
2748 xptpdrvtraverse(struct periph_driver **start_pdrv,
2749                 xpt_pdrvfunc_t *tr_func, void *arg)
2750 {
2751         struct periph_driver **pdrv;
2752         int retval;
2753
2754         retval = 1;
2755
2756         /*
2757          * We don't traverse the peripheral driver list like we do the
2758          * other lists, because it is a linker set, and therefore cannot be
2759          * changed during runtime.  If the peripheral driver list is ever
2760          * re-done to be something other than a linker set (i.e. it can
2761          * change while the system is running), the list traversal should
2762          * be modified to work like the other traversal functions.
2763          */
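        /*
         * For reference, a driver enters that linker set via the
         * PERIPHDRIVER_DECLARE() macro.  A sketch of the usual declaration,
         * modeled on the "da" driver (abridged):
         *
         *      static struct periph_driver dadriver = {
         *              dainit, "da",
         *              TAILQ_HEAD_INITIALIZER(dadriver.units), 0
         *      };
         *      PERIPHDRIVER_DECLARE(da, dadriver);
         */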
2764         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2765              *pdrv != NULL; pdrv++) {
2766                 retval = tr_func(pdrv, arg);
2767
2768                 if (retval == 0)
2769                         return(retval);
2770         }
2771
2772         return(retval);
2773 }
2774
2775 static int
2776 xptpdperiphtraverse(struct periph_driver **pdrv,
2777                     struct cam_periph *start_periph,
2778                     xpt_periphfunc_t *tr_func, void *arg)
2779 {
2780         struct cam_periph *periph, *next_periph;
2781         int retval;
2782
2783         retval = 1;
2784
2785         for (periph = (start_periph ? start_periph :
2786              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2787              periph = next_periph) {
2788
2789                 next_periph = TAILQ_NEXT(periph, unit_links);
2790
2791                 retval = tr_func(periph, arg);
2792                 if (retval == 0)
2793                         return(retval);
2794         }
2795         return(retval);
2796 }
2797
2798 static int
2799 xptdefbusfunc(struct cam_eb *bus, void *arg)
2800 {
2801         struct xpt_traverse_config *tr_config;
2802
2803         tr_config = (struct xpt_traverse_config *)arg;
2804
2805         if (tr_config->depth == XPT_DEPTH_BUS) {
2806                 xpt_busfunc_t *tr_func;
2807
2808                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2809
2810                 return(tr_func(bus, tr_config->tr_arg));
2811         } else
2812                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2813 }
2814
2815 static int
2816 xptdeftargetfunc(struct cam_et *target, void *arg)
2817 {
2818         struct xpt_traverse_config *tr_config;
2819
2820         tr_config = (struct xpt_traverse_config *)arg;
2821
2822         if (tr_config->depth == XPT_DEPTH_TARGET) {
2823                 xpt_targetfunc_t *tr_func;
2824
2825                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2826
2827                 return(tr_func(target, tr_config->tr_arg));
2828         } else
2829                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2830 }
2831
2832 static int
2833 xptdefdevicefunc(struct cam_ed *device, void *arg)
2834 {
2835         struct xpt_traverse_config *tr_config;
2836
2837         tr_config = (struct xpt_traverse_config *)arg;
2838
2839         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2840                 xpt_devicefunc_t *tr_func;
2841
2842                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2843
2844                 return(tr_func(device, tr_config->tr_arg));
2845         } else
2846                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2847 }
2848
2849 static int
2850 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2851 {
2852         struct xpt_traverse_config *tr_config;
2853         xpt_periphfunc_t *tr_func;
2854
2855         tr_config = (struct xpt_traverse_config *)arg;
2856
2857         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2858
2859         /*
2860          * Unlike the other default functions, we don't check for depth
2861          * here.  The peripheral driver level is the last level in the EDT,
2862          * so if we're here, we should execute the function in question.
2863          */
2864         return(tr_func(periph, tr_config->tr_arg));
2865 }
2866
2867 /*
2868  * Execute the given function for every bus in the EDT.
2869  */
2870 static int
2871 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2872 {
2873         struct xpt_traverse_config tr_config;
2874
2875         tr_config.depth = XPT_DEPTH_BUS;
2876         tr_config.tr_func = tr_func;
2877         tr_config.tr_arg = arg;
2878
2879         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2880 }
2881
2882 /*
2883  * Execute the given function for every device in the EDT.
2884  */
2885 static int
2886 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2887 {
2888         struct xpt_traverse_config tr_config;
2889
2890         tr_config.depth = XPT_DEPTH_DEVICE;
2891         tr_config.tr_func = tr_func;
2892         tr_config.tr_arg = arg;
2893
2894         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2895 }
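/*
 * Example of the traversal pattern (an illustrative sketch only;
 * "xptcountdevfunc" is hypothetical and not part of this file).  A
 * callback returns 1 to continue the traversal and 0 to abort it:
 *
 *      static int
 *      xptcountdevfunc(struct cam_ed *device, void *arg)
 *      {
 *              int *count = (int *)arg;
 *
 *              if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *                      (*count)++;
 *              return (1);
 *      }
 *
 * and would be run over the whole EDT with
 *
 *      int count = 0;
 *      xpt_for_all_devices(xptcountdevfunc, &count);
 */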
2896
2897 static int
2898 xptsetasyncfunc(struct cam_ed *device, void *arg)
2899 {
2900         struct cam_path path;
2901         struct ccb_getdev cgd;
2902         struct async_node *cur_entry;
2903
2904         cur_entry = (struct async_node *)arg;
2905
2906         /*
2907          * Don't report unconfigured devices (Wildcard devs,
2908          * devices only for target mode, device instances
2909          * that have been invalidated but are waiting for
2910          * their last reference count to be released).
2911          */
2912         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2913                 return (1);
2914
2915         xpt_compile_path(&path,
2916                          NULL,
2917                          device->target->bus->path_id,
2918                          device->target->target_id,
2919                          device->lun_id);
2920         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2921         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2922         xpt_action((union ccb *)&cgd);
2923         cur_entry->callback(cur_entry->callback_arg,
2924                             AC_FOUND_DEVICE,
2925                             &path, &cgd);
2926         xpt_release_path(&path);
2927
2928         return(1);
2929 }
2930
2931 static int
2932 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2933 {
2934         struct cam_path path;
2935         struct ccb_pathinq cpi;
2936         struct async_node *cur_entry;
2937
2938         cur_entry = (struct async_node *)arg;
2939
2940         xpt_compile_path(&path, /*periph*/NULL,
2941                          bus->sim->path_id,
2942                          CAM_TARGET_WILDCARD,
2943                          CAM_LUN_WILDCARD);
2944         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2945         cpi.ccb_h.func_code = XPT_PATH_INQ;
2946         xpt_action((union ccb *)&cpi);
2947         cur_entry->callback(cur_entry->callback_arg,
2948                             AC_PATH_REGISTERED,
2949                             &path, &cpi);
2950         xpt_release_path(&path);
2951
2952         return(1);
2953 }
2954
2955 static void
2956 xpt_action_sasync_cb(void *context, int pending)
2957 {
2958         struct async_node *cur_entry;
2959         struct xpt_task *task;
2960         uint32_t added;
2961
2962         task = (struct xpt_task *)context;
2963         cur_entry = (struct async_node *)task->data1;
2964         added = task->data2;
2965
2966         if ((added & AC_FOUND_DEVICE) != 0) {
2967                 /*
2968                  * Get this peripheral up to date with all
2969                  * the currently existing devices.
2970                  */
2971                 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2972         }
2973         if ((added & AC_PATH_REGISTERED) != 0) {
2974                 /*
2975                  * Get this peripheral up to date with all
2976                  * the currently existing busses.
2977                  */
2978                 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2979         }
2980
2981         free(task, M_CAMXPT);
2982 }
2983
2984 void
2985 xpt_action(union ccb *start_ccb)
2986 {
2987         int iopl;
2988
2989         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2990
2991         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2992
2993         iopl = splsoftcam();
2994         switch (start_ccb->ccb_h.func_code) {
2995         case XPT_SCSI_IO:
2996         {
2997                 struct cam_ed *device;
2998 #ifdef CAMDEBUG
2999                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3000                 struct cam_path *path;
3001
3002                 path = start_ccb->ccb_h.path;
3003 #endif
3004
3005                 /*
3006                  * For the sake of compatibility with SCSI-1
3007                  * devices that may not understand the identify
3008                  * message, we include lun information in the
3009                  * second byte of all commands.  SCSI-1 specifies
3010                  * that luns are a 3 bit value and reserves only 3
3011                  * bits for lun information in the CDB.  Later
3012                  * revisions of the SCSI spec allow for more than 8
3013                  * luns, but have deprecated lun information in the
3014                  * CDB.  So, if the lun won't fit, we must omit it.
3015                  *
3016                  * Also be aware that during initial probing for devices,
3017                  * the inquiry information is unknown but initialized to 0.
3018                  * This means that this code will be exercised while probing
3019                  * devices with an ANSI revision greater than 2.
3020                  */
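                /*
                 * For example, lun 2 on a SCSI-1 device yields
                 * cdb_bytes[1] |= (2 << 5); the lun occupies bits 7-5
                 * of the second CDB byte.
                 */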
3021                 device = start_ccb->ccb_h.path->device;
3022                 if (device->protocol_version <= SCSI_REV_2
3023                  && start_ccb->ccb_h.target_lun < 8
3024                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3025
3026                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
3027                             start_ccb->ccb_h.target_lun << 5;
3028                 }
3029                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3030                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3031                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3032                                        &path->device->inq_data),
3033                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3034                                           cdb_str, sizeof(cdb_str))));
3035         }
3036         /* FALLTHROUGH */
3037         case XPT_TARGET_IO:
3038         case XPT_CONT_TARGET_IO:
3039                 start_ccb->csio.sense_resid = 0;
3040                 start_ccb->csio.resid = 0;
3041                 /* FALLTHROUGH */
3042         case XPT_RESET_DEV:
3043         case XPT_ENG_EXEC:
3044         {
3045                 struct cam_path *path;
3046                 struct cam_sim *sim;
3047                 int s;
3048                 int runq;
3049
3050                 path = start_ccb->ccb_h.path;
3051                 s = splsoftcam();
3052
3053                 sim = path->bus->sim;
3054                 if (SIM_DEAD(sim)) {
3055                         /* The SIM has gone; just execute the CCB directly. */
3056                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3057                         (*(sim->sim_action))(sim, start_ccb);
3058                         splx(s);
3059                         break;
3060                 }
3061
3062                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3063                 if (path->device->qfrozen_cnt == 0)
3064                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
3065                 else
3066                         runq = 0;
3067                 splx(s);
3068                 if (runq != 0)
3069                         xpt_run_dev_sendq(path->bus);
3070                 break;
3071         }
3072         case XPT_SET_TRAN_SETTINGS:
3073         {
3074                 xpt_set_transfer_settings(&start_ccb->cts,
3075                                           start_ccb->ccb_h.path->device,
3076                                           /*async_update*/FALSE);
3077                 break;
3078         }
3079         case XPT_CALC_GEOMETRY:
3080         {
3081                 struct cam_sim *sim;
3082
3083                 /* Filter out garbage */
3084                 if (start_ccb->ccg.block_size == 0
3085                  || start_ccb->ccg.volume_size == 0) {
3086                         start_ccb->ccg.cylinders = 0;
3087                         start_ccb->ccg.heads = 0;
3088                         start_ccb->ccg.secs_per_track = 0;
3089                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3090                         break;
3091                 }
3092 #ifdef PC98
3093                 /*
3094                  * In a PC-98 system, geometry translation depends on
3095                  * the "real" device geometry obtained from mode page 4.
3096                  * SCSI geometry translation is performed in the
3097                  * initialization routine of the SCSI BIOS and the result
3098                  * stored in host memory.  If the translation is available
3099                  * in host memory, use it.  If not, rely on the default
3100                  * translation the device driver performs.
3101                  */
3102                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3103                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3104                         break;
3105                 }
3106 #endif
3107                 sim = start_ccb->ccb_h.path->bus->sim;
3108                 (*(sim->sim_action))(sim, start_ccb);
3109                 break;
3110         }
3111         case XPT_ABORT:
3112         {
3113                 union ccb* abort_ccb;
3114                 int s;                          
3115
3116                 abort_ccb = start_ccb->cab.abort_ccb;
3117                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3118
3119                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3120                                 struct cam_ccbq *ccbq;
3121
3122                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3123                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3124                                 abort_ccb->ccb_h.status =
3125                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3126                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3127                                 s = splcam();
3128                                 xpt_done(abort_ccb);
3129                                 splx(s);
3130                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3131                                 break;
3132                         }
3133                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3134                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3135                                 /*
3136                                  * We've caught this ccb en route to
3137                                  * the SIM.  Flag it for abort and the
3138                                  * SIM will do so just before starting
3139                                  * real work on the CCB.
3140                                  */
3141                                 abort_ccb->ccb_h.status =
3142                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3143                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3144                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3145                                 break;
3146                         }
3147                 } 
3148                 if (XPT_FC_IS_QUEUED(abort_ccb)
3149                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3150                         /*
3151                          * It's already completed but waiting
3152                          * for our SWI to get to it.
3153                          */
3154                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3155                         break;
3156                 }
3157                 /*
3158                  * If we weren't able to take care of the abort request
3159                  * in the XPT, pass the request down to the SIM for processing.
3160                  */
3161         }
3162         /* FALLTHROUGH */
3163         case XPT_ACCEPT_TARGET_IO:
3164         case XPT_EN_LUN:
3165         case XPT_IMMED_NOTIFY:
3166         case XPT_NOTIFY_ACK:
3167         case XPT_GET_TRAN_SETTINGS:
3168         case XPT_RESET_BUS:
3169         {
3170                 struct cam_sim *sim;
3171
3172                 sim = start_ccb->ccb_h.path->bus->sim;
3173                 (*(sim->sim_action))(sim, start_ccb);
3174                 break;
3175         }
3176         case XPT_PATH_INQ:
3177         {
3178                 struct cam_sim *sim;
3179
3180                 sim = start_ccb->ccb_h.path->bus->sim;
3181                 (*(sim->sim_action))(sim, start_ccb);
3182                 break;
3183         }
3184         case XPT_PATH_STATS:
3185                 start_ccb->cpis.last_reset =
3186                         start_ccb->ccb_h.path->bus->last_reset;
3187                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3188                 break;
3189         case XPT_GDEV_TYPE:
3190         {
3191                 struct cam_ed *dev;
3192                 int s;
3193
3194                 dev = start_ccb->ccb_h.path->device;
3195                 s = splcam();
3196                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3197                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3198                 } else {
3199                         struct ccb_getdev *cgd;
3200                         struct cam_eb *bus;
3201                         struct cam_et *tar;
3202
3203                         cgd = &start_ccb->cgd;
3204                         bus = cgd->ccb_h.path->bus;
3205                         tar = cgd->ccb_h.path->target;
3206                         cgd->inq_data = dev->inq_data;
3207                         cgd->ccb_h.status = CAM_REQ_CMP;
3208                         cgd->serial_num_len = dev->serial_num_len;
3209                         if ((dev->serial_num_len > 0)
3210                          && (dev->serial_num != NULL))
3211                                 bcopy(dev->serial_num, cgd->serial_num,
3212                                       dev->serial_num_len);
3213                 }
3214                 splx(s);
3215                 break; 
3216         }
3217         case XPT_GDEV_STATS:
3218         {
3219                 struct cam_ed *dev;
3220                 int s;
3221
3222                 dev = start_ccb->ccb_h.path->device;
3223                 s = splcam();
3224                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3225                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3226                 } else {
3227                         struct ccb_getdevstats *cgds;
3228                         struct cam_eb *bus;
3229                         struct cam_et *tar;
3230
3231                         cgds = &start_ccb->cgds;
3232                         bus = cgds->ccb_h.path->bus;
3233                         tar = cgds->ccb_h.path->target;
3234                         cgds->dev_openings = dev->ccbq.dev_openings;
3235                         cgds->dev_active = dev->ccbq.dev_active;
3236                         cgds->devq_openings = dev->ccbq.devq_openings;
3237                         cgds->devq_queued = dev->ccbq.queue.entries;
3238                         cgds->held = dev->ccbq.held;
3239                         cgds->last_reset = tar->last_reset;
3240                         cgds->maxtags = dev->quirk->maxtags;
3241                         cgds->mintags = dev->quirk->mintags;
3242                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3243                                 cgds->last_reset = bus->last_reset;
3244                         cgds->ccb_h.status = CAM_REQ_CMP;
3245                 }
3246                 splx(s);
3247                 break;
3248         }
3249         case XPT_GDEVLIST:
3250         {
3251                 struct cam_periph       *nperiph;
3252                 struct periph_list      *periph_head;
3253                 struct ccb_getdevlist   *cgdl;
3254                 u_int                   i;
3255                 int                     s;
3256                 struct cam_ed           *device;
3257                 int                     found;
3258
3259
3260                 found = 0;
3261
3262                 /*
3263                  * Don't want anyone mucking with our data.
3264                  */
3265                 s = splcam();
3266                 device = start_ccb->ccb_h.path->device;
3267                 periph_head = &device->periphs;
3268                 cgdl = &start_ccb->cgdl;
3269
3270                 /*
3271                  * Check and see if the list has changed since the user
3272                  * last requested a list member.  If so, tell them that the
3273                  * list has changed, and therefore they need to start over 
3274                  * from the beginning.
3275                  */
3276                 if ((cgdl->index != 0) && 
3277                     (cgdl->generation != device->generation)) {
3278                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3279                         splx(s);
3280                         break;
3281                 }
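                /*
                 * (A caller that sees CAM_GDEVLIST_LIST_CHANGED is expected
                 * to reset its index to 0 and restart the iteration.)
                 */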
3282
3283                 /*
3284                  * Traverse the list of peripherals and attempt to find 
3285                  * the requested peripheral.
3286                  */
3287                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3288                      (nperiph != NULL) && (i <= cgdl->index);
3289                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3290                         if (i == cgdl->index) {
3291                                 strncpy(cgdl->periph_name,
3292                                         nperiph->periph_name,
3293                                         DEV_IDLEN);
3294                                 cgdl->unit_number = nperiph->unit_number;
3295                                 found = 1;
3296                         }
3297                 }
3298                 if (found == 0) {
3299                         cgdl->status = CAM_GDEVLIST_ERROR;
3300                         splx(s);
3301                         break;
3302                 }
3303
3304                 if (nperiph == NULL)
3305                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3306                 else
3307                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3308
3309                 cgdl->index++;
3310                 cgdl->generation = device->generation;
3311
3312                 splx(s);
3313                 cgdl->ccb_h.status = CAM_REQ_CMP;
3314                 break;
3315         }
3316         case XPT_DEV_MATCH:
3317         {
3318                 int s;
3319                 dev_pos_type position_type;
3320                 struct ccb_dev_match *cdm;
3321
3322                 cdm = &start_ccb->cdm;
3323
3324                 /*
3325                  * Prevent EDT changes while we traverse it.
3326                  */
3327                 s = splcam();
3328                 /*
3329                  * There are two ways of getting at information in the EDT.
3330                  * The first way is via the primary EDT tree.  It starts
3331                  * with a list of busses, then a list of targets on a bus,
3332                  * then devices/luns on a target, and then peripherals on a
3333                  * device/lun.  The "other" way is by the peripheral driver
3334                  * lists.  The peripheral driver lists are organized by
3335                  * peripheral driver.  (obviously)  So it makes sense to
3336                  * use the peripheral driver list if the user is looking
3337                  * for something like "da1", or all "da" devices.  If the
3338                  * user is looking for something on a particular bus/target
3339                  * or lun, it's generally better to go through the EDT tree.
3340                  */
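                /*
                 * As a sketch of the second style (the field names below
                 * come from struct dev_match_pattern; the values are only
                 * an example), a pattern matching peripheral "da1" would
                 * be filled in as
                 *
                 *      pat.type = DEV_MATCH_PERIPH;
                 *      pat.pattern.periph_pattern.flags =
                 *          PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
                 *      strlcpy(pat.pattern.periph_pattern.periph_name,
                 *          "da", DEV_IDLEN);
                 *      pat.pattern.periph_pattern.unit_number = 1;
                 *
                 * and would be steered through xptperiphlistmatch() below.
                 */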
3341
3342                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3343                         position_type = cdm->pos.position_type;
3344                 else {
3345                         u_int i;
3346
3347                         position_type = CAM_DEV_POS_NONE;
3348
3349                         for (i = 0; i < cdm->num_patterns; i++) {
3350                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3351                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3352                                         position_type = CAM_DEV_POS_EDT;
3353                                         break;
3354                                 }
3355                         }
3356
3357                         if (cdm->num_patterns == 0)
3358                                 position_type = CAM_DEV_POS_EDT;
3359                         else if (position_type == CAM_DEV_POS_NONE)
3360                                 position_type = CAM_DEV_POS_PDRV;
3361                 }
3362
3363                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
3364                 case CAM_DEV_POS_EDT:
3365                         xptedtmatch(cdm);
3366                         break;
3367                 case CAM_DEV_POS_PDRV:
3368                         xptperiphlistmatch(cdm);
3369                         break;
3370                 default:
3371                         cdm->status = CAM_DEV_MATCH_ERROR;
3372                         break;
3373                 }
3374
3375                 splx(s);
3376
3377                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3378                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3379                 else
3380                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3381
3382                 break;
3383         }
3384         case XPT_SASYNC_CB:
3385         {
3386                 struct ccb_setasync *csa;
3387                 struct async_node *cur_entry;
3388                 struct async_list *async_head;
3389                 u_int32_t added;
3390
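                /*
                 * A client typically arrives here with something like the
                 * following (a sketch; "myasynccb" is hypothetical):
                 *
                 *      struct ccb_setasync csa;
                 *
                 *      xpt_setup_ccb(&csa.ccb_h, path, 5);
                 *      csa.ccb_h.func_code = XPT_SASYNC_CB;
                 *      csa.event_enable = AC_FOUND_DEVICE;
                 *      csa.callback = myasynccb;
                 *      csa.callback_arg = NULL;
                 *      xpt_action((union ccb *)&csa);
                 */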
3391                 csa = &start_ccb->csa;
3392                 added = csa->event_enable;
3393                 async_head = &csa->ccb_h.path->device->asyncs;
3394
3395                 /*
3396                  * If there is already an entry for us, simply
3397                  * update it.
3398                  */
3399                 cur_entry = SLIST_FIRST(async_head);
3400                 while (cur_entry != NULL) {
3401                         if ((cur_entry->callback_arg == csa->callback_arg)
3402                          && (cur_entry->callback == csa->callback))
3403                                 break;
3404                         cur_entry = SLIST_NEXT(cur_entry, links);
3405                 }
3406
3407                 if (cur_entry != NULL) {
3408                         /*
3409                          * If the request has no flags set,
3410                          * remove the entry.
3411                          */
3412                         added &= ~cur_entry->event_enable;
3413                         if (csa->event_enable == 0) {
3414                                 SLIST_REMOVE(async_head, cur_entry,
3415                                              async_node, links);
3416                                 csa->ccb_h.path->device->refcount--;
3417                                 free(cur_entry, M_CAMXPT);
3418                         } else {
3419                                 cur_entry->event_enable = csa->event_enable;
3420                         }
3421                 } else {
3422                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3423                                            M_NOWAIT);
3424                         if (cur_entry == NULL) {
3425                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3426                                 break;
3427                         }
3428                         cur_entry->event_enable = csa->event_enable;
3429                         cur_entry->callback_arg = csa->callback_arg;
3430                         cur_entry->callback = csa->callback;
3431                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3432                         csa->ccb_h.path->device->refcount++;
3433                 }
3434
3435                 /*
3436                  * Need to decouple this operation via a taskqueue so that
3437                  * the locking doesn't become a mess.
3438                  */
3439                 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3440                         struct xpt_task *task;
3441
3442                         task = malloc(sizeof(struct xpt_task), M_CAMXPT,
3443                                       M_NOWAIT);
3444                         if (task == NULL) {
3445                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3446                                 break;
3447                         }
3448
3449                         TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3450                         task->data1 = cur_entry;
3451                         task->data2 = added;
3452                         taskqueue_enqueue(taskqueue_thread, &task->task);
3453                 }
3454
3455                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3456                 break;
3457         }
3458         case XPT_REL_SIMQ:
3459         {
3460                 struct ccb_relsim *crs;
3461                 struct cam_ed *dev;
3462                 int s;
3463
3464                 crs = &start_ccb->crs;
3465                 dev = crs->ccb_h.path->device;
3466                 if (dev == NULL) {
3467
3468                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3469                         break;
3470                 }
3471
3472                 s = splcam();
3473
3474                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3475
3476                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3477                                 /* Don't ever go below one opening */
3478                                 if (crs->openings > 0) {
3479                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3480                                                             crs->openings);
3481
3482                                         if (bootverbose) {
3483                                                 xpt_print(crs->ccb_h.path,
3484                                                     "tagged openings now %d\n",
3485                                                     crs->openings);
3486                                         }
3487                                 }
3488                         }
3489                 }
3490
3491                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3492
3493                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3494
3495                                 /*
3496                                  * Just extend the old timeout and decrement
3497                                  * the freeze count so that a single timeout
3498                                  * is sufficient for releasing the queue.
3499                                  */
3500                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3501                                 callout_stop(&dev->callout);
3502                         } else {
3503
3504                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3505                         }
3506
3507                         callout_reset(&dev->callout,
3508                             (crs->release_timeout * hz) / 1000,
3509                             xpt_release_devq_timeout, dev);
3510
3511                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3512
3513                 }
3514
3515                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3516
3517                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3518                                 /*
3519                                  * Decrement the freeze count so that a single
3520                                  * completion is still sufficient to unfreeze
3521                                  * the queue.
3522                                  */
3523                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3524                         } else {
3525                                 
3526                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3527                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3528                         }
3529                 }
3530
3531                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3532
3533                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3534                          || (dev->ccbq.dev_active == 0)) {
3535
3536                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3537                         } else {
3538                                 
3539                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3540                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3541                         }
3542                 }
3543                 splx(s);
3544                 
3545                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3546
3547                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3548                                          /*run_queue*/TRUE);
3549                 }
3550                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3551                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3552                 break;
3553         }
3554         case XPT_SCAN_BUS:
3555                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3556                 break;
3557         case XPT_SCAN_LUN:
3558                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3559                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3560                              start_ccb);
3561                 break;
3562         case XPT_DEBUG: {
3563 #ifdef CAMDEBUG
3564                 int s;
3565                 
3566                 s = splcam();
3567 #ifdef CAM_DEBUG_DELAY
3568                 cam_debug_delay = CAM_DEBUG_DELAY;
3569 #endif
3570                 cam_dflags = start_ccb->cdbg.flags;
3571                 if (cam_dpath != NULL) {
3572                         xpt_free_path(cam_dpath);
3573                         cam_dpath = NULL;
3574                 }
3575
3576                 if (cam_dflags != CAM_DEBUG_NONE) {
3577                         if (xpt_create_path(&cam_dpath, xpt_periph,
3578                                             start_ccb->ccb_h.path_id,
3579                                             start_ccb->ccb_h.target_id,
3580                                             start_ccb->ccb_h.target_lun) !=
3581                                             CAM_REQ_CMP) {
3582                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3583                                 cam_dflags = CAM_DEBUG_NONE;
3584                         } else {
3585                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3586                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3587                                     cam_dflags);
3588                         }
3589                 } else {
3590                         cam_dpath = NULL;
3591                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3592                 }
3593                 splx(s);
3594 #else /* !CAMDEBUG */
3595                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3596 #endif /* CAMDEBUG */
3597                 break;
3598         }
3599         case XPT_NOOP:
3600                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3601                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3602                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3603                 break;
3604         default:
3605         case XPT_SDEV_TYPE:
3606         case XPT_TERM_IO:
3607         case XPT_ENG_INQ:
3608                 /* XXX Implement */
3609                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3610                 break;
3611         }
3612         splx(iopl);
3613 }
3614
3615 void
3616 xpt_polled_action(union ccb *start_ccb)
3617 {
3618         int       s;
3619         u_int32_t timeout;
3620         struct    cam_sim *sim; 
3621         struct    cam_devq *devq;
3622         struct    cam_ed *dev;
3623
3624
3625         timeout = start_ccb->ccb_h.timeout;
3626         sim = start_ccb->ccb_h.path->bus->sim;
3627         devq = sim->devq;
3628         dev = start_ccb->ccb_h.path->device;
3629
3630         mtx_assert(sim->mtx, MA_OWNED);
3631         s = splcam();
3632
3633         /*
3634          * Steal an opening so that no other queued requests
3635          * can get it before us while we simulate interrupts.
3636          */
3637         dev->ccbq.devq_openings--;
3638         dev->ccbq.dev_openings--;       
3639         
3640         while (((devq != NULL && devq->send_openings <= 0) ||
3641            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3642                 DELAY(1000);
3643                 (*(sim->sim_poll))(sim);
3644                 camisr_runqueue(&sim->sim_doneq);
3645         }
3646         
3647         dev->ccbq.devq_openings++;
3648         dev->ccbq.dev_openings++;
3649         
3650         if (timeout != 0) {
3651                 xpt_action(start_ccb);
3652                 while (--timeout > 0) {
3653                         (*(sim->sim_poll))(sim);
3654                         camisr_runqueue(&sim->sim_doneq);
3655                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3656                             != CAM_REQ_INPROG)
3657                                 break;
3658                         DELAY(1000);
3659                 }
3660                 if (timeout == 0) {
3661                         /*
3662                          * XXX Is it worth adding a sim_timeout entry
3663                          * point so we can attempt recovery?  If
3664                          * this is only used for dumps, I don't think
3665                          * it is.
3666                          */
3667                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3668                 }
3669         } else {
3670                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3671         }
3672         splx(s);
3673 }
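/*
 * Illustrative use of xpt_polled_action() (a sketch modeled on how crash
 * dumps drive I/O with interrupts disabled; error handling abbreviated):
 *
 *      struct ccb_scsiio csio;
 *
 *      xpt_setup_ccb(&csio.ccb_h, periph->path, 1);
 *      ... fill in the CDB, data buffer, and timeout ...
 *      xpt_polled_action((union ccb *)&csio);
 *      if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *              ... the request failed or timed out ...
 */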
3674         
3675 /*
3676  * Schedule a peripheral driver to receive a ccb when its
3677  * target device has space for more transactions.
3678  */
3679 void
3680 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3681 {
3682         struct cam_ed *device;
3683         union ccb *work_ccb;
3684         int s;
3685         int runq;
3686
3687         mtx_assert(perph->sim->mtx, MA_OWNED);
3688
3689         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3690         device = perph->path->device;
3691         s = splsoftcam();
3692         if (periph_is_queued(perph)) {
3693                 /* Simply reorder based on new priority */
3694                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3695                           ("   change priority to %d\n", new_priority));
3696                 if (new_priority < perph->pinfo.priority) {
3697                         camq_change_priority(&device->drvq,
3698                                              perph->pinfo.index,
3699                                              new_priority);
3700                 }
3701                 runq = 0;
3702         } else if (SIM_DEAD(perph->path->bus->sim)) {
3703                 /* The SIM is gone so just call periph_start directly. */
3704                 work_ccb = xpt_get_ccb(perph->path->device);
3705                 splx(s);
3706                 if (work_ccb == NULL)
3707                         return; /* XXX */
3708                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3709                 perph->pinfo.priority = new_priority;
3710                 perph->periph_start(perph, work_ccb);
3711                 return;
3712         } else {
3713                 /* New entry on the queue */
3714                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3715                           ("   added periph to queue\n"));
3716                 perph->pinfo.priority = new_priority;
3717                 perph->pinfo.generation = ++device->drvq.generation;
3718                 camq_insert(&device->drvq, &perph->pinfo);
3719                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3720         }
3721         splx(s);
3722         if (runq != 0) {
3723                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3724                           ("   calling xpt_run_devq\n"));
3725                 xpt_run_dev_allocq(perph->path->bus);
3726         }
3727 }
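/*
 * The resulting flow for a peripheral driver is roughly (a sketch;
 * "mystart" stands for the driver's periph_start method):
 *
 *      xpt_schedule(periph, priority);
 *      ... once the device has an opening, the allocation queue runs
 *          and mystart(periph, work_ccb) is invoked to fill in the ccb
 *          and dispatch it via xpt_action() ...
 */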
3728
3729
3730 /*
3731  * Schedule a device to run on a given queue.
3732  * If the device was inserted as a new entry on the queue,
3733  * return 1 meaning the device queue should be run. If we
3734  * were already queued, implying someone else has already
3735  * started the queue, return 0 so the caller doesn't attempt
3736  * to run the queue.  Must be run at splsoftcam
3737  * (or splcam, since that encompasses splsoftcam).
3738  */
3739 static int
3740 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3741                  u_int32_t new_priority)
3742 {
3743         int retval;
3744         u_int32_t old_priority;
3745
3746         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3747
3748         old_priority = pinfo->priority;
3749
3750         /*
3751          * Are we already queued?
3752          */
3753         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3754                 /* Simply reorder based on new priority */
3755                 if (new_priority < old_priority) {
3756                         camq_change_priority(queue, pinfo->index,
3757                                              new_priority);
3758                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3759                                         ("changed priority to %d\n",
3760                                          new_priority));
3761                 }
3762                 retval = 0;
3763         } else {
3764                 /* New entry on the queue */
3765                 if (new_priority < old_priority)
3766                         pinfo->priority = new_priority;
3767
3768                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3769                                 ("Inserting onto queue\n"));
3770                 pinfo->generation = ++queue->generation;
3771                 camq_insert(queue, pinfo);
3772                 retval = 1;
3773         }
3774         return (retval);
3775 }
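/*
 * Callers follow the pattern visible in xpt_action() above:
 *
 *      if (xpt_schedule_dev_sendq(bus, device))
 *              xpt_run_dev_sendq(bus);
 */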
3776
3777 static void
3778 xpt_run_dev_allocq(struct cam_eb *bus)
3779 {
3780         struct  cam_devq *devq;
3781         int     s;
3782
3783         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3784         devq = bus->sim->devq;
3785
3786         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3787                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3788                          "openings == %d, active == %d\n",
3789                          devq->alloc_queue.qfrozen_cnt,
3790                          devq->alloc_queue.entries,
3791                          devq->alloc_openings,
3792                          devq->alloc_active));
3793
3794         s = splsoftcam();
3795         devq->alloc_queue.qfrozen_cnt++;
3796         while ((devq->alloc_queue.entries > 0)
3797             && (devq->alloc_openings > 0)
3798             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3799                 struct  cam_ed_qinfo *qinfo;
3800                 struct  cam_ed *device;
3801                 union   ccb *work_ccb;
3802                 struct  cam_periph *drv;
3803                 struct  camq *drvq;
3804                 
3805                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3806                                                            CAMQ_HEAD);
3807                 device = qinfo->device;
3808
3809                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3810                                 ("running device %p\n", device));
3811
3812                 drvq = &device->drvq;
3813
3814 #ifdef CAMDEBUG
3815                 if (drvq->entries <= 0) {
3816                         panic("xpt_run_dev_allocq: "
3817                               "Device on queue without any work to do");
3818                 }
3819 #endif
3820                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3821                         devq->alloc_openings--;
3822                         devq->alloc_active++;
3823                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3824                         splx(s);
3825                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3826                                       drv->pinfo.priority);
3827                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3828                                         ("calling periph start\n"));
3829                         drv->periph_start(drv, work_ccb);
3830                 } else {
3831                         /*
3832                          * Malloc failure in alloc_ccb
3833                          */
3834                         /*
3835                          * XXX add us to a list to be run from free_ccb
3836                          * if we don't have any ccbs active on this
3837                          * device queue otherwise we may never get run
3838                          * again.
3839                          */
3840                         break;
3841                 }
3842         
3843                 /* Raise IPL for possible insertion and test at top of loop */
3844                 s = splsoftcam();
3845
3846                 if (drvq->entries > 0) {
3847                         /* We have more work.  Attempt to reschedule */
3848                         xpt_schedule_dev_allocq(bus, device);
3849                 }
3850         }
3851         devq->alloc_queue.qfrozen_cnt--;
3852         splx(s);
3853 }
3854
3855 static void
3856 xpt_run_dev_sendq(struct cam_eb *bus)
3857 {
3858         struct  cam_devq *devq;
3859         int     s;
3860
3861         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3862         
3863         devq = bus->sim->devq;
3864
3865         s = splcam();
3866         devq->send_queue.qfrozen_cnt++;
3867         splx(s);
3868         s = splsoftcam();
3869         while ((devq->send_queue.entries > 0)
3870             && (devq->send_openings > 0)) {
3871                 struct  cam_ed_qinfo *qinfo;
3872                 struct  cam_ed *device;
3873                 union ccb *work_ccb;
3874                 struct  cam_sim *sim;
3875                 int     ospl;
3876
3877                 ospl = splcam();
3878                 if (devq->send_queue.qfrozen_cnt > 1) {
3879                         splx(ospl);
3880                         break;
3881                 }
3882
3883                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3884                                                            CAMQ_HEAD);
3885                 device = qinfo->device;
3886
3887                 /*
3888                  * If the device has been "frozen", don't attempt
3889                  * to run it.
3890                  */
3891                 if (device->qfrozen_cnt > 0) {
3892                         splx(ospl);
3893                         continue;
3894                 }
3895
3896                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3897                                 ("running device %p\n", device));
3898
3899                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3900                 if (work_ccb == NULL) {
3901                         printf("device on run queue with no ccbs???\n");
3902                         splx(ospl);
3903                         continue;
3904                 }
3905
3906                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3907
3908                         mtx_lock(&xsoftc.xpt_lock);
3909                         if (xsoftc.num_highpower <= 0) {
3910                                 /*
3911                                  * We got a high power command, but we
3912                                  * don't have any available slots.  Freeze
3913                                  * the device queue until we have a slot
3914                                  * available.
3915                                  */
3916                                 device->qfrozen_cnt++;
3917                                 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, 
3918                                                    &work_ccb->ccb_h, 
3919                                                    xpt_links.stqe);
3920
3921                                 splx(ospl);
3922                                 continue;
3923                         } else {
3924                                 /*
3925                                  * Consume a high power slot while
3926                                  * this ccb runs.
3927                                  */
3928                                 xsoftc.num_highpower--;
3929                         }
3930                         mtx_unlock(&xsoftc.xpt_lock);
3931                 }
3932                 devq->active_dev = device;
3933                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3934
3935                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3936                 splx(ospl);
3937
3938                 devq->send_openings--;
3939                 devq->send_active++;            
3940                 
3941                 if (device->ccbq.queue.entries > 0)
3942                         xpt_schedule_dev_sendq(bus, device);
3943
3944                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3945                         /*
3946                          * The client wants to freeze the queue
3947                          * after this CCB is sent.
3948                          */
3949                         ospl = splcam();
3950                         device->qfrozen_cnt++;
3951                         splx(ospl);
3952                 }
3953                 
3954                 splx(s);
3955
3956                 /* In Target mode, the peripheral driver knows best... */
3957                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3958                         if ((device->inq_flags & SID_CmdQue) != 0
3959                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3960                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3961                         else
3962                                 /*
3963                                  * Clear this in case of a retried CCB that
3964                                  * failed due to a rejected tag.
3965                                  */
3966                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3967                 }
3968
3969                 /*
3970                  * Device queues can be shared among multiple sim instances
3971                  * that reside on different busses.  Use the SIM in the queue
3972                  * CCB's path, rather than the one in the bus that was passed
3973                  * into this function.
3974                  */
3975                 sim = work_ccb->ccb_h.path->bus->sim;
3976                 (*(sim->sim_action))(sim, work_ccb);
3977
3978                 ospl = splcam();
3979                 devq->active_dev = NULL;
3980                 splx(ospl);
3981                 /* Raise IPL for possible insertion and test at top of loop */
3982                 s = splsoftcam();
3983         }
3984         splx(s);
3985         s = splcam();
3986         devq->send_queue.qfrozen_cnt--;
3987         splx(s);
3988 }
3989
3990 /*
3991  * This function merges fields from the slave ccb into the master ccb, while
3992  * keeping important fields in the master ccb constant.
3993  */
3994 void
3995 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3996 {
3997
3998         /*
3999          * Pull fields that are valid for peripheral drivers to set
4000          * into the master CCB along with the CCB "payload".
4001          */
4002         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
4003         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
4004         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
4005         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
4006         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
4007               sizeof(union ccb) - sizeof(struct ccb_hdr));
4008 }
4009
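/*
 * Example (hypothetical, illustrative names): a peripheral driver doing
 * error recovery might use xpt_merge_ccb() to fold the results of a
 * scratch recovery CCB back into the CCB it was originally handed:
 *
 *      xpt_merge_ccb(done_ccb, recovery_ccb);
 *
 * Only the header fields copied above change in the master CCB; its path
 * and queueing state are deliberately left alone.
 */
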
4010 void
4011 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
4012 {
4013
4014         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
4015         ccb_h->pinfo.priority = priority;
4016         ccb_h->path = path;
4017         ccb_h->path_id = path->bus->path_id;
4018         if (path->target)
4019                 ccb_h->target_id = path->target->target_id;
4020         else
4021                 ccb_h->target_id = CAM_TARGET_WILDCARD;
4022         if (path->device) {
4023                 ccb_h->target_lun = path->device->lun_id;
4024                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
4025         } else {
4026                 ccb_h->target_lun = CAM_TARGET_WILDCARD;
4027         }
4028         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4029         ccb_h->flags = 0;
4030 }
4031
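/*
 * Example: callers prepare a CCB with xpt_setup_ccb() and then fill in a
 * function code before dispatching it, as xpt_bus_register() does for its
 * XPT_PATH_INQ query later in this file:
 *
 *      struct ccb_pathinq cpi;
 *
 *      xpt_setup_ccb(&cpi.ccb_h, path, 1);
 *      cpi.ccb_h.func_code = XPT_PATH_INQ;
 *      xpt_action((union ccb *)&cpi);
 */
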
4032 /* Path manipulation functions */
4033 cam_status
4034 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
4035                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4036 {
4037         struct     cam_path *path;
4038         cam_status status;
4039
4040         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
4041
4042         if (path == NULL) {
4043                 status = CAM_RESRC_UNAVAIL;
4044                 return(status);
4045         }
4046         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
4047         if (status != CAM_REQ_CMP) {
4048                 free(path, M_CAMXPT);
4049                 path = NULL;
4050         }
4051         *new_path_ptr = path;
4052         return (status);
4053 }
4054
4055 cam_status
4056 xpt_create_path_unlocked(struct cam_path **new_path_ptr,
4057                          struct cam_periph *periph, path_id_t path_id,
4058                          target_id_t target_id, lun_id_t lun_id)
4059 {
4060         struct     cam_path *path;
4061         struct     cam_eb *bus = NULL;
4062         cam_status status;
4063         int        need_unlock = 0;
4064
4065         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
4066
4067         if (path_id != CAM_BUS_WILDCARD) {
4068                 bus = xpt_find_bus(path_id);
4069                 if (bus != NULL) {
4070                         need_unlock = 1;
4071                         CAM_SIM_LOCK(bus->sim);
4072                 }
4073         }
4074         status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
4075         if (need_unlock)
4076                 CAM_SIM_UNLOCK(bus->sim);
4077         if (status != CAM_REQ_CMP) {
4078                 free(path, M_CAMXPT);
4079                 path = NULL;
4080         }
4081         *new_path_ptr = path;
4082         return (status);
4083 }
4084
4085 static cam_status
4086 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
4087                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4088 {
4089         struct       cam_eb *bus;
4090         struct       cam_et *target;
4091         struct       cam_ed *device;
4092         cam_status   status;
4093         int          s;
4094
4095         status = CAM_REQ_CMP;   /* Completed without error */
4096         target = NULL;          /* Wildcarded */
4097         device = NULL;          /* Wildcarded */
4098
4099         /*
4100          * We will potentially modify the EDT, so block interrupts
4101          * that may attempt to create cam paths.
4102          */
4103         s = splcam();
4104         bus = xpt_find_bus(path_id);
4105         if (bus == NULL) {
4106                 status = CAM_PATH_INVALID;
4107         } else {
4108                 target = xpt_find_target(bus, target_id);
4109                 if (target == NULL) {
4110                         /* Create one */
4111                         struct cam_et *new_target;
4112
4113                         new_target = xpt_alloc_target(bus, target_id);
4114                         if (new_target == NULL) {
4115                                 status = CAM_RESRC_UNAVAIL;
4116                         } else {
4117                                 target = new_target;
4118                         }
4119                 }
4120                 if (target != NULL) {
4121                         device = xpt_find_device(target, lun_id);
4122                         if (device == NULL) {
4123                                 /* Create one */
4124                                 struct cam_ed *new_device;
4125
4126                                 new_device = xpt_alloc_device(bus,
4127                                                               target,
4128                                                               lun_id);
4129                                 if (new_device == NULL) {
4130                                         status = CAM_RESRC_UNAVAIL;
4131                                 } else {
4132                                         device = new_device;
4133                                 }
4134                         }
4135                 }
4136         }
4137         splx(s);
4138
4139         /*
4140          * Only touch the user's data if we are successful.
4141          */
4142         if (status == CAM_REQ_CMP) {
4143                 new_path->periph = perph;
4144                 new_path->bus = bus;
4145                 new_path->target = target;
4146                 new_path->device = device;
4147                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4148         } else {
4149                 if (device != NULL)
4150                         xpt_release_device(bus, target, device);
4151                 if (target != NULL)
4152                         xpt_release_target(bus, target);
4153                 if (bus != NULL)
4154                         xpt_release_bus(bus);
4155         }
4156         return (status);
4157 }
4158
4159 static void
4160 xpt_release_path(struct cam_path *path)
4161 {
4162         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4163         if (path->device != NULL) {
4164                 xpt_release_device(path->bus, path->target, path->device);
4165                 path->device = NULL;
4166         }
4167         if (path->target != NULL) {
4168                 xpt_release_target(path->bus, path->target);
4169                 path->target = NULL;
4170         }
4171         if (path->bus != NULL) {
4172                 xpt_release_bus(path->bus);
4173                 path->bus = NULL;
4174         }
4175 }
4176
4177 void
4178 xpt_free_path(struct cam_path *path)
4179 {
4180
4181         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4182         xpt_release_path(path);
4183         free(path, M_CAMXPT);
4184 }
4185
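/*
 * Example (sketch): the usual life cycle for a dynamically allocated
 * path is xpt_create_path(), use, xpt_free_path():
 *
 *      struct cam_path *path;
 *
 *      if (xpt_create_path(&path, periph, path_id, target_id,
 *          lun_id) != CAM_REQ_CMP)
 *              return;
 *      ... issue CCBs against path ...
 *      xpt_free_path(path);
 */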
4186
4187 /*
4188  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4189  * in path1, 2 for match with wildcards in path2.
4190  */
4191 int
4192 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4193 {
4194         int retval = 0;
4195
4196         if (path1->bus != path2->bus) {
4197                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4198                         retval = 1;
4199                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4200                         retval = 2;
4201                 else
4202                         return (-1);
4203         }
4204         if (path1->target != path2->target) {
4205                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4206                         if (retval == 0)
4207                                 retval = 1;
4208                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4209                         retval = 2;
4210                 else
4211                         return (-1);
4212         }
4213         if (path1->device != path2->device) {
4214                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4215                         if (retval == 0)
4216                                 retval = 1;
4217                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4218                         retval = 2;
4219                 else
4220                         return (-1);
4221         }
4222         return (retval);
4223 }
4224
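/*
 * Example: a caller that only needs to know whether two paths overlap,
 * either exactly or through wildcards, can test xpt_path_comp() for a
 * non-negative return:
 *
 *      if (xpt_path_comp(event_path, periph->path) >= 0) {
 *              ... the event applies to this peripheral ...
 *      }
 */
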
4225 void
4226 xpt_print_path(struct cam_path *path)
4227 {
4228         if (path == NULL)
4229                 printf("(nopath): ");
4230         else {
4231                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4232
4233                 if (path->periph != NULL)
4234                         printf("(%s%d:", path->periph->periph_name,
4235                                path->periph->unit_number);
4236                 else
4237                         printf("(noperiph:");
4238
4239                 if (path->bus != NULL)
4240                         printf("%s%d:%d:", path->bus->sim->sim_name,
4241                                path->bus->sim->unit_number,
4242                                path->bus->sim->bus_id);
4243                 else
4244                         printf("nobus:");
4245
4246                 if (path->target != NULL)
4247                         printf("%d:", path->target->target_id);
4248                 else
4249                         printf("X:");
4250
4251                 if (path->device != NULL)
4252                         printf("%d): ", path->device->lun_id);
4253                 else
4254                         printf("X): ");
4255         }
4256 }
4257
4258 void
4259 xpt_print(struct cam_path *path, const char *fmt, ...)
4260 {
4261         va_list ap;
4262         xpt_print_path(path);
4263         va_start(ap, fmt);
4264         vprintf(fmt, ap);
4265         va_end(ap);
4266 }
4267
4268 int
4269 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4270 {
4271         struct sbuf sb;
4272
4273         sbuf_new(&sb, str, str_len, 0);
4274
4275         if (path == NULL)
4276                 sbuf_printf(&sb, "(nopath): ");
4277         else {
4278                 mtx_assert(path->bus->sim->mtx, MA_OWNED);
4279
4280                 if (path->periph != NULL)
4281                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4282                                     path->periph->unit_number);
4283                 else
4284                         sbuf_printf(&sb, "(noperiph:");
4285
4286                 if (path->bus != NULL)
4287                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4288                                     path->bus->sim->unit_number,
4289                                     path->bus->sim->bus_id);
4290                 else
4291                         sbuf_printf(&sb, "nobus:");
4292
4293                 if (path->target != NULL)
4294                         sbuf_printf(&sb, "%d:", path->target->target_id);
4295                 else
4296                         sbuf_printf(&sb, "X:");
4297
4298                 if (path->device != NULL)
4299                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
4300                 else
4301                         sbuf_printf(&sb, "X): ");
4302         }
4303         sbuf_finish(&sb);
4304
4305         return(sbuf_len(&sb));
4306 }
4307
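/*
 * Example (sketch): xpt_path_string() formats a path into a
 * caller-supplied buffer when the printf-style xpt_print() above is not
 * appropriate:
 *
 *      char buf[64];
 *
 *      xpt_path_string(path, buf, sizeof(buf));
 *      ... hand buf to a logging or sbuf consumer ...
 */
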
4308 path_id_t
4309 xpt_path_path_id(struct cam_path *path)
4310 {
4311         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4312
4313         return(path->bus->path_id);
4314 }
4315
4316 target_id_t
4317 xpt_path_target_id(struct cam_path *path)
4318 {
4319         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4320
4321         if (path->target != NULL)
4322                 return (path->target->target_id);
4323         else
4324                 return (CAM_TARGET_WILDCARD);
4325 }
4326
4327 lun_id_t
4328 xpt_path_lun_id(struct cam_path *path)
4329 {
4330         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4331
4332         if (path->device != NULL)
4333                 return (path->device->lun_id);
4334         else
4335                 return (CAM_LUN_WILDCARD);
4336 }
4337
4338 struct cam_sim *
4339 xpt_path_sim(struct cam_path *path)
4340 {
4341
4342         return (path->bus->sim);
4343 }
4344
4345 struct cam_periph*
4346 xpt_path_periph(struct cam_path *path)
4347 {
4348         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4349
4350         return (path->periph);
4351 }
4352
4353 /*
4354  * Release a CAM control block for the caller.  Remit the cost of the structure
4355  * to the device referenced by the path.  If this device had no 'credits'
4356  * and peripheral drivers have registered async callbacks for this
4357  * notification, call them now.
4358  */
4359 void
4360 xpt_release_ccb(union ccb *free_ccb)
4361 {
4362         int      s;
4363         struct   cam_path *path;
4364         struct   cam_ed *device;
4365         struct   cam_eb *bus;
4366         struct   cam_sim *sim;
4367
4368         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4369         path = free_ccb->ccb_h.path;
4370         device = path->device;
4371         bus = path->bus;
4372         sim = bus->sim;
4373         s = splsoftcam();
4374
4375         mtx_assert(sim->mtx, MA_OWNED);
4376
4377         cam_ccbq_release_opening(&device->ccbq);
4378         if (sim->ccb_count > sim->max_ccbs) {
4379                 xpt_free_ccb(free_ccb);
4380                 sim->ccb_count--;
4381         } else {
4382                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4383                     xpt_links.sle);
4384         }
4385         if (sim->devq == NULL) {
4386                 splx(s);
4387                 return;
4388         }
4389         sim->devq->alloc_openings++;
4390         sim->devq->alloc_active--;
4391         /* XXX Turn this into an inline function - xpt_run_device?? */
4392         if ((device_is_alloc_queued(device) == 0)
4393          && (device->drvq.entries > 0)) {
4394                 xpt_schedule_dev_allocq(bus, device);
4395         }
4396         splx(s);
4397         if (dev_allocq_is_runnable(sim->devq))
4398                 xpt_run_dev_allocq(bus);
4399 }
4400
4401 /* Functions accessed by SIM drivers */
4402
4403 /*
4404  * A sim structure, listing the SIM entry points and instance
4405  * identification info is passed to xpt_bus_register to hook the SIM
4406  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4407  * for this new bus and places it in the list of busses and assigns
4408  * it a path_id.  The path_id may be influenced by "hard wiring"
4409  * information specified by the user.  Once interrupt services are
4410  * available, the bus will be probed.
4411  */
4412 int32_t
4413 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4414 {
4415         struct cam_eb *new_bus;
4416         struct cam_eb *old_bus;
4417         struct ccb_pathinq cpi;
4418         int s;
4419
4420         mtx_assert(sim->mtx, MA_OWNED);
4421
4422         sim->bus_id = bus;
4423         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4424                                           M_CAMXPT, M_NOWAIT);
4425         if (new_bus == NULL) {
4426                 /* Couldn't satisfy request */
4427                 return (CAM_RESRC_UNAVAIL);
4428         }
4429
4430         if (strcmp(sim->sim_name, "xpt") != 0) {
4431
4432                 sim->path_id =
4433                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4434         }
4435
4436         TAILQ_INIT(&new_bus->et_entries);
4437         new_bus->path_id = sim->path_id;
4438         new_bus->sim = sim;
4439         timevalclear(&new_bus->last_reset);
4440         new_bus->flags = 0;
4441         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4442         new_bus->generation = 0;
4443         s = splcam();
4444         mtx_lock(&xsoftc.xpt_topo_lock);
4445         old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4446         while (old_bus != NULL
4447             && old_bus->path_id < new_bus->path_id)
4448                 old_bus = TAILQ_NEXT(old_bus, links);
4449         if (old_bus != NULL)
4450                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4451         else
4452                 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4453         xsoftc.bus_generation++;
4454         mtx_unlock(&xsoftc.xpt_topo_lock);
4455         splx(s);
4456
4457         /* Notify interested parties */
4458         if (sim->path_id != CAM_XPT_PATH_ID) {
4459                 struct cam_path path;
4460
4461                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4462                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4463                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4464                 cpi.ccb_h.func_code = XPT_PATH_INQ;
4465                 xpt_action((union ccb *)&cpi);
4466                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4467                 xpt_release_path(&path);
4468         }
4469         return (CAM_SUCCESS);
4470 }
4471
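/*
 * Example (sketch, illustrative "foo" names): a SIM driver's attach
 * routine typically allocates a device queue and a sim, then calls
 * xpt_bus_register() with its lock held; error handling omitted:
 *
 *      devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *      sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *                          device_get_unit(dev), &softc->mtx,
 *                          1, MAX_TAGS, devq);
 *      mtx_lock(&softc->mtx);
 *      if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *              ... fail the attach ...
 *      mtx_unlock(&softc->mtx);
 */
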
4472 int32_t
4473 xpt_bus_deregister(path_id_t pathid)
4474 {
4475         struct cam_path bus_path;
4476         struct cam_ed *device;
4477         struct cam_ed_qinfo *qinfo;
4478         struct cam_devq *devq;
4479         struct cam_periph *periph;
4480         struct cam_sim *ccbsim;
4481         union ccb *work_ccb;
4482         cam_status status;
4483
4484
4485         status = xpt_compile_path(&bus_path, NULL, pathid,
4486                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4487         if (status != CAM_REQ_CMP)
4488                 return (status);
4489
4490         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4491         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4492
4493         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4494         devq = bus_path.bus->sim->devq;
4495         ccbsim = bus_path.bus->sim;
4496         bus_path.bus->sim = &cam_dead_sim;
4497
4498         /* Execute any pending operations now. */
4499         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4500             CAMQ_HEAD)) != NULL ||
4501             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4502             CAMQ_HEAD)) != NULL) {
4503                 do {
4504                         device = qinfo->device;
4505                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4506                         if (work_ccb != NULL) {
4507                                 devq->active_dev = device;
4508                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4509                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4510                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4511                         }
4512
4513                         periph = (struct cam_periph *)camq_remove(&device->drvq,
4514                             CAMQ_HEAD);
4515                         if (periph != NULL)
4516                                 xpt_schedule(periph, periph->pinfo.priority);
4517                 } while (work_ccb != NULL || periph != NULL);
4518         }
4519
4520         /* Make sure all completed CCBs are processed. */
4521         while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
4522                 camisr_runqueue(&ccbsim->sim_doneq);
4523
4524                 /* Repeat the asyncs for the benefit of any new devices. */
4525                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4526                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4527         }
4528
4529         /* Release the reference count held while registered. */
4530         xpt_release_bus(bus_path.bus);
4531         xpt_release_path(&bus_path);
4532
4533         return (CAM_REQ_CMP);
4534 }
4535
4536 static path_id_t
4537 xptnextfreepathid(void)
4538 {
4539         struct cam_eb *bus;
4540         path_id_t pathid;
4541         const char *strval;
4542
4543         pathid = 0;
4544         mtx_lock(&xsoftc.xpt_topo_lock);
4545         bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4546 retry:
4547         /* Find an unoccupied pathid */
4548         while (bus != NULL && bus->path_id <= pathid) {
4549                 if (bus->path_id == pathid)
4550                         pathid++;
4551                 bus = TAILQ_NEXT(bus, links);
4552         }
4553         mtx_unlock(&xsoftc.xpt_topo_lock);
4554
4555         /*
4556          * Ensure that this pathid is not reserved for
4557          * a bus that may be registered in the future.
4558          */
4559         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4560                 ++pathid;
4561                 /* Start the search over */
4562                 mtx_lock(&xsoftc.xpt_topo_lock);
4563                 goto retry;
4564         }
4565         return (pathid);
4566 }
4567
4568 static path_id_t
4569 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4570 {
4571         path_id_t pathid;
4572         int i, dunit, val;
4573         char buf[32];
4574         const char *dname;
4575
4576         pathid = CAM_XPT_PATH_ID;
4577         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4578         i = 0;
4579         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4580                 if (strcmp(dname, "scbus")) {
4581                         /* Avoid a bit of foot shooting. */
4582                         continue;
4583                 }
4584                 if (dunit < 0)          /* unwired?! */
4585                         continue;
4586                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4587                         if (sim_bus == val) {
4588                                 pathid = dunit;
4589                                 break;
4590                         }
4591                 } else if (sim_bus == 0) {
4592                         /* Unspecified matches bus 0 */
4593                         pathid = dunit;
4594                         break;
4595                 } else {
4596                         printf("Ambiguous scbus configuration for %s%d "
4597                                "bus %d, cannot wire down.  The kernel "
4598                                "config entry for scbus%d should "
4599                                "specify a controller bus.\n"
4600                                "Scbus will be assigned dynamically.\n",
4601                                sim_name, sim_unit, sim_bus, dunit);
4602                         break;
4603                 }
4604         }
4605
4606         if (pathid == CAM_XPT_PATH_ID)
4607                 pathid = xptnextfreepathid();
4608         return (pathid);
4609 }
4610
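/*
 * Example: the "scbus" wiring consulted by xptpathid() above comes from
 * the kernel config or from device.hints entries such as (illustrative):
 *
 *      hint.scbus.0.at="ahc0"
 *      hint.scbus.0.bus="0"
 *
 * which pin path id 0 to bus 0 of ahc0 regardless of probe order.
 */
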
4611 void
4612 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4613 {
4614         struct cam_eb *bus;
4615         struct cam_et *target, *next_target;
4616         struct cam_ed *device, *next_device;
4617         int s;
4618
4619         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4620
4621         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4622
4623         /*
4624          * Most async events come from a CAM interrupt context.  In
4625          * a few cases, the error recovery code at the peripheral layer,
4626          * which may run from our SWI or a process context, may signal
4627          * deferred events with a call to xpt_async. Ensure async
4628          * notifications are serialized by blocking cam interrupts.
4629          */
4630         s = splcam();
4631
4632         bus = path->bus;
4633
4634         if (async_code == AC_BUS_RESET) { 
4635                 int s;
4636
4637                 s = splclock();
4638                 /* Update our notion of when the last reset occurred */
4639                 microtime(&bus->last_reset);
4640                 splx(s);
4641         }
4642
4643         for (target = TAILQ_FIRST(&bus->et_entries);
4644              target != NULL;
4645              target = next_target) {
4646
4647                 next_target = TAILQ_NEXT(target, links);
4648
4649                 if (path->target != target
4650                  && path->target->target_id != CAM_TARGET_WILDCARD
4651                  && target->target_id != CAM_TARGET_WILDCARD)
4652                         continue;
4653
4654                 if (async_code == AC_SENT_BDR) {
4655                         int s;
4656
4657                         /* Update our notion of when the last reset occurred */
4658                         s = splclock();
4659                         microtime(&path->target->last_reset);
4660                         splx(s);
4661                 }
4662
4663                 for (device = TAILQ_FIRST(&target->ed_entries);
4664                      device != NULL;
4665                      device = next_device) {
4666
4667                         next_device = TAILQ_NEXT(device, links);
4668
4669                         if (path->device != device 
4670                          && path->device->lun_id != CAM_LUN_WILDCARD
4671                          && device->lun_id != CAM_LUN_WILDCARD)
4672                                 continue;
4673
4674                         xpt_dev_async(async_code, bus, target,
4675                                       device, async_arg);
4676
4677                         xpt_async_bcast(&device->asyncs, async_code,
4678                                         path, async_arg);
4679                 }
4680         }
4681         
4682         /*
4683          * If this wasn't a fully wildcarded async, tell all
4684          * clients that want all async events.
4685          */
4686         if (bus != xpt_periph->path->bus)
4687                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4688                                 path, async_arg);
4689         splx(s);
4690 }
4691
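/*
 * Example (sketch, illustrative names): clients hook themselves into the
 * per-device async lists broadcast by xpt_async() above by issuing an
 * XPT_SASYNC_CB CCB:
 *
 *      struct ccb_setasync csa;
 *
 *      xpt_setup_ccb(&csa.ccb_h, path, 5);
 *      csa.ccb_h.func_code = XPT_SASYNC_CB;
 *      csa.event_enable = AC_LOST_DEVICE | AC_SENT_BDR;
 *      csa.callback = foo_async;
 *      csa.callback_arg = softc;
 *      xpt_action((union ccb *)&csa);
 */
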
4692 static void
4693 xpt_async_bcast(struct async_list *async_head,
4694                 u_int32_t async_code,
4695                 struct cam_path *path, void *async_arg)
4696 {
4697         struct async_node *cur_entry;
4698
4699         cur_entry = SLIST_FIRST(async_head);
4700         while (cur_entry != NULL) {
4701                 struct async_node *next_entry;
4702                 /*
4703                  * Grab the next list entry before we call the current
4704                  * entry's callback.  This is because the callback function
4705                  * can delete its async callback entry.
4706                  */
4707                 next_entry = SLIST_NEXT(cur_entry, links);
4708                 if ((cur_entry->event_enable & async_code) != 0)
4709                         cur_entry->callback(cur_entry->callback_arg,
4710                                             async_code, path,
4711                                             async_arg);
4712                 cur_entry = next_entry;
4713         }
4714 }
4715
4716 /*
4717  * Handle any per-device event notifications that require action by the XPT.
4718  */
4719 static void
4720 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4721               struct cam_ed *device, void *async_arg)
4722 {
4723         cam_status status;
4724         struct cam_path newpath;
4725
4726         /*
4727          * We only need to handle events for real devices.
4728          */
4729         if (target->target_id == CAM_TARGET_WILDCARD
4730          || device->lun_id == CAM_LUN_WILDCARD)
4731                 return;
4732
4733         /*
4734          * We need our own path with wildcards expanded to
4735          * handle certain types of events.
4736          */
4737         if ((async_code == AC_SENT_BDR)
4738          || (async_code == AC_BUS_RESET)
4739          || (async_code == AC_INQ_CHANGED))
4740                 status = xpt_compile_path(&newpath, NULL,
4741                                           bus->path_id,
4742                                           target->target_id,
4743                                           device->lun_id);
4744         else
4745                 status = CAM_REQ_CMP_ERR;
4746
4747         if (status == CAM_REQ_CMP) {
4748
4749                 /*
4750                  * Allow transfer negotiation to occur in a
4751                  * tag free environment.
4752                  */
4753                 if (async_code == AC_SENT_BDR
4754                  || async_code == AC_BUS_RESET)
4755                         xpt_toggle_tags(&newpath);
4756
4757                 if (async_code == AC_INQ_CHANGED) {
4758                         /*
4759                          * We've sent a start unit command, or
4760                          * something similar, to a device that
4761                          * may have caused its inquiry data to
4762                          * change.  So we re-scan the device to
4763                          * refresh the inquiry data for it.
4764                          */
4765                         xpt_scan_lun(newpath.periph, &newpath,
4766                                      CAM_EXPECT_INQ_CHANGE, NULL);
4767                 }
4768                 xpt_release_path(&newpath);
4769         } else if (async_code == AC_LOST_DEVICE) {
4770                 device->flags |= CAM_DEV_UNCONFIGURED;
4771         } else if (async_code == AC_TRANSFER_NEG) {
4772                 struct ccb_trans_settings *settings;
4773
4774                 settings = (struct ccb_trans_settings *)async_arg;
4775                 xpt_set_transfer_settings(settings, device,
4776                                           /*async_update*/TRUE);
4777         }
4778 }
4779
4780 u_int32_t
4781 xpt_freeze_devq(struct cam_path *path, u_int count)
4782 {
4783         int s;
4784         struct ccb_hdr *ccbh;
4785
4786         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4787
4788         s = splcam();
4789         path->device->qfrozen_cnt += count;
4790
4791         /*
4792          * Mark the last CCB in the queue as needing
4793          * to be requeued if the driver hasn't
4794          * changed its state yet.  This fixes a race
4795          * where a ccb is just about to be queued to
4796          * a controller driver when its interrupt routine
4797          * freezes the queue.  To completely close the
4798          * hole, controller drivers must check to see
4799          * if a ccb's status is still CAM_REQ_INPROG
4800          * under spl protection just before they queue
4801          * the CCB.  See ahc_action/ahc_freeze_devq for
4802          * an example.
4803          */
4804         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4805         if (ccbh && ccbh->status == CAM_REQ_INPROG)
4806                 ccbh->status = CAM_REQUEUE_REQ;
4807         splx(s);
4808         return (path->device->qfrozen_cnt);
4809 }
4810
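/*
 * Example: freezes are counted, so each xpt_freeze_devq() call must be
 * balanced by a matching release once the condition clears:
 *
 *      xpt_freeze_devq(path, 1);
 *      ... recover, renegotiate, or wait ...
 *      xpt_release_devq(path, 1, TRUE);
 *
 * xpt_freeze_simq()/xpt_release_simq() below follow the same pattern for
 * an entire SIM.
 */
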
4811 u_int32_t
4812 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4813 {
4814         mtx_assert(sim->mtx, MA_OWNED);
4815
4816         sim->devq->send_queue.qfrozen_cnt += count;
4817         if (sim->devq->active_dev != NULL) {
4818                 struct ccb_hdr *ccbh;
4819                 
4820                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4821                                   ccb_hdr_tailq);
4822                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4823                         ccbh->status = CAM_REQUEUE_REQ;
4824         }
4825         return (sim->devq->send_queue.qfrozen_cnt);
4826 }
4827
4828 static void
4829 xpt_release_devq_timeout(void *arg)
4830 {
4831         struct cam_ed *device;
4832
4833         device = (struct cam_ed *)arg;
4834
4835         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4836 }
4837
4838 void
4839 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4840 {
4841         mtx_assert(path->bus->sim->mtx, MA_OWNED);
4842
4843         xpt_release_devq_device(path->device, count, run_queue);
4844 }
4845
4846 static void
4847 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4848 {
4849         int     rundevq;
4850         int     s0, s1;
4851
4852         rundevq = 0;
4853         s0 = splsoftcam();
4854         s1 = splcam();
4855         if (dev->qfrozen_cnt > 0) {
4856
4857                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4858                 dev->qfrozen_cnt -= count;
4859                 if (dev->qfrozen_cnt == 0) {
4860
4861                         /*
4862                          * No longer need to wait for a successful
4863                          * command completion.
4864                          */
4865                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4866
4867                         /*
4868                          * Remove any timeouts that might be scheduled
4869                          * to release this queue.
4870                          */
4871                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4872                                 callout_stop(&dev->callout);
4873                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4874                         }
4875
4876                         /*
4877                          * Now that we are unfrozen schedule the
4878                          * device so any pending transactions are
4879                          * run.
4880                          */
4881                         if ((dev->ccbq.queue.entries > 0)
4882                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4883                          && (run_queue != 0)) {
4884                                 rundevq = 1;
4885                         }
4886                 }
4887         }
4888         splx(s1);
4889         if (rundevq != 0)
4890                 xpt_run_dev_sendq(dev->target->bus);
4891         splx(s0);
4892 }
4893
4894 void
4895 xpt_release_simq(struct cam_sim *sim, int run_queue)
4896 {
4897         int     s;
4898         struct  camq *sendq;
4899
4900         mtx_assert(sim->mtx, MA_OWNED);
4901
4902         sendq = &(sim->devq->send_queue);
4903         s = splcam();
4904         if (sendq->qfrozen_cnt > 0) {
4905
4906                 sendq->qfrozen_cnt--;
4907                 if (sendq->qfrozen_cnt == 0) {
4908                         struct cam_eb *bus;
4909
4910                         /*
4911                          * If there is a timeout scheduled to release this
4912                          * sim queue, remove it.  The queue frozen count is
4913                          * already at 0.
4914                          */
4915                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4916                                 callout_stop(&sim->callout);
4917                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4918                         }
4919                         bus = xpt_find_bus(sim->path_id);
4920                         splx(s);
4921
4922                         if (run_queue) {
4923                                 /*
4924                                  * Now that we are unfrozen run the send queue.
4925                                  */
4926                                 xpt_run_dev_sendq(bus);
4927                         }
4928                         xpt_release_bus(bus);
4929                 } else
4930                         splx(s);
4931         } else
4932                 splx(s);
4933 }
4934
4935 /*
4936  * XXX Appears to be unused.
4937  */
4938 static void
4939 xpt_release_simq_timeout(void *arg)
4940 {
4941         struct cam_sim *sim;
4942
4943         sim = (struct cam_sim *)arg;
4944         xpt_release_simq(sim, /* run_queue */ TRUE);
4945 }
4946
4947 void
4948 xpt_done(union ccb *done_ccb)
4949 {
4950         struct cam_sim *sim;
4951         int s;
4952
4953         s = splcam();
4954
4955         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4956         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4957                 /*
4958                  * Queue up the request for handling by our SWI handler;
4959                  * this covers any of the "non-immediate" types of ccbs.
4960                  */
4961                 sim = done_ccb->ccb_h.path->bus->sim;
4962                 switch (done_ccb->ccb_h.path->periph->type) {
4963                 case CAM_PERIPH_BIO:
4964                         TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4965                                           sim_links.tqe);
4966                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4967                         if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4968                                 mtx_lock(&cam_simq_lock);
4969                                 TAILQ_INSERT_TAIL(&cam_simq, sim,
4970                                                   links);
4971                                 sim->flags |= CAM_SIM_ON_DONEQ;
4972                                 mtx_unlock(&cam_simq_lock);
4973                         }
4974                         if ((done_ccb->ccb_h.path->periph->flags &
4975                             CAM_PERIPH_POLLED) == 0)
4976                                 swi_sched(cambio_ih, 0);
4977                         break;
4978                 default:
4979                         panic("unknown periph type %d",
4980                             done_ccb->ccb_h.path->periph->type);
4981                 }
4982         }
4983         splx(s);
4984 }
4985
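/*
 * Example (sketch): a SIM's action routine hands a finished request back
 * by setting a final status and calling xpt_done(); SIMs that freeze the
 * device queue also or CAM_DEV_QFRZN into that status:
 *
 *      ccb->ccb_h.status = CAM_REQ_CMP;
 *      xpt_done(ccb);
 */
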
4986 union ccb *
4987 xpt_alloc_ccb(void)
4988 {
4989         union ccb *new_ccb;
4990
4991         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
4992         return (new_ccb);
4993 }
4994
4995 union ccb *
4996 xpt_alloc_ccb_nowait(void)
4997 {
4998         union ccb *new_ccb;
4999
5000         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
5001         return (new_ccb);
5002 }
5003
5004 void
5005 xpt_free_ccb(union ccb *free_ccb)
5006 {
5007         free(free_ccb, M_CAMXPT);
5008 }
5009
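/*
 * Example: contexts that may sleep use xpt_alloc_ccb(); interrupt or
 * completion context must use the nowait variant and cope with failure:
 *
 *      union ccb *ccb;
 *
 *      ccb = xpt_alloc_ccb_nowait();
 *      if (ccb == NULL)
 *              return (CAM_RESRC_UNAVAIL);
 *      ...
 *      xpt_free_ccb(ccb);
 */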
5010
5011
5012 /* Private XPT functions */
5013
5014 /*
5015  * Get a CAM control block for the caller. Charge the structure to the device
5016  * referenced by the path.  If this device has no 'credits' then the
5017  * device already has the maximum number of outstanding operations under way
5018  * and we return NULL. If we don't have sufficient resources to allocate more
5019  * ccbs, we also return NULL.
5020  */
5021 static union ccb *
5022 xpt_get_ccb(struct cam_ed *device)
5023 {
5024         union ccb *new_ccb;
5025         struct cam_sim *sim;
5026         int s;
5027
5028         s = splsoftcam();
5029         sim = device->sim;
5030         if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
5031                 new_ccb = xpt_alloc_ccb_nowait();
5032                 if (new_ccb == NULL) {
5033                         splx(s);
5034                         return (NULL);
5035                 }
5036                 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
5037                         callout_handle_init(&new_ccb->ccb_h.timeout_ch);
5038                 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
5039                                   xpt_links.sle);
5040                 sim->ccb_count++;
5041         }
5042         cam_ccbq_take_opening(&device->ccbq);
5043         SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
5044         splx(s);
5045         return (new_ccb);
5046 }
5047
5048 static void
5049 xpt_release_bus(struct cam_eb *bus)
5050 {
5051         int s;
5052
5053         s = splcam();
5054         if ((--bus->refcount == 0)
5055          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
5056                 mtx_lock(&xsoftc.xpt_topo_lock);
5057                 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
5058                 xsoftc.bus_generation++;
5059                 mtx_unlock(&xsoftc.xpt_topo_lock);
5060                 splx(s);
5061                 free(bus, M_CAMXPT);
5062         } else
5063                 splx(s);
5064 }
5065
5066 static struct cam_et *
5067 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
5068 {
5069         struct cam_et *target;
5070
5071         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
5072         if (target != NULL) {
5073                 struct cam_et *cur_target;
5074
5075                 TAILQ_INIT(&target->ed_entries);
5076                 target->bus = bus;
5077                 target->target_id = target_id;
5078                 target->refcount = 1;
5079                 target->generation = 0;
5080                 timevalclear(&target->last_reset);
5081                 /*
5082                  * Hold a reference to our parent bus so it
5083                  * will not go away before we do.
5084                  */
5085                 bus->refcount++;
5086
5087                 /* Insertion sort into our bus's target list */
5088                 cur_target = TAILQ_FIRST(&bus->et_entries);
5089                 while (cur_target != NULL && cur_target->target_id < target_id)
5090                         cur_target = TAILQ_NEXT(cur_target, links);
5091
5092                 if (cur_target != NULL) {
5093                         TAILQ_INSERT_BEFORE(cur_target, target, links);
5094                 } else {
5095                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
5096                 }
5097                 bus->generation++;
5098         }
5099         return (target);
5100 }
5101
5102 static void
5103 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
5104 {
5105         int s;
5106
5107         s = splcam();
5108         if ((--target->refcount == 0)
5109          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
5110                 TAILQ_REMOVE(&bus->et_entries, target, links);
5111                 bus->generation++;
5112                 splx(s);
5113                 free(target, M_CAMXPT);
5114                 xpt_release_bus(bus);
5115         } else
5116                 splx(s);
5117 }
5118
5119 static struct cam_ed *
5120 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5121 {
5122         struct     cam_path path;
5123         struct     cam_ed *device;
5124         struct     cam_devq *devq;
5125         cam_status status;
5126
5127         if (SIM_DEAD(bus->sim))
5128                 return (NULL);
5129
5130         /* Make space for us in the device queue on our bus */
5131         devq = bus->sim->devq;
5132         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5133
5134         if (status != CAM_REQ_CMP) {
5135                 device = NULL;
5136         } else {
5137                 device = (struct cam_ed *)malloc(sizeof(*device),
5138                                                  M_CAMXPT, M_NOWAIT);
5139         }
5140
5141         if (device != NULL) {
5142                 struct cam_ed *cur_device;
5143
5144                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5145                 device->alloc_ccb_entry.device = device;
5146                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
5147                 device->send_ccb_entry.device = device;
5148                 device->target = target;
5149                 device->lun_id = lun_id;
5150                 device->sim = bus->sim;
5151                 /* Initialize our queues */
5152                 if (camq_init(&device->drvq, 0) != 0) {
5153                         free(device, M_CAMXPT);
5154                         return (NULL);
5155                 }
5156                 if (cam_ccbq_init(&device->ccbq,
5157                                   bus->sim->max_dev_openings) != 0) {
5158                         camq_fini(&device->drvq);
5159                         free(device, M_CAMXPT);
5160                         return (NULL);
5161                 }
5162                 SLIST_INIT(&device->asyncs);
5163                 SLIST_INIT(&device->periphs);
5164                 device->generation = 0;
5165                 device->owner = NULL;
5166                 /*
5167                  * Take the default quirk entry until we have inquiry
5168                  * data and can determine a better quirk to use.
5169                  */
5170                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5171                 bzero(&device->inq_data, sizeof(device->inq_data));
5172                 device->inq_flags = 0;
5173                 device->queue_flags = 0;
5174                 device->serial_num = NULL;
5175                 device->serial_num_len = 0;
5176                 device->qfrozen_cnt = 0;
5177                 device->flags = CAM_DEV_UNCONFIGURED;
5178                 device->tag_delay_count = 0;
5179                 device->tag_saved_openings = 0;
5180                 device->refcount = 1;
5181                 if (bus->sim->flags & CAM_SIM_MPSAFE)
5182                         callout_init_mtx(&device->callout, bus->sim->mtx, 0);
5183                 else
5184                         callout_init_mtx(&device->callout, &Giant, 0);
5185
5186                 /*
5187                  * Hold a reference to our parent target so it
5188                  * will not go away before we do.
5189                  */
5190                 target->refcount++;
5191
5192                 /*
5193                  * XXX should be limited by number of CCBs this bus can
5194                  * do.
5195                  */
5196                 bus->sim->max_ccbs += device->ccbq.devq_openings;
5197                 /* Insertion sort into our target's device list */
5198                 cur_device = TAILQ_FIRST(&target->ed_entries);
5199                 while (cur_device != NULL && cur_device->lun_id < lun_id)
5200                         cur_device = TAILQ_NEXT(cur_device, links);
5201                 if (cur_device != NULL) {
5202                         TAILQ_INSERT_BEFORE(cur_device, device, links);
5203                 } else {
5204                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5205                 }
5206                 target->generation++;
5207                 if (lun_id != CAM_LUN_WILDCARD) {
5208                         xpt_compile_path(&path,
5209                                          NULL,
5210                                          bus->path_id,
5211                                          target->target_id,
5212                                          lun_id);
5213                         xpt_devise_transport(&path);
5214                         xpt_release_path(&path);
5215                 }
5216         }
5217         return (device);
5218 }
5219
5220 static void
5221 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5222                    struct cam_ed *device)
5223 {
5224         int s;
5225
5226         s = splcam();
5227         if ((--device->refcount == 0)
5228          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5229                 struct cam_devq *devq;
5230
5231                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5232                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5233                         panic("Removing device while still queued for ccbs");
5234
5235                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5236                                 callout_stop(&device->callout);
5237
5238                 TAILQ_REMOVE(&target->ed_entries, device, links);
5239                 target->generation++;
5240                 bus->sim->max_ccbs -= device->ccbq.devq_openings;
5241                 if (!SIM_DEAD(bus->sim)) {
5242                         /* Release our slot in the devq */
5243                         devq = bus->sim->devq;
5244                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5245                 }
5246                 splx(s);
5247                 camq_fini(&device->drvq);
5248                 camq_fini(&device->ccbq.queue);
5249                 free(device, M_CAMXPT);
5250                 xpt_release_target(bus, target);
5251         } else
5252                 splx(s);
5253 }
5254
5255 static u_int32_t
5256 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5257 {
5258         int     s;
5259         int     diff;
5260         int     result;
5261         struct  cam_ed *dev;
5262
5263         dev = path->device;
5264         s = splsoftcam();
5265
5266         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5267         result = cam_ccbq_resize(&dev->ccbq, newopenings);
5268         if (result == CAM_REQ_CMP && (diff < 0)) {
5269                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5270         }
5271         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5272          || (dev->inq_flags & SID_CmdQue) != 0)
5273                 dev->tag_saved_openings = newopenings;
5274         /* Adjust the global limit */
5275         dev->sim->max_ccbs += diff;
5276         splx(s);
5277         return (result);
5278 }
5279
5280 static struct cam_eb *
5281 xpt_find_bus(path_id_t path_id)
5282 {
5283         struct cam_eb *bus;
5284
5285         mtx_lock(&xsoftc.xpt_topo_lock);
5286         for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
5287              bus != NULL;
5288              bus = TAILQ_NEXT(bus, links)) {
5289                 if (bus->path_id == path_id) {
5290                         bus->refcount++;
5291                         break;
5292                 }
5293         }
5294         mtx_unlock(&xsoftc.xpt_topo_lock);
5295         return (bus);
5296 }
5297
5298 static struct cam_et *
5299 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5300 {
5301         struct cam_et *target;
5302
5303         for (target = TAILQ_FIRST(&bus->et_entries);
5304              target != NULL;
5305              target = TAILQ_NEXT(target, links)) {
5306                 if (target->target_id == target_id) {
5307                         target->refcount++;
5308                         break;
5309                 }
5310         }
5311         return (target);
5312 }
5313
5314 static struct cam_ed *
5315 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5316 {
5317         struct cam_ed *device;
5318
5319         for (device = TAILQ_FIRST(&target->ed_entries);
5320              device != NULL;
5321              device = TAILQ_NEXT(device, links)) {
5322                 if (device->lun_id == lun_id) {
5323                         device->refcount++;
5324                         break;
5325                 }
5326         }
5327         return (device);
5328 }
5329
5330 typedef struct {
5331         union   ccb *request_ccb;
5332         struct  ccb_pathinq *cpi;
5333         int     counter;
5334 } xpt_scan_bus_info;
5335
5336 /*
5337  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5338  * As the scan progresses, xpt_scan_bus is used as the
5339  * completion callback.
5340  */
5341 static void
5342 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5343 {
5344         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5345                   ("xpt_scan_bus\n"));
5346         switch (request_ccb->ccb_h.func_code) {
5347         case XPT_SCAN_BUS:
5348         {
5349                 xpt_scan_bus_info *scan_info;
5350                 union   ccb *work_ccb;
5351                 struct  cam_path *path;
5352                 u_int   i;
5353                 u_int   max_target;
5354                 u_int   initiator_id;
5355
5356                 /* Find out the characteristics of the bus */
5357                 work_ccb = xpt_alloc_ccb_nowait();
5358                 if (work_ccb == NULL) {
5359                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5360                         xpt_done(request_ccb);
5361                         return;
5362                 }
5363                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5364                               request_ccb->ccb_h.pinfo.priority);
5365                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5366                 xpt_action(work_ccb);
5367                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5368                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5369                         xpt_free_ccb(work_ccb);
5370                         xpt_done(request_ccb);
5371                         return;
5372                 }
5373
5374                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5375                         /*
5376                          * Can't scan the bus on an adapter that
5377                          * cannot perform the initiator role.
5378                          */
5379                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5380                         xpt_free_ccb(work_ccb);
5381                         xpt_done(request_ccb);
5382                         return;
5383                 }
5384
5385                 /* Save some state for use while we probe for devices */
5386                 scan_info = (xpt_scan_bus_info *)
5387                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_NOWAIT);
5388                 if (scan_info == NULL) {
5389                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5390                         xpt_free_ccb(work_ccb);
5391                         xpt_done(request_ccb);
5392                         return;
5393                 }
5394                 scan_info->request_ccb = request_ccb;
5395                 scan_info->cpi = &work_ccb->cpi;
5396                 /* Cache on our stack so we can work asynchronously */
5397                 max_target = scan_info->cpi->max_target;
5398                 initiator_id = scan_info->cpi->initiator_id;
5399
5400                 /* We can scan all targets in parallel, or do it sequentially. */
5401                 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5402                         max_target = 0;
5403                         scan_info->counter = 0;
5404                 } else {
5405                         scan_info->counter = scan_info->cpi->max_target + 1;
5406                         if (scan_info->cpi->initiator_id < scan_info->counter)
5407                                 scan_info->counter--;
5408                 }
5409                 for (i = 0; i <= max_target; i++) {
5410                         cam_status status;
5411                         if (i == initiator_id)
5412                                 continue;
5413
5414                         status = xpt_create_path(&path, xpt_periph,
5415                                                  request_ccb->ccb_h.path_id,
5416                                                  i, 0);
5417                         if (status != CAM_REQ_CMP) {
5418                                 printf("xpt_scan_bus: xpt_create_path failed"
5419                                        " with status %#x, bus scan halted\n",
5420                                        status);
5421                                 free(scan_info, M_TEMP);
5422                                 request_ccb->ccb_h.status = status;
5423                                 xpt_free_ccb(work_ccb);
5424                                 xpt_done(request_ccb);
5425                                 break;
5426                         }
5427                         work_ccb = xpt_alloc_ccb_nowait();
5428                         if (work_ccb == NULL) {
5429                                 free(scan_info, M_TEMP);
5430                                 xpt_free_path(path);
5431                                 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5432                                 xpt_done(request_ccb);
5433                                 break;
5434                         }
5435                         xpt_setup_ccb(&work_ccb->ccb_h, path,
5436                                       request_ccb->ccb_h.pinfo.priority);
5437                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5438                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5439                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5440                         work_ccb->crcn.flags = request_ccb->crcn.flags;
5441                         xpt_action(work_ccb);
5442                 }
5443                 break;
5444         }
5445         case XPT_SCAN_LUN:
5446         {
5447                 cam_status status;
5448                 struct cam_path *path;
5449                 xpt_scan_bus_info *scan_info;
5450                 path_id_t path_id;
5451                 target_id_t target_id;
5452                 lun_id_t lun_id;
5453
5454                 /* Reuse the same CCB to query if a device was really found */
5455                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5456                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5457                               request_ccb->ccb_h.pinfo.priority);
5458                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5459
5460                 path_id = request_ccb->ccb_h.path_id;
5461                 target_id = request_ccb->ccb_h.target_id;
5462                 lun_id = request_ccb->ccb_h.target_lun;
5463                 xpt_action(request_ccb);
5464
5465                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5466                         struct cam_ed *device;
5467                         struct cam_et *target;
5468                         int s, phl;
5469
5470                         /*
5471                          * If we already probed lun 0 successfully, or
5472                          * we have additional configured luns on this
5473                          * target that might have "gone away", go onto
5474                          * the next lun.
5475                          */
5476                         target = request_ccb->ccb_h.path->target;
5477                         /*
5478                          * We may touch devices that we don't
5479                          * hold references to, so ensure they
5480                          * don't disappear out from under us.
5481                          * The target above is referenced by the
5482                          * path in the request ccb.
5483                          */
5484                         phl = 0;
5485                         s = splcam();
5486                         device = TAILQ_FIRST(&target->ed_entries);
5487                         if (device != NULL) {
5488                                 phl = CAN_SRCH_HI_SPARSE(device);
5489                                 if (device->lun_id == 0)
5490                                         device = TAILQ_NEXT(device, links);
5491                         }
5492                         splx(s);
5493                         if ((lun_id != 0) || (device != NULL)) {
5494                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5495                                         lun_id++;
5496                         }
5497                 } else {
5498                         struct cam_ed *device;
5499                         
5500                         device = request_ccb->ccb_h.path->device;
5501
5502                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5503                                 /* Try the next lun */
5504                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5505                                   || CAN_SRCH_HI_DENSE(device))
5506                                         lun_id++;
5507                         }
5508                 }
5509
5510                 /*
5511                  * Free the current request path; we're done with it.
5512                  */
5513                 xpt_free_path(request_ccb->ccb_h.path);
5514
5515                 /*
5516                  * Check to see if we scan any further luns.
5517                  * Check to see if we need to scan any further luns.
5518                 if (lun_id == request_ccb->ccb_h.target_lun
5519                  || lun_id > scan_info->cpi->max_lun) {
5520                         int done;
5521
5522  hop_again:
5523                         done = 0;
5524                         if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5525                                 scan_info->counter++;
5526                                 if (scan_info->counter == 
5527                                     scan_info->cpi->initiator_id) {
5528                                         scan_info->counter++;
5529                                 }
5530                                 if (scan_info->counter >=
5531                                     scan_info->cpi->max_target+1) {
5532                                         done = 1;
5533                                 }
5534                         } else {
5535                                 scan_info->counter--;
5536                                 if (scan_info->counter == 0) {
5537                                         done = 1;
5538                                 }
5539                         }
5540                         if (done) {
5541                                 xpt_free_ccb(request_ccb);
5542                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5543                                 request_ccb = scan_info->request_ccb;
5544                                 free(scan_info, M_TEMP);
5545                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
5546                                 xpt_done(request_ccb);
5547                                 break;
5548                         }
5549
5550                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5551                                 break;
5552                         }
5553                         status = xpt_create_path(&path, xpt_periph,
5554                             scan_info->request_ccb->ccb_h.path_id,
5555                             scan_info->counter, 0);
5556                         if (status != CAM_REQ_CMP) {
5557                                 printf("xpt_scan_bus: xpt_create_path failed"
5558                                     " with status %#x, bus scan halted\n",
5559                                     status);
5560                                 xpt_free_ccb(request_ccb);
5561                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5562                                 request_ccb = scan_info->request_ccb;
5563                                 free(scan_info, M_TEMP);
5564                                 request_ccb->ccb_h.status = status;
5565                                 xpt_done(request_ccb);
5566                                 break;
5567                         }
5568                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5569                             request_ccb->ccb_h.pinfo.priority);
5570                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5571                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5572                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5573                         request_ccb->crcn.flags =
5574                             scan_info->request_ccb->crcn.flags;
5575                 } else {
5576                         status = xpt_create_path(&path, xpt_periph,
5577                                                  path_id, target_id, lun_id);
5578                         if (status != CAM_REQ_CMP) {
5579                                 printf("xpt_scan_bus: xpt_create_path failed "
5580                                        "with status %#x, halting LUN scan\n",
5581                                        status);
5582                                 goto hop_again;
5583                         }
5584                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5585                                       request_ccb->ccb_h.pinfo.priority);
5586                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5587                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5588                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5589                         request_ccb->crcn.flags =
5590                                 scan_info->request_ccb->crcn.flags;
5591                 }
5592                 xpt_action(request_ccb);
5593                 break;
5594         }
5595         default:
5596                 break;
5597         }
5598 }
5599
5600 typedef enum {
5601         PROBE_TUR,
5602         PROBE_INQUIRY,  /* this counts as DV0 for Basic Domain Validation */
5603         PROBE_FULL_INQUIRY,
5604         PROBE_MODE_SENSE,
5605         PROBE_SERIAL_NUM,
5606         PROBE_TUR_FOR_NEGOTIATION,
5607         PROBE_INQUIRY_BASIC_DV1,
5608         PROBE_INQUIRY_BASIC_DV2,
5609         PROBE_DV_EXIT
5610 } probe_action;
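
/*
 * Typical progression through these states, as driven by probestart() and
 * probedone() below (illustrative only; error paths may short-circuit it):
 *
 *      PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
 *          -> PROBE_MODE_SENSE -> PROBE_SERIAL_NUM
 *          -> PROBE_TUR_FOR_NEGOTIATION
 *          [-> PROBE_INQUIRY_BASIC_DV1 -> PROBE_INQUIRY_BASIC_DV2
 *           -> PROBE_DV_EXIT]
 *
 * PROBE_MODE_SENSE is skipped for devices without tagged queueing, and the
 * Basic Domain Validation states run only for lun 0 on devices reporting
 * SID_Sync support.
 */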
5611
5612 typedef enum {
5613         PROBE_INQUIRY_CKSUM     = 0x01,
5614         PROBE_SERIAL_CKSUM      = 0x02,
5615         PROBE_NO_ANNOUNCE       = 0x04
5616 } probe_flags;
5617
5618 typedef struct {
5619         TAILQ_HEAD(, ccb_hdr) request_ccbs;
5620         probe_action    action;
5621         union ccb       saved_ccb;
5622         probe_flags     flags;
5623         MD5_CTX         context;
5624         u_int8_t        digest[16];
5625 } probe_softc;
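
/*
 * The context/digest pair implements the "same device?" check: for an
 * already-configured device, probestart() snapshots an MD5 of the cached
 * inquiry data (plus the serial number, when one is known) before
 * reprobing, and probedone()'s PROBE_SERIAL_NUM case recomputes the digest
 * from the fresh data.  A mismatch means a different device now answers at
 * this path, so AC_LOST_DEVICE is posted unless PROBE_NO_ANNOUNCE is set.
 */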
5626
5627 static void
5628 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5629              cam_flags flags, union ccb *request_ccb)
5630 {
5631         struct ccb_pathinq cpi;
5632         cam_status status;
5633         struct cam_path *new_path;
5634         struct cam_periph *old_periph;
5635         int s;
5636         
5637         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5638                   ("xpt_scan_lun\n"));
5639         
5640         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5641         cpi.ccb_h.func_code = XPT_PATH_INQ;
5642         xpt_action((union ccb *)&cpi);
5643
5644         if (cpi.ccb_h.status != CAM_REQ_CMP) {
5645                 if (request_ccb != NULL) {
5646                         request_ccb->ccb_h.status = cpi.ccb_h.status;
5647                         xpt_done(request_ccb);
5648                 }
5649                 return;
5650         }
5651
5652         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5653                 /*
5654                  * Can't scan the bus on an adapter that
5655                  * cannot perform the initiator role.
5656                  */
5657                 if (request_ccb != NULL) {
5658                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5659                         xpt_done(request_ccb);
5660                 }
5661                 return;
5662         }
5663
5664         if (request_ccb == NULL) {
5665                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5666                 if (request_ccb == NULL) {
5667                         xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
5668                             "can't continue\n");
5669                         return;
5670                 }
5671                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5672                 if (new_path == NULL) {
5673                         xpt_print(path, "xpt_scan_lun: can't allocate path, "
5674                             "can't continue\n");
5675                         free(request_ccb, M_TEMP);
5676                         return;
5677                 }
5678                 status = xpt_compile_path(new_path, xpt_periph,
5679                                           path->bus->path_id,
5680                                           path->target->target_id,
5681                                           path->device->lun_id);
5682
5683                 if (status != CAM_REQ_CMP) {
5684                         xpt_print(path, "xpt_scan_lun: can't compile path, "
5685                             "can't continue\n");
5686                         free(request_ccb, M_TEMP);
5687                         free(new_path, M_TEMP);
5688                         return;
5689                 }
5690                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5691                 request_ccb->ccb_h.cbfcnp = xptscandone;
5692                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5693                 request_ccb->crcn.flags = flags;
5694         }
5695
5696         s = splsoftcam();
5697         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5698                 probe_softc *softc;
5699
5700                 softc = (probe_softc *)old_periph->softc;
5701                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5702                                   periph_links.tqe);
5703         } else {
5704                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5705                                           probestart, "probe",
5706                                           CAM_PERIPH_BIO,
5707                                           request_ccb->ccb_h.path, NULL, 0,
5708                                           request_ccb);
5709
5710                 if (status != CAM_REQ_CMP) {
5711                         xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
5712                             "returned an error, can't continue probe\n");
5713                         request_ccb->ccb_h.status = status;
5714                         xpt_done(request_ccb);
5715                 }
5716         }
5717         splx(s);
5718 }
5719
5720 static void
5721 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5722 {
5723         xpt_release_path(done_ccb->ccb_h.path);
5724         free(done_ccb->ccb_h.path, M_TEMP);
5725         free(done_ccb, M_TEMP);
5726 }
5727
5728 static cam_status
5729 proberegister(struct cam_periph *periph, void *arg)
5730 {
5731         union ccb *request_ccb; /* CCB representing the probe request */
5732         cam_status status;
5733         probe_softc *softc;
5734
5735         request_ccb = (union ccb *)arg;
5736         if (periph == NULL) {
5737                 printf("proberegister: periph was NULL!!\n");
5738                 return(CAM_REQ_CMP_ERR);
5739         }
5740
5741         if (request_ccb == NULL) {
5742                 printf("proberegister: no probe CCB, "
5743                        "can't register device\n");
5744                 return(CAM_REQ_CMP_ERR);
5745         }
5746
5747         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5748
5749         if (softc == NULL) {
5750                 printf("proberegister: Unable to probe new device. "
5751                        "Unable to allocate softc\n");                           
5752                 return(CAM_REQ_CMP_ERR);
5753         }
5754         TAILQ_INIT(&softc->request_ccbs);
5755         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5756                           periph_links.tqe);
5757         softc->flags = 0;
5758         periph->softc = softc;
5759         status = cam_periph_acquire(periph);
5760         if (status != CAM_REQ_CMP) {
5761                 return (status);
5762         }
5763
5765         /*
5766          * Ensure we've waited at least a bus settle
5767          * delay before attempting to probe the device.
5768          * For HBAs that don't do bus resets, this won't make a difference.
5769          */
5770         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5771                                       scsi_delay);
5772         probeschedule(periph);
5773         return(CAM_REQ_CMP);
5774 }
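
/*
 * Lifetime note: the cam_periph_acquire() in proberegister() is paired
 * with the cam_periph_invalidate()/cam_periph_release() at the bottom of
 * probedone(); the probe periph persists only while request CCBs remain
 * queued on softc->request_ccbs.
 */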
5775
5776 static void
5777 probeschedule(struct cam_periph *periph)
5778 {
5779         struct ccb_pathinq cpi;
5780         union ccb *ccb;
5781         probe_softc *softc;
5782
5783         softc = (probe_softc *)periph->softc;
5784         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5785
5786         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5787         cpi.ccb_h.func_code = XPT_PATH_INQ;
5788         xpt_action((union ccb *)&cpi);
5789
5790         /*
5791          * If a device has gone away and another device, or the same one,
5792          * is back in the same place, it should have a unit attention
5793          * condition pending.  It will not report the unit attention in
5794          * response to an inquiry, which may leave invalid transfer
5795          * negotiations in effect.  The TUR will reveal the unit attention
5796          * condition.  Only send the TUR for lun 0, since some devices 
5797          * will get confused by commands other than inquiry to non-existent
5798                  * luns.  If you think a device has gone away, start your scan from
5799                  * lun 0.  This will ensure that any bogus transfer settings are
5800          * invalidated.
5801          *
5802          * If we haven't seen the device before and the controller supports
5803          * some kind of transfer negotiation, negotiate with the first
5804          * sent command if no bus reset was performed at startup.  This
5805          * ensures that the device is not confused by transfer negotiation
5806          * settings left over by loader or BIOS action.
5807          */
5808         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5809          && (ccb->ccb_h.target_lun == 0)) {
5810                 softc->action = PROBE_TUR;
5811         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5812               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5813                 proberequestdefaultnegotiation(periph);
5814                 softc->action = PROBE_INQUIRY;
5815         } else {
5816                 softc->action = PROBE_INQUIRY;
5817         }
5818
5819         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5820                 softc->flags |= PROBE_NO_ANNOUNCE;
5821         else
5822                 softc->flags &= ~PROBE_NO_ANNOUNCE;
5823
5824         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5825 }
5826
5827 static void
5828 probestart(struct cam_periph *periph, union ccb *start_ccb)
5829 {
5830         /* Probe the device that our peripheral driver points to */
5831         struct ccb_scsiio *csio;
5832         probe_softc *softc;
5833
5834         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5835
5836         softc = (probe_softc *)periph->softc;
5837         csio = &start_ccb->csio;
5838
5839         switch (softc->action) {
5840         case PROBE_TUR:
5841         case PROBE_TUR_FOR_NEGOTIATION:
5842         case PROBE_DV_EXIT:
5843         {
5844                 scsi_test_unit_ready(csio,
5845                                      /*retries*/4,
5846                                      probedone,
5847                                      MSG_SIMPLE_Q_TAG,
5848                                      SSD_FULL_SIZE,
5849                                      /*timeout*/60000);
5850                 break;
5851         }
5852         case PROBE_INQUIRY:
5853         case PROBE_FULL_INQUIRY:
5854         case PROBE_INQUIRY_BASIC_DV1:
5855         case PROBE_INQUIRY_BASIC_DV2:
5856         {
5857                 u_int inquiry_len;
5858                 struct scsi_inquiry_data *inq_buf;
5859
5860                 inq_buf = &periph->path->device->inq_data;
5861
5862                 /*
5863                  * If the device is currently configured, we calculate an
5864                  * MD5 checksum of the inquiry data, and if the serial number
5865                  * length is greater than 0, add the serial number data
5866                  * into the checksum as well.  Once the inquiry and the
5867                  * serial number check finish, we attempt to figure out
5868                  * whether we still have the same device.
5869                  */
5870                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5871                         
5872                         MD5Init(&softc->context);
5873                         MD5Update(&softc->context, (unsigned char *)inq_buf,
5874                                   sizeof(struct scsi_inquiry_data));
5875                         softc->flags |= PROBE_INQUIRY_CKSUM;
5876                         if (periph->path->device->serial_num_len > 0) {
5877                                 MD5Update(&softc->context,
5878                                           periph->path->device->serial_num,
5879                                           periph->path->device->serial_num_len);
5880                                 softc->flags |= PROBE_SERIAL_CKSUM;
5881                         }
5882                         MD5Final(softc->digest, &softc->context);
5883                 } 
5884
5885                 if (softc->action == PROBE_INQUIRY)
5886                         inquiry_len = SHORT_INQUIRY_LENGTH;
5887                 else
5888                         inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5889
5890                 /*
5891                  * Some parallel SCSI devices fail to send an
5892                  * ignore wide residue message when dealing with
5893                  * odd length inquiry requests.  Round up to be
5894                  * safe.
5895                  */
5896                 inquiry_len = roundup2(inquiry_len, 2);
5897         
5898                 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5899                  || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5900                         inq_buf = malloc(inquiry_len, M_TEMP, M_NOWAIT);
5901                 }
5902                 if (inq_buf == NULL) {
5903                         xpt_print(periph->path, "malloc failure; skipping Basic "
5904                             "Domain Validation\n");
5905                         softc->action = PROBE_DV_EXIT;
5906                         scsi_test_unit_ready(csio,
5907                                              /*retries*/4,
5908                                              probedone,
5909                                              MSG_SIMPLE_Q_TAG,
5910                                              SSD_FULL_SIZE,
5911                                              /*timeout*/60000);
5912                         break;
5913                 }
5914                 scsi_inquiry(csio,
5915                              /*retries*/4,
5916                              probedone,
5917                              MSG_SIMPLE_Q_TAG,
5918                              (u_int8_t *)inq_buf,
5919                              inquiry_len,
5920                              /*evpd*/FALSE,
5921                              /*page_code*/0,
5922                              SSD_MIN_SIZE,
5923                              /*timeout*/60 * 1000);
5924                 break;
5925         }
5926         case PROBE_MODE_SENSE:
5927         {
5928                 void  *mode_buf;
5929                 int    mode_buf_len;
5930
5931                 mode_buf_len = sizeof(struct scsi_mode_header_6)
5932                              + sizeof(struct scsi_mode_blk_desc)
5933                              + sizeof(struct scsi_control_page);
5934                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5935                 if (mode_buf != NULL) {
5936                         scsi_mode_sense(csio,
5937                                         /*retries*/4,
5938                                         probedone,
5939                                         MSG_SIMPLE_Q_TAG,
5940                                         /*dbd*/FALSE,
5941                                         SMS_PAGE_CTRL_CURRENT,
5942                                         SMS_CONTROL_MODE_PAGE,
5943                                         mode_buf,
5944                                         mode_buf_len,
5945                                         SSD_FULL_SIZE,
5946                                         /*timeout*/60000);
5947                         break;
5948                 }
5949                 xpt_print(periph->path, "Unable to mode sense control page - "
5950                     "malloc failure\n");
5951                 softc->action = PROBE_SERIAL_NUM;
5952         }
5953         /* FALLTHROUGH */
5954         case PROBE_SERIAL_NUM:
5955         {
5956                 struct scsi_vpd_unit_serial_number *serial_buf;
5957                 struct cam_ed* device;
5958
5959                 serial_buf = NULL;
5960                 device = periph->path->device;
5961                 device->serial_num = NULL;
5962                 device->serial_num_len = 0;
5963
5964                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5965                         serial_buf = (struct scsi_vpd_unit_serial_number *)
5966                                 malloc(sizeof(*serial_buf), M_TEMP,
5967                                         M_NOWAIT | M_ZERO);
5968
5969                 if (serial_buf != NULL) {
5970                         scsi_inquiry(csio,
5971                                      /*retries*/4,
5972                                      probedone,
5973                                      MSG_SIMPLE_Q_TAG,
5974                                      (u_int8_t *)serial_buf,
5975                                      sizeof(*serial_buf),
5976                                      /*evpd*/TRUE,
5977                                      SVPD_UNIT_SERIAL_NUMBER,
5978                                      SSD_MIN_SIZE,
5979                                      /*timeout*/60 * 1000);
5980                         break;
5981                 }
5982                 /*
5983                  * We'll have to do without, let our probedone
5984                  * routine finish up for us.
5985                  */
5986                 start_ccb->csio.data_ptr = NULL;
5987                 probedone(periph, start_ccb);
5988                 return;
5989         }
5990         }
5991         xpt_action(start_ccb);
5992 }
5993
5994 static void
5995 proberequestdefaultnegotiation(struct cam_periph *periph)
5996 {
5997         struct ccb_trans_settings cts;
5998
5999         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
6000         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6001         cts.type = CTS_TYPE_USER_SETTINGS;
6002         xpt_action((union ccb *)&cts);
6003         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6004                 return;
6005         }
6006         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6007         cts.type = CTS_TYPE_CURRENT_SETTINGS;
6008         xpt_action((union ccb *)&cts);
6009 }
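
/*
 * The GET-then-SET sequence above is the usual "renegotiate from defaults"
 * idiom: fetch the user settings and write them back as the current
 * settings, prompting the SIM to renegotiate on the next command.  A
 * caller sketch (hypothetical, mirroring what probedone() actually does
 * when it decides a device is new or changed):
 *
 *      proberequestdefaultnegotiation(periph);
 *      softc->action = PROBE_TUR_FOR_NEGOTIATION;
 *      xpt_schedule(periph, priority);
 */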
6010
6011 /*
6012  * Backoff Negotiation Code; only pertinent for SPI devices.
6013  */
6014 static int
6015 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
6016 {
6017         struct ccb_trans_settings cts;
6018         struct ccb_trans_settings_spi *spi;
6019
6020         memset(&cts, 0, sizeof (cts));
6021         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
6022         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6023         cts.type = CTS_TYPE_CURRENT_SETTINGS;
6024         xpt_action((union ccb *)&cts);
6025         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6026                 if (bootverbose) {
6027                         xpt_print(periph->path,
6028                             "failed to get current device settings\n");
6029                 }
6030                 return (0);
6031         }
6032         if (cts.transport != XPORT_SPI) {
6033                 if (bootverbose) {
6034                         xpt_print(periph->path, "not SPI transport\n");
6035                 }
6036                 return (0);
6037         }
6038         spi = &cts.xport_specific.spi;
6039
6040         /*
6041          * We cannot renegotiate sync rate if we don't have one.
6042          */
6043         if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
6044                 if (bootverbose) {
6045                         xpt_print(periph->path, "no sync rate known\n");
6046                 }
6047                 return (0);
6048         }
6049
6050         /*
6051          * We'll assert that we don't have to touch PPR options; the
6052          * SIM will see what we do with period and offset and adjust
6053          * the PPR options as appropriate.
6054          */
6055
6056         /*
6057          * A sync rate with unknown or zero offset is nonsensical.
6058          * A sync period of zero means Async.
6059          */
6060         if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
6061          || spi->sync_offset == 0 || spi->sync_period == 0) {
6062                 if (bootverbose) {
6063                         xpt_print(periph->path, "no sync rate available\n");
6064                 }
6065                 return (0);
6066         }
6067
6068         if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
6069                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6070                     ("hit async: giving up on DV\n"));
6071                 return (0);
6072         }
6073
6075         /*
6076          * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
6077          * We don't try to remember 'last' settings to see if the SIM actually
6078          * gets into the speed we want to set. We check on the SIM telling
6079          * us that a requested speed is bad, but otherwise don't try and
6080          * check the speed due to the asynchronous and handshake nature
6081          * of speed setting.
6082          */
6083         spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
6084         for (;;) {
6085                 spi->sync_period++;
6086                 if (spi->sync_period >= 0xf) {
6087                         spi->sync_period = 0;
6088                         spi->sync_offset = 0;
6089                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6090                             ("setting to async for DV\n"));
6091                         /*
6092                          * Once we hit async, we don't want to try
6093                          * any more settings.
6094                          */
6095                         device->flags |= CAM_DEV_DV_HIT_BOTTOM;
6096                 } else if (bootverbose) {
6097                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6098                             ("DV: period 0x%x\n", spi->sync_period));
6099                         printf("setting period to 0x%x\n", spi->sync_period);
6100                 }
6101                 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6102                 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6103                 xpt_action((union ccb *)&cts);
6104                 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6105                         break;
6106                 }
6107                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6108                     ("DV: failed to set period 0x%x\n", spi->sync_period));
6109                 if (spi->sync_period == 0) {
6110                         return (0);
6111                 }
6112         }
6113         return (1);
6114 }
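
/*
 * Illustrative backoff sequence (the period values are hypothetical): if
 * DV's inquiry comparison fails at sync_period 0xc, successive calls
 * request 0xd, then 0xe; once the period reaches 0xf the request drops to
 * async (period = offset = 0) and CAM_DEV_DV_HIT_BOTTOM is set so no
 * further backoff is attempted.  A return of 0 tells probedone() to give
 * up on DV; 1 means a slower setting was requested and the comparison
 * should be retried.
 */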
6115
6116 static void
6117 probedone(struct cam_periph *periph, union ccb *done_ccb)
6118 {
6119         probe_softc *softc;
6120         struct cam_path *path;
6121         u_int32_t  priority;
6122
6123         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
6124
6125         softc = (probe_softc *)periph->softc;
6126         path = done_ccb->ccb_h.path;
6127         priority = done_ccb->ccb_h.pinfo.priority;
6128
6129         switch (softc->action) {
6130         case PROBE_TUR:
6131         {
6132                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6133
6134                         if (cam_periph_error(done_ccb, 0,
6135                                              SF_NO_PRINT, NULL) == ERESTART)
6136                                 return;
6137                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
6138                                 /* Don't wedge the queue */
6139                                 xpt_release_devq(done_ccb->ccb_h.path,
6140                                                  /*count*/1,
6141                                                  /*run_queue*/TRUE);
6142                 }
6143                 softc->action = PROBE_INQUIRY;
6144                 xpt_release_ccb(done_ccb);
6145                 xpt_schedule(periph, priority);
6146                 return;
6147         }
6148         case PROBE_INQUIRY:
6149         case PROBE_FULL_INQUIRY:
6150         {
6151                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6152                         struct scsi_inquiry_data *inq_buf;
6153                         u_int8_t periph_qual;
6154
6155                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6156                         inq_buf = &path->device->inq_data;
6157
6158                         periph_qual = SID_QUAL(inq_buf);
6159                         
6160                         switch(periph_qual) {
6161                         case SID_QUAL_LU_CONNECTED:
6162                         {
6163                                 u_int8_t len;
6164
6165                                 /*
6166                                  * We conservatively request only
6167                                  * SHORT_INQUIRY_LEN bytes of inquiry
6168                                  * information during our first try
6169                                  * at sending an INQUIRY. If the device
6170                                  * has more information to give,
6171                                  * perform a second request specifying
6172                                  * the amount of information the device
6173                                  * is willing to give.
6174                                  */
6175                                 len = inq_buf->additional_length
6176                                     + offsetof(struct scsi_inquiry_data,
6177                                                additional_length) + 1;
6178                                 if (softc->action == PROBE_INQUIRY
6179                                  && len > SHORT_INQUIRY_LENGTH) {
6180                                         softc->action = PROBE_FULL_INQUIRY;
6181                                         xpt_release_ccb(done_ccb);
6182                                         xpt_schedule(periph, priority);
6183                                         return;
6184                                 }
6185
6186                                 xpt_find_quirk(path->device);
6187
6188                                 xpt_devise_transport(path);
6189                                 if (INQ_DATA_TQ_ENABLED(inq_buf))
6190                                         softc->action = PROBE_MODE_SENSE;
6191                                 else
6192                                         softc->action = PROBE_SERIAL_NUM;
6193
6194                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6195
6196                                 xpt_release_ccb(done_ccb);
6197                                 xpt_schedule(periph, priority);
6198                                 return;
6199                         }
6200                         default:
6201                                 break;
6202                         }
6203                 } else if (cam_periph_error(done_ccb, 0,
6204                                             done_ccb->ccb_h.target_lun > 0
6205                                             ? SF_RETRY_UA|SF_QUIET_IR
6206                                             : SF_RETRY_UA,
6207                                             &softc->saved_ccb) == ERESTART) {
6208                         return;
6209                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6210                         /* Don't wedge the queue */
6211                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6212                                          /*run_queue*/TRUE);
6213                 }
6214                 /*
6215                  * If we get to this point, we got an error status back
6216                  * from the inquiry and the error status doesn't require
6217                  * automatically retrying the command.  Therefore, the
6218                  * inquiry failed.  If we had inquiry information before
6219                  * for this device, but this latest inquiry command failed,
6220                  * the device has probably gone away.  If this device isn't
6221                  * already marked unconfigured, notify the peripheral
6222                  * drivers that this device is no more.
6223                  */
6224                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6225                         /* Send the async notification. */
6226                         xpt_async(AC_LOST_DEVICE, path, NULL);
6227
6228                 xpt_release_ccb(done_ccb);
6229                 break;
6230         }
6231         case PROBE_MODE_SENSE:
6232         {
6233                 struct ccb_scsiio *csio;
6234                 struct scsi_mode_header_6 *mode_hdr;
6235
6236                 csio = &done_ccb->csio;
6237                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6238                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6239                         struct scsi_control_page *page;
6240                         u_int8_t *offset;
6241
6242                         offset = ((u_int8_t *)&mode_hdr[1])
6243                             + mode_hdr->blk_desc_len;
6244                         page = (struct scsi_control_page *)offset;
6245                         path->device->queue_flags = page->queue_flags;
6246                 } else if (cam_periph_error(done_ccb, 0,
6247                                             SF_RETRY_UA|SF_NO_PRINT,
6248                                             &softc->saved_ccb) == ERESTART) {
6249                         return;
6250                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6251                         /* Don't wedge the queue */
6252                         xpt_release_devq(done_ccb->ccb_h.path,
6253                                          /*count*/1, /*run_queue*/TRUE);
6254                 }
6255                 xpt_release_ccb(done_ccb);
6256                 free(mode_hdr, M_TEMP);
6257                 softc->action = PROBE_SERIAL_NUM;
6258                 xpt_schedule(periph, priority);
6259                 return;
6260         }
6261         case PROBE_SERIAL_NUM:
6262         {
6263                 struct ccb_scsiio *csio;
6264                 struct scsi_vpd_unit_serial_number *serial_buf;
6266                 int changed;
6267                 int have_serialnum;
6268
6269                 changed = 1;
6270                 have_serialnum = 0;
6271                 csio = &done_ccb->csio;
6273                 serial_buf =
6274                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6275
6276                 /* Clean up from previous instance of this device */
6277                 if (path->device->serial_num != NULL) {
6278                         free(path->device->serial_num, M_CAMXPT);
6279                         path->device->serial_num = NULL;
6280                         path->device->serial_num_len = 0;
6281                 }
6282
6283                 if (serial_buf == NULL) {
6284                         /*
6285                          * Don't process the command as it was never sent
6286                          */
6287                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6288                         && (serial_buf->length > 0)) {
6289
6290                         have_serialnum = 1;
6291                         path->device->serial_num =
6292                                 (u_int8_t *)malloc((serial_buf->length + 1),
6293                                                    M_CAMXPT, M_NOWAIT);
6294                         if (path->device->serial_num != NULL) {
6295                                 bcopy(serial_buf->serial_num,
6296                                       path->device->serial_num,
6297                                       serial_buf->length);
6298                                 path->device->serial_num_len =
6299                                     serial_buf->length;
6300                                 path->device->serial_num[serial_buf->length]
6301                                     = '\0';
6302                         }
6303                 } else if (cam_periph_error(done_ccb, 0,
6304                                             SF_RETRY_UA|SF_NO_PRINT,
6305                                             &softc->saved_ccb) == ERESTART) {
6306                         return;
6307                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6308                         /* Don't wedge the queue */
6309                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6310                                          /*run_queue*/TRUE);
6311                 }
6312                 
6313                 /*
6314                  * Let's see if we have seen this device before.
6315                  */
6316                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6317                         MD5_CTX context;
6318                         u_int8_t digest[16];
6319
6320                         MD5Init(&context);
6321                         
6322                         MD5Update(&context,
6323                                   (unsigned char *)&path->device->inq_data,
6324                                   sizeof(struct scsi_inquiry_data));
6325
6326                         if (have_serialnum)
6327                                 MD5Update(&context, serial_buf->serial_num,
6328                                           serial_buf->length);
6329
6330                         MD5Final(digest, &context);
6331                         if (bcmp(softc->digest, digest, 16) == 0)
6332                                 changed = 0;
6333
6334                         /*
6335                          * XXX Do we need to do a TUR in order to ensure
6336                          *     that the device really hasn't changed???
6337                          */
6338                         if ((changed != 0)
6339                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6340                                 xpt_async(AC_LOST_DEVICE, path, NULL);
6341                 }
6342                 if (serial_buf != NULL)
6343                         free(serial_buf, M_TEMP);
6344
6345                 if (changed != 0) {
6346                         /*
6347                          * Now that we have all the necessary
6348                          * information to safely perform transfer
6349                          * negotiations... Controllers don't perform
6350                          * any negotiation or tagged queuing until
6351                          * after the first XPT_SET_TRAN_SETTINGS ccb is
6352                          * received.  So, on a new device, just retrieve
6353                          * the user settings, and set them as the current
6354                          * settings to set the device up.
6355                          */
6356                         proberequestdefaultnegotiation(periph);
6357                         xpt_release_ccb(done_ccb);
6358
6359                         /*
6360                          * Perform a TUR to allow the controller to
6361                          * perform any necessary transfer negotiation.
6362                          */
6363                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
6364                         xpt_schedule(periph, priority);
6365                         return;
6366                 }
6367                 xpt_release_ccb(done_ccb);
6368                 break;
6369         }
6370         case PROBE_TUR_FOR_NEGOTIATION:
6371         case PROBE_DV_EXIT:
6372                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6373                         /* Don't wedge the queue */
6374                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6375                                          /*run_queue*/TRUE);
6376                 }
6377                 /*
6378                  * Do Domain Validation for lun 0 on devices that claim
6379                  * to support Synchronous Transfer modes.
6380                  */
6381                 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
6382                  && done_ccb->ccb_h.target_lun == 0
6383                  && (path->device->inq_data.flags & SID_Sync) != 0
6384                  && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6385                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6386                             ("Begin Domain Validation\n"));
6387                         path->device->flags |= CAM_DEV_IN_DV;
6388                         xpt_release_ccb(done_ccb);
6389                         softc->action = PROBE_INQUIRY_BASIC_DV1;
6390                         xpt_schedule(periph, priority);
6391                         return;
6392                 }
6393                 if (softc->action == PROBE_DV_EXIT) {
6394                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6395                             ("Leave Domain Validation\n"));
6396                 }
6397                 path->device->flags &=
6398                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6399                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6400                         /* Inform the XPT that a new device has been found */
6401                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6402                         xpt_action(done_ccb);
6403                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6404                                   done_ccb);
6405                 }
6406                 xpt_release_ccb(done_ccb);
6407                 break;
6408         case PROBE_INQUIRY_BASIC_DV1:
6409         case PROBE_INQUIRY_BASIC_DV2:
6410         {
6411                 struct scsi_inquiry_data *nbuf;
6412                 struct ccb_scsiio *csio;
6413
6414                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6415                         /* Don't wedge the queue */
6416                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6417                                          /*run_queue*/TRUE);
6418                 }
6419                 csio = &done_ccb->csio;
6420                 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
6421                 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6422                         xpt_print(path,
6423                             "inquiry data fails comparison at DV%d step\n",
6424                             softc->action == PROBE_INQUIRY_BASIC_DV1? 1 : 2);
6425                         if (proberequestbackoff(periph, path->device)) {
6426                                 path->device->flags &= ~CAM_DEV_IN_DV;
6427                                 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6428                         } else {
6429                                 /* give up */
6430                                 softc->action = PROBE_DV_EXIT;
6431                         }
6432                         free(nbuf, M_TEMP);
6433                         xpt_release_ccb(done_ccb);
6434                         xpt_schedule(periph, priority);
6435                         return;
6436                 }
6437                 free(nbuf, M_TEMP);
6438                 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6439                         softc->action = PROBE_INQUIRY_BASIC_DV2;
6440                         xpt_release_ccb(done_ccb);
6441                         xpt_schedule(periph, priority);
6442                         return;
6443                 }
6444                 if (softc->action == PROBE_DV_EXIT) {
6445                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6446                             ("Leave Domain Validation Successfully\n"));
6447                 }
6448                 path->device->flags &=
6449                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6450                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6451                         /* Inform the XPT that a new device has been found */
6452                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6453                         xpt_action(done_ccb);
6454                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6455                                   done_ccb);
6456                 }
6457                 xpt_release_ccb(done_ccb);
6458                 break;
6459         }
6460         }
6461         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6462         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6463         done_ccb->ccb_h.status = CAM_REQ_CMP;
6464         xpt_done(done_ccb);
6465         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6466                 cam_periph_invalidate(periph);
6467                 cam_periph_release(periph);
6468         } else {
6469                 probeschedule(periph);
6470         }
6471 }
6472
6473 static void
6474 probecleanup(struct cam_periph *periph)
6475 {
6476         free(periph->softc, M_TEMP);
6477 }
6478
6479 static void
6480 xpt_find_quirk(struct cam_ed *device)
6481 {
6482         caddr_t match;
6483
6484         match = cam_quirkmatch((caddr_t)&device->inq_data,
6485                                (caddr_t)xpt_quirk_table,
6486                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6487                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
6488
6489         if (match == NULL)
6490                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6491
6492         device->quirk = (struct xpt_quirk_entry *)match;
6493 }
6494
6495 static int
6496 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6497 {
6498         int error, new_val;
6499
6500         new_val = cam_srch_hi;
6501         error = sysctl_handle_int(oidp, &new_val, sizeof(new_val), req);
6502         if (error != 0 || req->newptr == NULL)
6503                 return (error);
6504         if (new_val == 0 || new_val == 1) {
6505                 cam_srch_hi = new_val;
6506                 return (0);
6507         } else {
6508                 return (EINVAL);
6509         }
6510 }
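
/*
 * The handler accepts only 0 or 1.  Assuming the usual SYSCTL_PROC hookup
 * of this function under the kern.cam tree (performed elsewhere in this
 * file), high-LUN searching could be toggled at runtime with something
 * like:
 *
 *      sysctl kern.cam.cam_srch_hi=1
 *
 * cam_srch_hi feeds the CAN_SRCH_HI_DENSE()/CAN_SRCH_HI_SPARSE() macros
 * that xpt_scan_bus() consults before probing luns above 7.
 */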
6511
6513 static void
6514 xpt_devise_transport(struct cam_path *path)
6515 {
6516         struct ccb_pathinq cpi;
6517         struct ccb_trans_settings cts;
6518         struct scsi_inquiry_data *inq_buf;
6519
6520         /* Get transport information from the SIM */
6521         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6522         cpi.ccb_h.func_code = XPT_PATH_INQ;
6523         xpt_action((union ccb *)&cpi);
6524
6525         inq_buf = NULL;
6526         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6527                 inq_buf = &path->device->inq_data;
6528         path->device->protocol = PROTO_SCSI;
6529         path->device->protocol_version =
6530             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6531         path->device->transport = cpi.transport;
6532         path->device->transport_version = cpi.transport_version;
6533
6534         /*
6535          * Any device not using SPI3 features should
6536          * be considered SPI2 or lower.
6537          */
6538         if (inq_buf != NULL) {
6539                 if (path->device->transport == XPORT_SPI
6540                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
6541                  && path->device->transport_version > 2)
6542                         path->device->transport_version = 2;
6543         } else {
6544                 struct cam_ed* otherdev;
6545
6546                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6547                      otherdev != NULL;
6548                      otherdev = TAILQ_NEXT(otherdev, links)) {
6549                         if (otherdev != path->device)
6550                                 break;
6551                 }
6552                     
6553                 if (otherdev != NULL) {
6554                         /*
6555                          * Initially assume the same versioning as
6556                          * prior luns for this target.
6557                          */
6558                         path->device->protocol_version =
6559                             otherdev->protocol_version;
6560                         path->device->transport_version =
6561                             otherdev->transport_version;
6562                 } else {
6563                         /* Until we know better, opt for safety */
6564                         path->device->protocol_version = 2;
6565                         if (path->device->transport == XPORT_SPI)
6566                                 path->device->transport_version = 2;
6567                         else
6568                                 path->device->transport_version = 0;
6569                 }
6570         }
6571
6572         /*
6573          * XXX
6574          * For a device compliant with SPC-2 we should be able
6575          * to determine the transport version supported by
6576          * scrutinizing the version descriptors in the
6577          * inquiry buffer.
6578          */
6579
6580         /* Tell the controller what we think */
6581         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6582         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6583         cts.type = CTS_TYPE_CURRENT_SETTINGS;
6584         cts.transport = path->device->transport;
6585         cts.transport_version = path->device->transport_version;
6586         cts.protocol = path->device->protocol;
6587         cts.protocol_version = path->device->protocol_version;
6588         cts.proto_specific.valid = 0;
6589         cts.xport_specific.valid = 0;
6590         xpt_action((union ccb *)&cts);
6591 }
6592
6593 static void
6594 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6595                           int async_update)
6596 {
6597         struct  ccb_pathinq cpi;
6598         struct  ccb_trans_settings cur_cts;
6599         struct  ccb_trans_settings_scsi *scsi;
6600         struct  ccb_trans_settings_scsi *cur_scsi;
6601         struct  cam_sim *sim;
6602         struct  scsi_inquiry_data *inq_data;
6603
6604         if (device == NULL) {
6605                 cts->ccb_h.status = CAM_PATH_INVALID;
6606                 xpt_done((union ccb *)cts);
6607                 return;
6608         }
6609
6610         if (cts->protocol == PROTO_UNKNOWN
6611          || cts->protocol == PROTO_UNSPECIFIED) {
6612                 cts->protocol = device->protocol;
6613                 cts->protocol_version = device->protocol_version;
6614         }
6615
6616         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6617          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6618                 cts->protocol_version = device->protocol_version;
6619
6620         if (cts->protocol != device->protocol) {
6621                 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6622                        cts->protocol, device->protocol);
6623                 cts->protocol = device->protocol;
6624         }
6625
6626         if (cts->protocol_version > device->protocol_version) {
6627                 if (bootverbose) {
6628                         xpt_print(cts->ccb_h.path, "Down-revving Protocol "
6629                             "Version from %d to %d?\n", cts->protocol_version,
6630                             device->protocol_version);
6631                 }
6632                 cts->protocol_version = device->protocol_version;
6633         }
6634
6635         if (cts->transport == XPORT_UNKNOWN
6636          || cts->transport == XPORT_UNSPECIFIED) {
6637                 cts->transport = device->transport;
6638                 cts->transport_version = device->transport_version;
6639         }
6640
6641         if (cts->transport_version == XPORT_VERSION_UNKNOWN
6642          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6643                 cts->transport_version = device->transport_version;
6644
6645         if (cts->transport != device->transport) {
6646                 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6647                     cts->transport, device->transport);
6648                 cts->transport = device->transport;
6649         }
6650
6651         if (cts->transport_version > device->transport_version) {
6652                 if (bootverbose) {
6653                         xpt_print(cts->ccb_h.path, "Down-revving Transport "
6654                             "Version from %d to %d?\n", cts->transport_version,
6655                             device->transport_version);
6656                 }
6657                 cts->transport_version = device->transport_version;
6658         }
6659
6660         sim = cts->ccb_h.path->bus->sim;
6661
6662         /*
6663          * Nothing more of interest to do unless
6664          * this is a device connected via the
6665          * SCSI protocol.
6666          */
6667         if (cts->protocol != PROTO_SCSI) {
6668                 if (async_update == FALSE) 
6669                         (*(sim->sim_action))(sim, (union ccb *)cts);
6670                 return;
6671         }
6672
6673         inq_data = &device->inq_data;
6674         scsi = &cts->proto_specific.scsi;
6675         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6676         cpi.ccb_h.func_code = XPT_PATH_INQ;
6677         xpt_action((union ccb *)&cpi);
6678
6679         /* SCSI specific sanity checking */
6680         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6681          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6682          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6683          || (device->quirk->mintags == 0)) {
6684                 /*
6685                  * Can't tag on hardware that doesn't support tags,
6686                  * doesn't have it enabled, or has broken tag support.
6687                  */
6688                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6689         }
6690
6691         if (async_update == FALSE) {
6692                 /*
6693                  * Perform sanity checking against what the
6694                  * controller and device can do.
6695                  */
6696                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6697                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6698                 cur_cts.type = cts->type;
6699                 xpt_action((union ccb *)&cur_cts);
6700                 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6701                         return;
6702                 }
6703                 cur_scsi = &cur_cts.proto_specific.scsi;
6704                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6705                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6706                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6707                 }
6708                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6709                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6710         }
6711
6712         /* SPI specific sanity checking */
6713         if (cts->transport == XPORT_SPI && async_update == FALSE) {
6714                 u_int spi3caps;
6715                 struct ccb_trans_settings_spi *spi;
6716                 struct ccb_trans_settings_spi *cur_spi;
6717
6718                 spi = &cts->xport_specific.spi;
6719
6720                 cur_spi = &cur_cts.xport_specific.spi;
6721
6722                 /* Fill in any gaps in what the user gave us */
6723                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6724                         spi->sync_period = cur_spi->sync_period;
6725                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6726                         spi->sync_period = 0;
6727                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6728                         spi->sync_offset = cur_spi->sync_offset;
6729                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6730                         spi->sync_offset = 0;
6731                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6732                         spi->ppr_options = cur_spi->ppr_options;
6733                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6734                         spi->ppr_options = 0;
6735                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6736                         spi->bus_width = cur_spi->bus_width;
6737                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6738                         spi->bus_width = 0;
6739                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6740                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6741                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6742                 }
6743                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6744                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6745                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6746                   && (inq_data->flags & SID_Sync) == 0
6747                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6748                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6749                  || (spi->sync_offset == 0)
6750                  || (spi->sync_period == 0)) {
6751                         /* Force async */
6752                         spi->sync_period = 0;
6753                         spi->sync_offset = 0;
6754                 }
6755
6756                 switch (spi->bus_width) {
6757                 case MSG_EXT_WDTR_BUS_32_BIT:
6758                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6759                           || (inq_data->flags & SID_WBus32) != 0
6760                           || cts->type == CTS_TYPE_USER_SETTINGS)
6761                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6762                                 break;
6763                         /* Fall Through to 16-bit */
6764                 case MSG_EXT_WDTR_BUS_16_BIT:
6765                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6766                           || (inq_data->flags & SID_WBus16) != 0
6767                           || cts->type == CTS_TYPE_USER_SETTINGS)
6768                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6769                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6770                                 break;
6771                         }
6772                         /* Fall Through to 8-bit */
6773                 default: /* New bus width?? */
6774                 case MSG_EXT_WDTR_BUS_8_BIT:
6775                         /* All targets can do this */
6776                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6777                         break;
6778                 }
6779
6780                 spi3caps = cpi.xport_specific.spi.ppr_options;
6781                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6782                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6783                         spi3caps &= inq_data->spi3data;
6784
6785                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6786                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6787
6788                 if ((spi3caps & SID_SPI_IUS) == 0)
6789                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6790
6791                 if ((spi3caps & SID_SPI_QAS) == 0)
6792                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6793
6794                 /* PPR protocol options are only valid on a wide bus */
6795                 if (spi->bus_width == 0)
6796                         spi->ppr_options = 0;
6797
6798                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6799                         /*
6800                          * Can't tag queue without disconnection.
6801                          */
6802                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6803                         scsi->valid |= CTS_SCSI_VALID_TQ;
6804                 }
6805
6806                 /*
6807                  * If we are currently performing tagged transactions to
6808                  * this device and want to change its negotiation parameters,
6809                  * go non-tagged for a bit to give the controller a chance to
6810                  * negotiate unhampered by tag messages.
6811                  */
6812                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6813                  && (device->inq_flags & SID_CmdQue) != 0
6814                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6815                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
6816                                    CTS_SPI_VALID_SYNC_OFFSET|
6817                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
6818                         xpt_toggle_tags(cts->ccb_h.path);
6819         }
6820
6821         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6822          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6823                 int device_tagenb;
6824
6825                 /*
6826                  * If we are transitioning from tags to no-tags or
6827                  * vice-versa, we need to carefully freeze and restart
6828                  * the queue so that we don't overlap tagged and non-tagged
6829                  * commands.  We also temporarily stop tags if there is
6830                  * a change in transfer negotiation settings to allow
6831                  * "tag-less" negotiation.
6832                  */
6833                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6834                  || (device->inq_flags & SID_CmdQue) != 0)
6835                         device_tagenb = TRUE;
6836                 else
6837                         device_tagenb = FALSE;
6838
6839                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6840                   && device_tagenb == FALSE)
6841                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6842                   && device_tagenb == TRUE)) {
6843
6844                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6845                                 /*
6846                                  * Delay change to use tags until after a
6847                                  * few commands have gone to this device so
6848                                  * the controller has time to perform transfer
6849                                  * negotiations without tagged messages getting
6850                                  * in the way.
6851                                  */
6852                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6853                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6854                         } else {
6855                                 struct ccb_relsim crs;
6856
6857                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6858                                 device->inq_flags &= ~SID_CmdQue;
6859                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6860                                                     sim->max_dev_openings);
6861                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6862                                 device->tag_delay_count = 0;
6863
6864                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6865                                               /*priority*/1);
6866                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
6867                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6868                                 crs.openings
6869                                     = crs.release_timeout 
6870                                     = crs.qfrozen_cnt
6871                                     = 0;
6872                                 xpt_action((union ccb *)&crs);
6873                         }
6874                 }
6875         }
6876         if (async_update == FALSE) 
6877                 (*(sim->sim_action))(sim, (union ccb *)cts);
6878 }
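     /*
      * Example (a sketch, not part of this file): callers do not invoke
      * xpt_set_transfer_settings() directly.  They fill in a
      * ccb_trans_settings and dispatch an XPT_SET_TRAN_SETTINGS CCB
      * through xpt_action() with the SIM lock held; the validation above
      * then fills in any UNSPECIFIED fields and clamps the request to
      * what the controller and device can do.  The helper name below is
      * hypothetical; the CCB setup mirrors xpt_toggle_tags().
      */
     #if 0
     static void
     example_enable_tags(struct cam_path *path)
     {
             struct ccb_trans_settings cts;

             xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
             cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
             cts.type = CTS_TYPE_CURRENT_SETTINGS;
             /* Leave protocol/transport unspecified; the xpt fills them in. */
             cts.protocol = PROTO_UNSPECIFIED;
             cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
             cts.transport = XPORT_UNSPECIFIED;
             cts.transport_version = XPORT_VERSION_UNSPECIFIED;
             cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
             cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
             xpt_action((union ccb *)&cts);
     }
     #endif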
6879
6880
6881 static void
6882 xpt_toggle_tags(struct cam_path *path)
6883 {
6884         struct cam_ed *dev;
6885
6886         /*
6887          * Give controllers a chance to renegotiate
6888          * before starting tag operations.  We
6889          * "toggle" tagged queuing off then on
6890          * "toggle" tagged queuing off then on,
6891          * counter to come into effect.
6892          */
6893         dev = path->device;
6894         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6895          || ((dev->inq_flags & SID_CmdQue) != 0
6896           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6897                 struct ccb_trans_settings cts;
6898
6899                 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6900                 cts.protocol = PROTO_SCSI;
6901                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6902                 cts.transport = XPORT_UNSPECIFIED;
6903                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6904                 cts.proto_specific.scsi.flags = 0;
6905                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6906                 xpt_set_transfer_settings(&cts, path->device,
6907                                           /*async_update*/TRUE);
6908                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6909                 xpt_set_transfer_settings(&cts, path->device,
6910                                           /*async_update*/TRUE);
6911         }
6912 }
6913
6914 static void
6915 xpt_start_tags(struct cam_path *path)
6916 {
6917         struct ccb_relsim crs;
6918         struct cam_ed *device;
6919         struct cam_sim *sim;
6920         int    newopenings;
6921
6922         device = path->device;
6923         sim = path->bus->sim;
6924         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6925         xpt_freeze_devq(path, /*count*/1);
6926         device->inq_flags |= SID_CmdQue;
6927         if (device->tag_saved_openings != 0)
6928                 newopenings = device->tag_saved_openings;
6929         else
6930                 newopenings = min(device->quirk->maxtags,
6931                                   sim->max_tagged_dev_openings);
6932         xpt_dev_ccbq_resize(path, newopenings);
6933         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6934         crs.ccb_h.func_code = XPT_REL_SIMQ;
6935         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6936         crs.openings
6937             = crs.release_timeout 
6938             = crs.qfrozen_cnt
6939             = 0;
6940         xpt_action((union ccb *)&crs);
6941 }
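     /*
      * Note on the idiom above (also used in xpt_set_transfer_settings()):
      * the device queue is frozen before the CCB queue is resized so that
      * no new commands are issued under the old tagging mode, and the
      * XPT_REL_SIMQ request with RELSIM_RELEASE_AFTER_QEMPTY lifts that
      * freeze only once the device queue has drained.  This is what keeps
      * tagged and non-tagged commands from being outstanding at the same
      * time.
      */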
6942
6943 static int busses_to_config;
6944 static int busses_to_reset;
6945
6946 static int
6947 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6948 {
6949
6950         mtx_assert(bus->sim->mtx, MA_OWNED);
6951
6952         if (bus->path_id != CAM_XPT_PATH_ID) {
6953                 struct cam_path path;
6954                 struct ccb_pathinq cpi;
6955                 int can_negotiate;
6956
6957                 busses_to_config++;
6958                 xpt_compile_path(&path, NULL, bus->path_id,
6959                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6960                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6961                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6962                 xpt_action((union ccb *)&cpi);
6963                 can_negotiate = cpi.hba_inquiry;
6964                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6965                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6966                  && can_negotiate)
6967                         busses_to_reset++;
6968                 xpt_release_path(&path);
6969         }
6970
6971         return(1);
6972 }
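     /*
      * Both configuration walkers here run under xpt_for_all_busses(),
      * which invokes the callback for each registered bus with that bus's
      * SIM lock held (hence the mtx_assert()s); returning nonzero
      * continues the traversal and returning 0 stops it, as
      * xptconfigfunc() does on allocation failure.  A minimal sketch of
      * such a callback, with a hypothetical name and counter argument:
      */
     #if 0
     static int
     example_count_busses(struct cam_eb *bus, void *arg)
     {
             int *count = (int *)arg;

             mtx_assert(bus->sim->mtx, MA_OWNED);
             (*count)++;
             return (1);             /* nonzero: keep iterating */
     }
     #endif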
6973
6974 static int
6975 xptconfigfunc(struct cam_eb *bus, void *arg)
6976 {
6977         struct  cam_path *path;
6978         union   ccb *work_ccb;
6979
6980         mtx_assert(bus->sim->mtx, MA_OWNED);
6981
6982         if (bus->path_id != CAM_XPT_PATH_ID) {
6983                 cam_status status;
6984                 int can_negotiate;
6985
6986                 work_ccb = xpt_alloc_ccb_nowait();
6987                 if (work_ccb == NULL) {
6988                         busses_to_config--;
6989                         xpt_finishconfig(xpt_periph, NULL);
6990                         return(0);
6991                 }
6992                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6993                                               CAM_TARGET_WILDCARD,
6994                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6995                         printf("xptconfigfunc: xpt_create_path failed with "
6996                                "status %#x for bus %d\n", status, bus->path_id);
6997                         printf("xptconfigfunc: halting bus configuration\n");
6998                         xpt_free_ccb(work_ccb);
6999                         busses_to_config--;
7000                         xpt_finishconfig(xpt_periph, NULL);
7001                         return(0);
7002                 }
7003                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
7004                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
7005                 xpt_action(work_ccb);
7006                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
7007                         printf("xptconfigfunc: CPI failed on bus %d "
7008                                "with status %d\n", bus->path_id,
7009                                work_ccb->ccb_h.status);
7010                         xpt_finishconfig(xpt_periph, work_ccb);
7011                         return(1);
7012                 }
7013
7014                 can_negotiate = work_ccb->cpi.hba_inquiry;
7015                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
7016                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
7017                  && (can_negotiate != 0)) {
7018                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
7019                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
7020                         work_ccb->ccb_h.cbfcnp = NULL;
7021                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
7022                                   ("Resetting Bus\n"));
7023                         xpt_action(work_ccb);
7024                         xpt_finishconfig(xpt_periph, work_ccb);
7025                 } else {
7026                         /* Act as though we performed a successful BUS RESET */
7027                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
7028                         xpt_finishconfig(xpt_periph, work_ccb);
7029                 }
7030         }
7031
7032         return(1);
7033 }
7034
7035 static void
7036 xpt_config(void *arg)
7037 {
7038         /*
7039          * Now that interrupts are enabled, go find our devices
7040          */
7041
7042 #ifdef CAMDEBUG
7043         /* Setup debugging flags and path */
7044 #ifdef CAM_DEBUG_FLAGS
7045         cam_dflags = CAM_DEBUG_FLAGS;
7046 #else /* !CAM_DEBUG_FLAGS */
7047         cam_dflags = CAM_DEBUG_NONE;
7048 #endif /* CAM_DEBUG_FLAGS */
7049 #ifdef CAM_DEBUG_BUS
7050         if (cam_dflags != CAM_DEBUG_NONE) {
7051                 /*
7052                  * Locking is specifically omitted here.  No SIMs have
7053                  * registered yet, so xpt_create_path will only be searching
7054                  * empty lists of targets and devices.
7055                  */
7056                 if (xpt_create_path(&cam_dpath, xpt_periph,
7057                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
7058                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
7059                         printf("xpt_config: xpt_create_path() failed for debug"
7060                                " target %d:%d:%d, debugging disabled\n",
7061                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
7062                         cam_dflags = CAM_DEBUG_NONE;
7063                 }
7064         } else
7065                 cam_dpath = NULL;
7066 #else /* !CAM_DEBUG_BUS */
7067         cam_dpath = NULL;
7068 #endif /* CAM_DEBUG_BUS */
7069 #endif /* CAMDEBUG */
7070
7071         /*
7072          * Scan all installed busses.
7073          */
7074         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
7075
7076         if (busses_to_config == 0) {
7077                 /* Call manually because we don't have any busses */
7078                 xpt_finishconfig(xpt_periph, NULL);
7079         } else  {
7080                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
7081                         printf("Waiting %d seconds for SCSI "
7082                                "devices to settle\n", scsi_delay/1000);
7083                 }
7084                 xpt_for_all_busses(xptconfigfunc, NULL);
7085         }
7086 }
7087
7088 /*
7089  * If the given device only has one peripheral attached to it, and if that
7090  * peripheral is the passthrough driver, announce it.  This ensures that the
7091  * user sees some sort of announcement for every peripheral in their system.
7092  */
7093 static int
7094 xptpassannouncefunc(struct cam_ed *device, void *arg)
7095 {
7096         struct cam_periph *periph;
7097         int i;
7098
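             /* Walk the peripheral list solely to count its entries. */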
7099         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
7100              periph = SLIST_NEXT(periph, periph_links), i++);
7101
7102         periph = SLIST_FIRST(&device->periphs);
7103         if ((i == 1)
7104          && (strncmp(periph->periph_name, "pass", 4) == 0))
7105                 xpt_announce_periph(periph, NULL);
7106
7107         return(1);
7108 }
7109
7110 static void
7111 xpt_finishconfig_task(void *context, int pending)
7112 {
7113         struct  periph_driver **p_drv;
7114         int     i;
7115
7116         if (busses_to_config == 0) {
7117                 /* Register all the peripheral drivers */
7118                 /* XXX This will have to change when we have loadable modules */
7119                 p_drv = periph_drivers;
7120                 for (i = 0; p_drv[i] != NULL; i++) {
7121                         (*p_drv[i]->init)();
7122                 }
7123
7124                 /*
7125                  * Check for devices with no "standard" peripheral driver
7126                  * attached.  For any devices like that, announce the
7127                  * passthrough driver so the user will see something.
7128                  */
7129                 xpt_for_all_devices(xptpassannouncefunc, NULL);
7130
7131                 /* Release our hook so that the boot can continue. */
7132                 config_intrhook_disestablish(xsoftc.xpt_config_hook);
7133                 free(xsoftc.xpt_config_hook, M_TEMP);
7134                 xsoftc.xpt_config_hook = NULL;
7135         }
7136
7137         free(context, M_CAMXPT);
7138 }
7139
7140 static void
7141 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
7142 {
7143         struct  xpt_task *task;
7144
7145         if (done_ccb != NULL) {
7146                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7147                           ("xpt_finishconfig\n"));
7148                 switch(done_ccb->ccb_h.func_code) {
7149                 case XPT_RESET_BUS:
7150                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7151                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7152                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7153                                 done_ccb->crcn.flags = 0;
7154                                 xpt_action(done_ccb);
7155                                 return;
7156                         }
7157                         /* FALLTHROUGH */
7158                 case XPT_SCAN_BUS:
7159                 default:
7160                         xpt_free_path(done_ccb->ccb_h.path);
7161                         busses_to_config--;
7162                         break;
7163                 }
7164         }
7165
7166         task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
7167         if (task != NULL) {
7168                 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
7169                 taskqueue_enqueue(taskqueue_thread, &task->task);
7170         }
7171
7172         if (done_ccb != NULL)
7173                 xpt_free_ccb(done_ccb);
7174 }
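     /*
      * A sketch of the deferral idiom used above, with hypothetical
      * example_* names: work that cannot run in the current context is
      * wrapped in an xpt_task whose handler frees it, then queued to the
      * system taskqueue.
      */
     #if 0
     static void
     example_task_func(void *context, int pending)
     {
             struct xpt_task *task = context;

             /* ... perform the deferred work, using task->data1/data2 ... */
             free(task, M_CAMXPT);
     }

     static void
     example_defer_work(void)
     {
             struct xpt_task *task;

             task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
             if (task != NULL) {
                     TASK_INIT(&task->task, 0, example_task_func, task);
                     taskqueue_enqueue(taskqueue_thread, &task->task);
             }
     }
     #endif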
7175
7176 static void
7177 xptaction(struct cam_sim *sim, union ccb *work_ccb)
7178 {
7179         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7180
7181         switch (work_ccb->ccb_h.func_code) {
7182         /* Common cases first */
7183         case XPT_PATH_INQ:              /* Path routing inquiry */
7184         {
7185                 struct ccb_pathinq *cpi;
7186
7187                 cpi = &work_ccb->cpi;
7188                 cpi->version_num = 1; /* XXX??? */
7189                 cpi->hba_inquiry = 0;
7190                 cpi->target_sprt = 0;
7191                 cpi->hba_misc = 0;
7192                 cpi->hba_eng_cnt = 0;
7193                 cpi->max_target = 0;
7194                 cpi->max_lun = 0;
7195                 cpi->initiator_id = 0;
7196                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7197                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
7198                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7199                 cpi->unit_number = sim->unit_number;
7200                 cpi->bus_id = sim->bus_id;
7201                 cpi->base_transfer_speed = 0;
7202                 cpi->protocol = PROTO_UNSPECIFIED;
7203                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7204                 cpi->transport = XPORT_UNSPECIFIED;
7205                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7206                 cpi->ccb_h.status = CAM_REQ_CMP;
7207                 xpt_done(work_ccb);
7208                 break;
7209         }
7210         default:
7211                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7212                 xpt_done(work_ccb);
7213                 break;
7214         }
7215 }
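     /*
      * xptaction() backs the xpt's own placeholder SIM (the bus at
      * CAM_XPT_PATH_ID that the configuration walkers above skip), so the
      * only request it services is a path inquiry describing a controller
      * with no capabilities; anything else completes as CAM_REQ_INVALID.
      */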
7216
7217 /*
7218  * The xpt as a "controller" has no interrupt sources, so polling
7219  * is a no-op.
7220  */
7221 static void
7222 xptpoll(struct cam_sim *sim)
7223 {
7224 }
7225
7226 void
7227 xpt_lock_buses(void)
7228 {
7229         mtx_lock(&xsoftc.xpt_topo_lock);
7230 }
7231
7232 void
7233 xpt_unlock_buses(void)
7234 {
7235         mtx_unlock(&xsoftc.xpt_topo_lock);
7236 }
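     /*
      * A sketch of how a caller uses the accessors above, assuming it
      * only needs the topology to stay stable while it looks at the bus
      * list (the function name is hypothetical):
      */
     #if 0
     static void
     example_examine_topology(void)
     {
             xpt_lock_buses();
             /* ... walk the busses without racing xpt_bus_register() ... */
             xpt_unlock_buses();
     }
     #endif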
7237
7238 static void
7239 camisr(void *dummy)
7240 {
7241         cam_simq_t queue;
7242         struct cam_sim *sim;
7243
7244         mtx_lock(&cam_simq_lock);
7245         TAILQ_INIT(&queue);
7246         TAILQ_CONCAT(&queue, &cam_simq, links);
7247         mtx_unlock(&cam_simq_lock);
7248
7249         while ((sim = TAILQ_FIRST(&queue)) != NULL) {
7250                 TAILQ_REMOVE(&queue, sim, links);
7251                 CAM_SIM_LOCK(sim);
7252                 sim->flags &= ~CAM_SIM_ON_DONEQ;
7253                 camisr_runqueue(&sim->sim_doneq);
7254                 CAM_SIM_UNLOCK(sim);
7255         }
7256 }
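     /*
      * Note: camisr() holds the global cam_simq_lock only long enough to
      * steal the pending SIM list into a local queue; each SIM's completed
      * CCBs are then processed under that SIM's own lock, so the
      * peripheral completion callbacks in camisr_runqueue() always run
      * with the owning SIM locked.
      */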
7257
7258 static void
7259 camisr_runqueue(void *V_queue)
7260 {
7261         cam_isrq_t *queue = V_queue;
7262         struct  ccb_hdr *ccb_h;
7263
7264         while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
7265                 int     runq;
7266
7267                 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
7268                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7269
7270                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7271                           ("camisr\n"));
7272
7273                 runq = FALSE;
7274
7275                 if (ccb_h->flags & CAM_HIGH_POWER) {
7276                         struct highpowerlist    *hphead;
7277                         union ccb               *send_ccb;
7278
7279                         mtx_lock(&xsoftc.xpt_lock);
7280                         hphead = &xsoftc.highpowerq;
7281
7282                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7283
7284                         /*
7285                          * This command is done; increment the count of
7285                          * available high power slots.
7286                          */
7287                         xsoftc.num_highpower++;
7288
7289                         /* 
7290                          * Any high powered commands queued up?
7291                          */
7292                         if (send_ccb != NULL) {
7293
7294                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7295                                 mtx_unlock(&xsoftc.xpt_lock);
7296
7297                                 xpt_release_devq(send_ccb->ccb_h.path,
7298                                                  /*count*/1, /*runqueue*/TRUE);
7299                         } else
7300                                 mtx_unlock(&xsoftc.xpt_lock);
7301                 }
7302
7303                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7304                         struct cam_ed *dev;
7305
7306                         dev = ccb_h->path->device;
7307
7308                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7309
7310                         if (!SIM_DEAD(ccb_h->path->bus->sim)) {
7311                                 ccb_h->path->bus->sim->devq->send_active--;
7312                                 ccb_h->path->bus->sim->devq->send_openings++;
7313                         }
7314                         
7315                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7316                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7317                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7318                           && (dev->ccbq.dev_active == 0))) {
7319                                 
7320                                 xpt_release_devq(ccb_h->path, /*count*/1,
7321                                                  /*run_queue*/TRUE);
7322                         }
7323
7324                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7325                          && (--dev->tag_delay_count == 0))
7326                                 xpt_start_tags(ccb_h->path);
7327
7328                         if ((dev->ccbq.queue.entries > 0)
7329                          && (dev->qfrozen_cnt == 0)
7330                          && (device_is_send_queued(dev) == 0)) {
7331                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7332                                                               dev);
7333                         }
7334                 }
7335
7336                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7337                         xpt_release_simq(ccb_h->path->bus->sim,
7338                                          /*run_queue*/TRUE);
7339                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
7340                         runq = FALSE;
7341                 } 
7342
7343                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7344                  && (ccb_h->status & CAM_DEV_QFRZN)) {
7345                         xpt_release_devq(ccb_h->path, /*count*/1,
7346                                          /*run_queue*/TRUE);
7347                         ccb_h->status &= ~CAM_DEV_QFRZN;
7348                 } else if (runq) {
7349                         xpt_run_dev_sendq(ccb_h->path->bus);
7350                 }
7351
7352                 /* Call the peripheral driver's callback */
7353                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7354         }
7355 }
7356
7357 static void
7358 dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7359 {
7360
7361         ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7362         xpt_done(ccb);
7363 }
7364  
7365 static void
7366 dead_sim_poll(struct cam_sim *sim)
7367 {
7368 }
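     /*
      * These stubs stand in for the action and poll vectors of a SIM that
      * has gone away (see the SIM_DEAD() check in camisr_runqueue()
      * above): any CCB routed to a dead SIM completes immediately with
      * CAM_DEV_NOT_THERE rather than touching departed driver state, and
      * polling one is a no-op.
      */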