/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>     /* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include <machine/stdarg.h>     /* for xpt_print below */
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
        SLIST_ENTRY(async_node) links;
        u_int32_t       event_enable;   /* Async Event enables */
        void            (*callback)(void *arg, u_int32_t code,
                                    struct cam_path *path, void *args);
        void            *callback_arg;
};
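
/*
 * A minimal sketch of how a consumer lands on one of these lists, using
 * the CCB-based registration this file services via XPT_SASYNC_CB (the
 * callback and softc names are illustrative, not part of this file):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mycallback;
 *	csa.callback_arg = mysoftc;
 *	xpt_action((union ccb *)&csa);
 */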

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
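
/*
 * The #ifndef guard above means the limit can be overridden from the
 * kernel configuration file, e.g. (illustrative value):
 *
 *	options CAM_MAX_HIGHPOWER=8
 */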

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
        cam_pinfo pinfo;
        struct    cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
        TAILQ_ENTRY(cam_ed) links;
        struct  cam_ed_qinfo alloc_ccb_entry;
        struct  cam_ed_qinfo send_ccb_entry;
        struct  cam_et   *target;
        lun_id_t         lun_id;
        struct  camq drvq;              /*
                                         * Queue of type drivers wanting to do
                                         * work on this device.
                                         */
        struct  cam_ccbq ccbq;          /* Queue of pending ccbs */
        struct  async_list asyncs;      /* Async callback info for this B/T/L */
        struct  periph_list periphs;    /* All attached devices */
        u_int   generation;             /* Generation number */
        struct  cam_periph *owner;      /* Peripheral driver's ownership tag */
        struct  xpt_quirk_entry *quirk; /* Oddities about this device */
                                        /* Storage for the inquiry data */
        cam_proto        protocol;
        u_int            protocol_version;
        cam_xport        transport;
        u_int            transport_version;
        struct           scsi_inquiry_data inq_data;
        u_int8_t         inq_flags;     /*
                                         * Current settings for inquiry flags.
                                         * This allows us to override settings
                                         * like disconnection and tagged
                                         * queuing for a device.
                                         */
        u_int8_t         queue_flags;   /* Queue flags from the control page */
        u_int8_t         serial_num_len;
        u_int8_t        *serial_num;
        u_int32_t        qfrozen_cnt;
        u_int32_t        flags;
#define CAM_DEV_UNCONFIGURED            0x01
#define CAM_DEV_REL_TIMEOUT_PENDING     0x02
#define CAM_DEV_REL_ON_COMPLETE         0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY      0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED     0x10
#define CAM_DEV_TAG_AFTER_COUNT         0x20
#define CAM_DEV_INQUIRY_DATA_VALID      0x40
#define CAM_DEV_IN_DV                   0x80
#define CAM_DEV_DV_HIT_BOTTOM           0x100
        u_int32_t        tag_delay_count;
#define CAM_TAG_DELAY_COUNT             5
        u_int32_t        tag_saved_openings;
        u_int32_t        refcount;
        struct           callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
        TAILQ_HEAD(, cam_ed) ed_entries;
        TAILQ_ENTRY(cam_et) links;
        struct  cam_eb  *bus;
        target_id_t     target_id;
        u_int32_t       refcount;
        u_int           generation;
        struct          timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
        TAILQ_HEAD(, cam_et) et_entries;
        TAILQ_ENTRY(cam_eb)  links;
        path_id_t            path_id;
        struct cam_sim       *sim;
        struct timeval       last_reset;
        u_int32_t            flags;
#define CAM_EB_RUNQ_SCHEDULED   0x01
        u_int32_t            refcount;
        u_int                generation;
};

struct cam_path {
        struct cam_periph *periph;
        struct cam_eb     *bus;
        struct cam_et     *target;
        struct cam_ed     *device;
};

struct xpt_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define CAM_QUIRK_NOLUNS        0x01
#define CAM_QUIRK_NOSERIAL      0x02
#define CAM_QUIRK_HILUNS        0x04
#define CAM_QUIRK_NOHILUNS      0x08
        u_int mintags;
        u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");
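
/*
 * Given the TUNABLE_INT/SYSCTL_PROC pair above, the knob can be set
 * either at boot or at runtime, e.g.:
 *
 *	kern.cam.cam_srch_hi=1		(in /boot/loader.conf)
 *	sysctl kern.cam.cam_srch_hi=1	(on a running system)
 */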

#define CAM_SCSI2_MAXLUN        8
/*
 * If we're not quirked to search only the first 8 luns,
 * and we are either quirked to search above lun 7,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 7.
 */
#define CAN_SRCH_HI_SPARSE(dv)                          \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define CAN_SRCH_HI_DENSE(dv)                           \
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)      \
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)            \
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
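
/*
 * Worked example: a device reporting an ANSI revision above SCSI-2 with
 * no quirk bits set always satisfies CAN_SRCH_HI_DENSE, but satisfies
 * CAN_SRCH_HI_SPARSE only when cam_srch_hi is nonzero; a device quirked
 * CAM_QUIRK_NOHILUNS satisfies neither, whatever its ANSI revision.
 */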

typedef enum {
        XPT_FLAG_OPEN           = 0x01
} xpt_flags;

struct xpt_softc {
        xpt_flags       flags;
        u_int32_t       generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Unfortunately, the Quantum Atlas III has the same
                 * problem as the Atlas II drives above.
                 * Reported by: "Johan Granlund" <johan@granlund.nu>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM39100TD-SW N1B0
                 *
                 * It's possible that Quantum will fix the problem in later
                 * firmware revisions.  If that happens, the quirk entry
                 * will need to be made specific to the firmware revisions
                 * with the problem.
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * 18 Gig Atlas III, same problem as the 9G version.
                 * Reported by: Andre Albsmeier
                 *              <andre.albsmeier@mchp.siemens.de>
                 *
                 * For future reference, the drive with the problem was:
                 * QUANTUM QM318000TD-S N491
                 */
                /* Reports QUEUE FULL for temporary resource shortages */
                { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
                /*quirks*/0, /*mintags*/24, /*maxtags*/32
        },
        {
                /*
                 * Broken tagged queuing drive
                 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
                 *         and: Martin Renters <martin@tdc.on.ca>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
                /*
                 * The Seagate Medalist Pro drives have very poor write
                 * performance with anything more than 2 tags.
                 *
                 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
                 * Drive:  <SEAGATE ST36530N 1444>
                 *
                 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
                 * Drive:  <SEAGATE ST34520W 1281>
                 *
                 * No one has actually reported that the 9G version
                 * (ST39140*) of the Medalist Pro has the same problem, but
                 * we're assuming that it does because the 4G and 6.5G
                 * versions of the drive are broken.
                 */
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
                /*quirks*/0, /*mintags*/2, /*maxtags*/2
        },
        {
                /*
                 * Slow when tagged queueing is enabled.  Write performance
                 * steadily drops off with more and more concurrent
                 * transactions.  Best sequential write performance with
                 * tagged queueing turned off and write caching turned on.
                 *
                 * PR:  kern/10398
                 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
                 * Drive:  DCAS-34330 w/ "S65A" firmware.
                 *
                 * The drive with the problem had the "S65A" firmware
                 * revision, and has also been reported (by Stephen J.
                 * Roznowski <sjr@home.net>) for a drive with the "S61A"
                 * firmware revision.
                 *
                 * Although no one has reported problems with the 2 gig
                 * version of the DCAS drive, the assumption is that it
                 * has the same problems as the 4 gig version.  Therefore
                 * this quirk entry disables tagged queueing for all
                 * DCAS drives.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Broken tagged queuing drive */
                { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /* This does not support other than LUN 0 */
                { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * Broken tagged queuing drive.
                 * Submitted by:
                 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
                 * in PR kern/9535
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Slow when tagged queueing is enabled. (1.5MB/sec versus
                 * 8MB/sec.)
                 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
                 * Best performance with these drives is achieved with
                 * tagged queueing turned off, and write caching turned on.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
                /*quirks*/0, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Doesn't handle queue full condition correctly,
                 * so we need to limit maxtags to what the device
                 * can handle instead of determining this automatically.
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
                /*quirks*/0, /*mintags*/2, /*maxtags*/32
        },
        {
                /* Really only one LUN */
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* I can't believe we need a quirk for DPT volumes. */
                { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
                /*mintags*/0, /*maxtags*/255
        },
        {
                /*
                 * Many Sony CDROM drives don't like multi-LUN probing.
                 */
                { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * This drive doesn't like multiple LUN probing.
                 * Submitted by:  Parag Patel <parag@cgt.com>
                 */
                { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * The 8200 doesn't like multi-lun probing, and probably
                 * doesn't like serial number requests either.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "EXB-8200*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Let's try the same as above, but for a drive that says
                 * it's an IPL-6860 but is actually an EXB 8200.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
                        "IPL-6860*", "*"
                },
                CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These Hitachi drives don't like multi-lun probing.
                 * The PR submitter has a DK319H, but says that the Linux
                 * kernel has a similar work-around for the DK312 and DK314,
                 * so all DK31* drives are quirked here.
                 * PR:            misc/18793
                 * Submitted by:  Paul Haddad <paul@pth.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                /*
                 * The Hitachi CJ series with J8A8 firmware apparently has
                 * problems with tagged commands.
                 * PR: 23536
                 * Reported by: amagai@nue.org
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * These are the large storage arrays.
                 * Submitted by:  William Carrel <william.carrel@infospace.com>
                 */
                { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
                CAM_QUIRK_HILUNS, 2, 1024
        },
        {
                /*
                 * This old revision of the TDC3600 is also SCSI-1, and
                 * hangs upon serial number probing.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
                        " TDC 3600", "U07:"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Maxtor Personal Storage 3000XT (Firewire)
                 * hangs upon serial number probing.
                 */
                {
                        T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
                        "1394 storage", "*"
                },
                CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
                        "CP150", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /*
                 * Would respond to all LUNs if asked for.
                 */
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
                        "96X2*", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Submitted by: Matthew Dodd <winter@jurai.net> */
                { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* TeraSolutions special settings for TRC-22 RAID */
                { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
                  /*quirks*/0, /*mintags*/55, /*maxtags*/255
        },
        {
                /* Veritas Storage Appliance */
                { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
                  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
        },
        {
                /*
                 * Would respond to all LUNs.  Device type and removable
                 * flag are jumper-selectable.
                 */
                { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
                  "Tahiti 1", "*"
                },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* EasyRAID E5A aka. areca ARC-6010 */
                { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
                  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
        },
        {
                { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
                CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
        },
        {
                /* Default tagged queuing parameters for all devices */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                /*quirks*/0, /*mintags*/2, /*maxtags*/255
        },
};
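
/*
 * Note that a device's inquiry data is compared against the entries
 * above in table order and the first match wins, so more specific
 * patterns must precede the catch-all entry, which has to stay last.
 */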

static const int xpt_quirk_table_size =
        sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
        DM_RET_COPY             = 0x01,
        DM_RET_FLAG_MASK        = 0x0f,
        DM_RET_NONE             = 0x00,
        DM_RET_STOP             = 0x10,
        DM_RET_DESCEND          = 0x20,
        DM_RET_ERROR            = 0x30,
        DM_RET_ACTION_MASK      = 0xf0
} dev_match_ret;

typedef enum {
        XPT_DEPTH_BUS,
        XPT_DEPTH_TARGET,
        XPT_DEPTH_DEVICE,
        XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
        xpt_traverse_depth      depth;
        void                    *tr_func;
        void                    *tr_arg;
};

typedef int     xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int     xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int     xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int     xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int     xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static struct mtx cam_bioq_lock;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;      /*
                                 * Maximum size of ccb pool.  Modified as
                                 * devices are added/removed or have their
                                 * opening counts changed.
                                 */
static u_int xpt_ccb_count;     /* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
        xpt_periph_init, "xpt",
        TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
        probe_periph_init, "probe",
        TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);


static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDGIANT,
        .d_open =       xptopen,
        .d_close =      xptclose,
        .d_ioctl =      xptioctl,
        .d_name =       "xpt",
};
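
/*
 * Userland reaches the entry points above through /dev/xpt0.  A rough
 * sketch of a CAMIOCOMMAND consumer follows (error handling omitted;
 * real applications normally go through libcam's cam_open_device() and
 * cam_send_ccb() instead of issuing the ioctl by hand):
 *
 *	union ccb ccb;
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);		(O_NONBLOCK is rejected)
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */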

static struct intr_config_hook *xpt_config_hook;

static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
        .sim_action =   dead_sim_action,
        .sim_poll =     dead_sim_poll,
        .sim_name =     "dead_sim",
};

#define SIM_DEAD(sim)   ((sim) == &cam_dead_sim)

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef  CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Pointers to software interrupt handlers */
static void *cambio_ih;

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
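
/*
 * A kernel configuration that satisfies the constraints above and traces
 * CDBs on a single device might therefore contain (values illustrative):
 *
 *	options CAMDEBUG
 *	options CAM_DEBUG_BUS=0
 *	options CAM_DEBUG_TARGET=1
 *	options CAM_DEBUG_LUN=0
 *	options CAM_DEBUG_FLAGS=CAM_DEBUG_CDB
 */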

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
        "cam",
        cam_module_event_handler,
        NULL
};

static void     xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status       xpt_compile_path(struct cam_path *new_path,
                                         struct cam_periph *perph,
                                         path_id_t path_id,
                                         target_id_t target_id,
                                         lun_id_t lun_id);

static void             xpt_release_path(struct cam_path *path);

static void             xpt_async_bcast(struct async_list *async_head,
                                        u_int32_t async_code,
                                        struct cam_path *path,
                                        void *async_arg);
static void             xpt_dev_async(u_int32_t async_code,
                                      struct cam_eb *bus,
                                      struct cam_et *target,
                                      struct cam_ed *device,
                                      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int       xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
                                  u_int32_t new_priority);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, u_int count,
                                         int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
                 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
                                  lun_id_t lun_id);
static void      xpt_release_device(struct cam_eb *bus, struct cam_et *target,
                                    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void      xpt_scan_lun(struct cam_periph *periph,
                              struct cam_path *path, cam_flags flags,
                              union ccb *ccb);
static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t    xptconfigbuscountfunc;
static xpt_busfunc_t    xptconfigfunc;
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr(void *);
#if 0
static void      xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void      xptasync(struct cam_periph *periph,
                          u_int32_t code, cam_path *path);
#endif
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                                    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                                       u_int num_patterns,
                                       struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int              xptedtmatch(struct ccb_dev_match *cdm);
static int              xptperiphlistmatch(struct ccb_dev_match *cdm);
static int              xptbustraverse(struct cam_eb *start_bus,
                                       xpt_busfunc_t *tr_func, void *arg);
static int              xpttargettraverse(struct cam_eb *bus,
                                          struct cam_et *start_target,
                                          xpt_targetfunc_t *tr_func, void *arg);
static int              xptdevicetraverse(struct cam_et *target,
                                          struct cam_ed *start_device,
                                          xpt_devicefunc_t *tr_func, void *arg);
static int              xptperiphtraverse(struct cam_ed *device,
                                          struct cam_periph *start_periph,
                                          xpt_periphfunc_t *tr_func, void *arg);
static int              xptpdrvtraverse(struct periph_driver **start_pdrv,
                                        xpt_pdrvfunc_t *tr_func, void *arg);
static int              xptpdperiphtraverse(struct periph_driver **pdrv,
                                            struct cam_periph *start_periph,
                                            xpt_periphfunc_t *tr_func,
                                            void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static int              xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int              xpt_for_all_targets(xpt_targetfunc_t *tr_func,
                                            void *arg);
#endif
static int              xpt_for_all_devices(xpt_devicefunc_t *tr_func,
                                            void *arg);
#ifdef notusedyet
static int              xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
                                            void *arg);
#endif
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                                    void *arg);
static cam_status       proberegister(struct cam_periph *periph,
                                      void *arg);
static void      probeschedule(struct cam_periph *probe_periph);
static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
static void      proberequestdefaultnegotiation(struct cam_periph *periph);
static int       proberequestbackoff(struct cam_periph *periph,
                                     struct cam_ed *device);
static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
static void      probecleanup(struct cam_periph *periph);
static void      xpt_find_quirk(struct cam_ed *device);
static void      xpt_devise_transport(struct cam_path *path);
static void      xpt_set_transfer_settings(struct ccb_trans_settings *cts,
                                           struct cam_ed *device,
                                           int async_update);
static void      xpt_toggle_tags(struct cam_path *path);
static void      xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
                                            struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
                                           struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
        int retval;

        if (dev->ccbq.devq_openings > 0) {
                if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
                        cam_ccbq_resize(&dev->ccbq,
                                        dev->ccbq.dev_openings
                                        + dev->ccbq.dev_active);
                        dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
                }
                /*
                 * The priority of a device waiting for CCB resources
                 * is that of the highest priority peripheral driver
                 * enqueued.
                 */
                retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                          &dev->alloc_ccb_entry.pinfo,
                                          CAMQ_GET_HEAD(&dev->drvq)->priority);
        } else {
                retval = 0;
        }

        return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
        int     retval;

        if (dev->ccbq.dev_openings > 0) {
                /*
                 * The priority of a device waiting for controller
                 * resources is that of the highest priority CCB
                 * enqueued.
                 */
                retval =
                    xpt_schedule_dev(&bus->sim->devq->send_queue,
                                     &dev->send_ccb_entry.pinfo,
                                     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
        } else {
                retval = 0;
        }
        return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
        return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
        return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
        return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
        /*
         * Have work to do.
         * Have space to do more work.
         * Allowed to do work.
         */
        return ((devq->alloc_queue.qfrozen_cnt == 0)
             && (devq->alloc_queue.entries > 0)
             && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
        make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        /* Caller will release the CCB */
        wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
                return(EPERM);

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                printf("xpt%d: can't do nonblocking access\n", unit);
                return(ENODEV);
        }

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptopen: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves open */
        xsoftc.flags |= XPT_FLAG_OPEN;

        return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
        int unit;

        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptclose: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        /* Mark ourselves closed */
        xsoftc.flags &= ~XPT_FLAG_OPEN;

        return(0);
}

static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int unit, error;

        error = 0;
        unit = minor(dev) & 0xff;

        /*
         * We only have one transport layer right now.  If someone accesses
         * us via something other than minor number 0, point out their
         * mistake.
         */
        if (unit != 0) {
                printf("xptioctl: got invalid xpt unit %d\n", unit);
                return(ENXIO);
        }

        switch(cmd) {
        /*
         * For the transport layer CAMIOCOMMAND ioctl, we really only want
         * to accept CCB types that don't quite make sense to send through a
         * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
         * in the CAM spec.
         */
        case CAMIOCOMMAND: {
                union ccb *ccb;
                union ccb *inccb;

                inccb = (union ccb *)addr;

                switch(inccb->ccb_h.func_code) {
                case XPT_SCAN_BUS:
                case XPT_RESET_BUS:
                        if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
                         || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
                                error = EINVAL;
                                break;
                        }
                        /* FALLTHROUGH */
                case XPT_PATH_INQ:
                case XPT_ENG_INQ:
                case XPT_SCAN_LUN:

                        ccb = xpt_alloc_ccb();

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                xpt_free_ccb(ccb);
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(ccb, inccb);
                        ccb->ccb_h.cbfcnp = xptdone;
                        cam_periph_runccb(ccb, NULL, 0, 0, NULL);
                        bcopy(ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb->ccb_h.path);
                        xpt_free_ccb(ccb);
                        break;

                case XPT_DEBUG: {
                        union ccb ccb;

                        /*
                         * This is an immediate CCB, so it's okay to
                         * allocate it on the stack.
                         */

                        /*
                         * Create a path using the bus, target, and lun the
                         * user passed in.
                         */
                        if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                            inccb->ccb_h.path_id,
                                            inccb->ccb_h.target_id,
                                            inccb->ccb_h.target_lun) !=
                                            CAM_REQ_CMP){
                                error = EINVAL;
                                break;
                        }
                        /* Ensure all of our fields are correct */
                        xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                                      inccb->ccb_h.pinfo.priority);
                        xpt_merge_ccb(&ccb, inccb);
                        ccb.ccb_h.cbfcnp = xptdone;
                        xpt_action(&ccb);
                        bcopy(&ccb, inccb, sizeof(union ccb));
                        xpt_free_path(ccb.ccb_h.path);
                        break;

                }
                case XPT_DEV_MATCH: {
                        struct cam_periph_map_info mapinfo;
                        struct cam_path *old_path;

                        /*
                         * We can't deal with physical addresses for this
                         * type of transaction.
                         */
                        if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                                error = EINVAL;
                                break;
                        }

                        /*
                         * Save this in case the caller had it set to
                         * something in particular.
                         */
                        old_path = inccb->ccb_h.path;

                        /*
                         * We really don't need a path for the matching
                         * code.  The path is needed because of the
                         * debugging statements in xpt_action().  They
                         * assume that the CCB has a valid path.
                         */
                        inccb->ccb_h.path = xpt_periph->path;

                        bzero(&mapinfo, sizeof(mapinfo));

                        /*
                         * Map the pattern and match buffers into kernel
                         * virtual address space.
                         */
                        error = cam_periph_mapmem(inccb, &mapinfo);

                        if (error) {
                                inccb->ccb_h.path = old_path;
                                break;
                        }

                        /*
                         * This is an immediate CCB, we can send it on directly.
                         */
                        xpt_action(inccb);

                        /*
                         * Map the buffers back into user space.
                         */
                        cam_periph_unmapmem(inccb, &mapinfo);

                        inccb->ccb_h.path = old_path;

                        error = 0;
                        break;
                }
                default:
                        error = ENOTSUP;
                        break;
                }
                break;
        }
        /*
         * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as input,
         * with the peripheral driver name and unit number filled in.  The other
         * fields don't really matter as input.  The passthrough driver name
         * ("pass"), and unit number are passed back in the ccb.  The current
         * device generation number, and the index into the device peripheral
         * driver list, and the status are also passed back.  Note that
         * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
         * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
         * (or rather should be) impossible for the device peripheral driver
         * list to change since we look at the whole thing in one pass, and
         * we do it with splcam protection.
         *
         */
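        /*
         * For example, a caller that wants the passthrough device for da4
         * would issue this ioctl with cgdl.periph_name set to "da" and
         * cgdl.unit_number set to 4; the matching "pass" name and unit
         * number come back in the same fields (see the lookup loops below).
         */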
        case CAMGETPASSTHRU: {
                union ccb *ccb;
                struct cam_periph *periph;
                struct periph_driver **p_drv;
                char   *name;
                u_int unit;
                u_int cur_generation;
                int base_periph_found;
                int splbreaknum;
                int s;

                ccb = (union ccb *)addr;
                unit = ccb->cgdl.unit_number;
                name = ccb->cgdl.periph_name;
                /*
                 * Every 100 devices, we want to drop our spl protection to
                 * give the software interrupt handler a chance to run.
                 * Most systems won't run into this check, but this should
                 * avoid starvation in the software interrupt handler in
                 * large systems.
                 */
                splbreaknum = 100;

                base_periph_found = 0;

                /*
                 * Sanity check -- make sure we don't get a null peripheral
                 * driver name.
                 */
                if (*ccb->cgdl.periph_name == '\0') {
                        error = EINVAL;
                        break;
                }

                /* Keep the list from changing while we traverse it */
                s = splcam();
ptstartover:
                cur_generation = xsoftc.generation;

                /* first find our driver in the list of drivers */
                for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
                        if (strcmp((*p_drv)->driver_name, name) == 0)
                                break;

                if (*p_drv == NULL) {
                        splx(s);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        ccb->cgdl.status = CAM_GDEVLIST_ERROR;
                        *ccb->cgdl.periph_name = '\0';
                        ccb->cgdl.unit_number = 0;
                        error = ENOENT;
                        break;
                }

                /*
                 * Run through every peripheral instance of this driver
                 * and check to see whether it matches the unit passed
                 * in by the user.  If it does, get out of the loops and
                 * find the passthrough driver associated with that
                 * peripheral driver.
                 */
                for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
                     periph = TAILQ_NEXT(periph, unit_links)) {

                        if (periph->unit_number == unit) {
                                break;
                        } else if (--splbreaknum == 0) {
                                splx(s);
                                s = splcam();
                                splbreaknum = 100;
                                if (cur_generation != xsoftc.generation)
                                       goto ptstartover;
                        }
                }
                /*
                 * If we found the peripheral driver that the user passed
                 * in, go through all of the peripheral drivers for that
                 * particular device and look for a passthrough driver.
                 */
                if (periph != NULL) {
                        struct cam_ed *device;
                        int i;

                        base_periph_found = 1;
                        device = periph->path->device;
                        for (i = 0, periph = SLIST_FIRST(&device->periphs);
                             periph != NULL;
                             periph = SLIST_NEXT(periph, periph_links), i++) {
                                /*
                                 * Check to see whether we have a
                                 * passthrough device or not.
                                 */
                                if (strcmp(periph->periph_name, "pass") == 0) {
                                        /*
                                         * Fill in the getdevlist fields.
                                         */
                                        strcpy(ccb->cgdl.periph_name,
                                               periph->periph_name);
                                        ccb->cgdl.unit_number =
                                                periph->unit_number;
                                        if (SLIST_NEXT(periph, periph_links))
                                                ccb->cgdl.status =
                                                        CAM_GDEVLIST_MORE_DEVS;
                                        else
                                                ccb->cgdl.status =
                                                       CAM_GDEVLIST_LAST_DEVICE;
                                        ccb->cgdl.generation =
                                                device->generation;
                                        ccb->cgdl.index = i;
                                        /*
                                         * Fill in some CCB header fields
                                         * that the user may want.
                                         */
                                        ccb->ccb_h.path_id =
                                                periph->path->bus->path_id;
                                        ccb->ccb_h.target_id =
                                                periph->path->target->target_id;
                                        ccb->ccb_h.target_lun =
                                                periph->path->device->lun_id;
                                        ccb->ccb_h.status = CAM_REQ_CMP;
                                        break;
                                }
                        }
                }

                /*
                 * If the periph is null here, one of two things has
                 * happened.  The first possibility is that we couldn't
                 * find the unit number of the particular peripheral driver
                 * that the user is asking about.  e.g. the user asks for
                 * the passthrough driver for "da11".  We find the list of
                 * "da" peripherals all right, but there is no unit 11.
                 * The other possibility is that we went through the list
                 * of peripheral drivers attached to the device structure,
                 * but didn't find one with the name "pass".  Either way,
                 * we return ENOENT, since we couldn't find something.
                 */
                if (periph == NULL) {
1372                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1373                         ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1374                         *ccb->cgdl.periph_name = '\0';
1375                         ccb->cgdl.unit_number = 0;
1376                         error = ENOENT;
1377                         /*
1378                          * If base_periph_found is set, the user found the
1379                          * peripheral driver they asked about, but the
1380                          * passthrough driver they are looking for is not
1381                          * compiled into this kernel, so spell out the
1382                          * fix for this common configuration mistake.
1383                          */
1384                         if (base_periph_found == 1) {
1385                                 printf("xptioctl: pass driver is not in the "
1386                                        "kernel\n");
1387                                 printf("xptioctl: put \"device pass0\" in "
1388                                        "your kernel config file\n");
1389                         }
1390                 }
1391                 splx(s);
1392                 break;
1393                 }
1394         default:
1395                 error = ENOTTY;
1396                 break;
1397         }
1398
1399         return(error);
1400 }
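/*
 * Illustrative only: a userland consumer normally reaches the
 * XPT_GDEVLIST case above via the CAMGETPASSTHRU ioctl on the xpt
 * device node.  A minimal sketch, with error handling elided (see
 * libcam for the real sequence):
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strncpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 0;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0
 *	 && ccb.ccb_h.status == CAM_REQ_CMP)
 *		printf("pass device: %s%d\n", ccb.cgdl.periph_name,
 *		       ccb.cgdl.unit_number);
 */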
1401
1402 static int
1403 cam_module_event_handler(module_t mod, int what, void *arg)
1404 {
1405         if (what == MOD_LOAD) {
1406                 xpt_init(NULL);
1407         } else if (what == MOD_UNLOAD) {
1408                 return EBUSY;
1409         } else {
1410                 return EOPNOTSUPP;
1411         }
1412
1413         return 0;
1414 }
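/*
 * The handler above is hooked into the kernel module framework with a
 * moduledata_t/DECLARE_MODULE pair, roughly as follows (the actual
 * declaration lives elsewhere in this file):
 *
 *	static moduledata_t cam_moduledata = {
 *		"cam", cam_module_event_handler, NULL
 *	};
 *	DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
 *
 * MOD_UNLOAD always returns EBUSY because the XPT cannot be safely
 * torn down once SIMs and peripherals have registered with it.
 */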
1415
1416 /* thread to handle bus rescans */
1417 static TAILQ_HEAD(, ccb_hdr) ccb_scanq;
1418 static void
1419 xpt_scanner_thread(void *dummy)
1420 {
1421         mtx_lock(&Giant);
1422         for (;;) {
1423                 union ccb *ccb;
1424                 tsleep(&ccb_scanq, PRIBIO, "ccb_scanq", 0);
1425                 while ((ccb = (union ccb *)TAILQ_FIRST(&ccb_scanq)) != NULL) {
1426                         TAILQ_REMOVE(&ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1427                         ccb->ccb_h.func_code = XPT_SCAN_BUS;
1428                         ccb->ccb_h.cbfcnp = xptdone;
1429                         xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
1430                         cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1431                         xpt_free_path(ccb->ccb_h.path);
1432                         xpt_free_ccb(ccb);
1433                 }
1434         }
1435 }
1436
1437 void
1438 xpt_rescan(union ccb *ccb)
1439 {
1440         struct ccb_hdr *hdr;
1441         GIANT_REQUIRED;
1442         /*
1443          * Don't make duplicate entries for the same paths.
1444          */
1445         TAILQ_FOREACH(hdr, &ccb_scanq, sim_links.tqe) {
1446                 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
1447                         xpt_print(ccb->ccb_h.path, "rescan already queued\n");
1448                         xpt_free_path(ccb->ccb_h.path);
1449                         xpt_free_ccb(ccb);
1450                         return;
1451                 }
1452         }
1453         TAILQ_INSERT_TAIL(&ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1454         wakeup(&ccb_scanq);
1455 }
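/*
 * A minimal caller sketch for xpt_rescan() (assumes Giant is held, as
 * required; path_id stands in for the caller's SIM path id).  The CCB
 * and its path are consumed: the scanner thread, or the duplicate
 * check above, frees both.
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
 *		xpt_free_ccb(ccb);
 *		return;
 *	}
 *	xpt_rescan(ccb);
 */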
1456
1457 /* Functions accessed by the peripheral drivers */
1458 static void
1459 xpt_init(void *dummy)
1460 {
1461         struct cam_sim *xpt_sim;
1462         struct cam_path *path;
1463         struct cam_devq *devq;
1464         cam_status status;
1465
1466         TAILQ_INIT(&xpt_busses);
1467         TAILQ_INIT(&cam_bioq);
1468         SLIST_INIT(&ccb_freeq);
1469         TAILQ_INIT(&ccb_scanq);
1470         STAILQ_INIT(&highpowerq);
1471
1472         mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);
1473
1474         /*
1475          * The xpt layer is, itself, the equivalent of a SIM.
1476          * Allow 16 ccbs in the ccb pool for it.  This should
1477          * give decent parallelism when we probe busses and
1478          * perform other XPT functions.
1479          */
1480         devq = cam_simq_alloc(16);
1481         xpt_sim = cam_sim_alloc(xptaction,
1482                                 xptpoll,
1483                                 "xpt",
1484                                 /*softc*/NULL,
1485                                 /*unit*/0,
1486                                 /*max_dev_transactions*/0,
1487                                 /*max_tagged_dev_transactions*/0,
1488                                 devq);
1489         xpt_max_ccbs = 16;
1490                                 
1491         if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
1492                 printf("xpt_init: xpt_bus_register failed with status %#x,"
1493                        " failing attach\n", status);
1494                 return;
1495         }
1496
1497         /*
1498          * Looking at the XPT from the SIM layer, the XPT is
1499          * the equivalent of a peripheral driver.  Allocate
1500          * a peripheral driver entry for us.
1501          */
1502         if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1503                                       CAM_TARGET_WILDCARD,
1504                                       CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1505                 printf("xpt_init: xpt_create_path failed with status %#x,"
1506                        " failing attach\n", status);
1507                 return;
1508         }
1509
1510         cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1511                          path, NULL, 0, NULL);
1512         xpt_free_path(path);
1513
1514         xpt_sim->softc = xpt_periph;
1515
1516         /*
1517          * Register a callback for when interrupts are enabled.
1518          */
1519         xpt_config_hook =
1520             (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1521                                               M_TEMP, M_NOWAIT | M_ZERO);
1522         if (xpt_config_hook == NULL) {
1523                 printf("xpt_init: Cannot malloc config hook "
1524                        "- failing attach\n");
1525                 return;
1526         }
1527
1528         xpt_config_hook->ich_func = xpt_config;
1529         if (config_intrhook_establish(xpt_config_hook) != 0) {
1530                 free(xpt_config_hook, M_TEMP);
1531                 printf("xpt_init: config_intrhook_establish failed "
1532                        "- failing attach\n");
1533         }
1534
1535         /* fire up rescan thread */
1536         if (kthread_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
1537                 printf("xpt_init: failed to create rescan thread\n");
1538         }
1539         /* Install our software interrupt handlers */
1540         swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
1541 }
1542
1543 static cam_status
1544 xptregister(struct cam_periph *periph, void *arg)
1545 {
1546         if (periph == NULL) {
1547                 printf("xptregister: periph was NULL!!\n");
1548                 return(CAM_REQ_CMP_ERR);
1549         }
1550
1551         periph->softc = NULL;
1552
1553         xpt_periph = periph;
1554
1555         return(CAM_REQ_CMP);
1556 }
1557
1558 int32_t
1559 xpt_add_periph(struct cam_periph *periph)
1560 {
1561         struct cam_ed *device;
1562         int32_t  status;
1563         struct periph_list *periph_head;
1564
1565         GIANT_REQUIRED;
1566
1567         device = periph->path->device;
1568
1569         status = CAM_REQ_CMP;
1570
1571         if (device != NULL) {
1572                 int s;
1573
1574                 periph_head = &device->periphs;
1575
1576                 /*
1577                  * Make room for this peripheral
1578                  * so it will fit in the queue
1579                  * when it's scheduled to run
1580                  */
1581                 s = splsoftcam();
1582                 status = camq_resize(&device->drvq,
1583                                      device->drvq.array_size + 1);
1584
1585                 device->generation++;
1586
1587                 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1588
1589                 splx(s);
1590         }
1591
1592         xsoftc.generation++;
1593
1594         return (status);
1595 }
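/*
 * xpt_add_periph() above and xpt_remove_periph() below are normally
 * invoked from the peripheral allocation and free paths in
 * cam_periph.c rather than directly by peripheral drivers.  Both bump
 * xsoftc.generation, which lets long-running traversals such as the
 * CAMGETPASSTHRU handler detect that the peripheral lists changed
 * under them.
 */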
1596
1597 void
1598 xpt_remove_periph(struct cam_periph *periph)
1599 {
1600         struct cam_ed *device;
1601
1602         GIANT_REQUIRED;
1603
1604         device = periph->path->device;
1605
1606         if (device != NULL) {
1607                 int s;
1608                 struct periph_list *periph_head;
1609
1610                 periph_head = &device->periphs;
1611                 
1612                 /* Release the slot for this peripheral */
1613                 s = splsoftcam();
1614                 camq_resize(&device->drvq, device->drvq.array_size - 1);
1615
1616                 device->generation++;
1617
1618                 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1619
1620                 splx(s);
1621         }
1622
1623         xsoftc.generation++;
1624
1625 }
1626
1627
1628 void
1629 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1630 {
1631         struct  ccb_pathinq cpi;
1632         struct  ccb_trans_settings cts;
1633         struct  cam_path *path;
1634         u_int   speed;
1635         u_int   freq;
1636         u_int   mb;
1637         int     s;
1638
1639         GIANT_REQUIRED;
1640
1641         path = periph->path;
1642         /*
1643          * To ensure that this is printed in one piece,
1644          * mask out CAM interrupts.
1645          */
1646         s = splsoftcam();
1647         printf("%s%d at %s%d bus %d target %d lun %d\n",
1648                periph->periph_name, periph->unit_number,
1649                path->bus->sim->sim_name,
1650                path->bus->sim->unit_number,
1651                path->bus->sim->bus_id,
1652                path->target->target_id,
1653                path->device->lun_id);
1654         printf("%s%d: ", periph->periph_name, periph->unit_number);
1655         scsi_print_inquiry(&path->device->inq_data);
1656         if (bootverbose && path->device->serial_num_len > 0) {
1657                 /* Don't wrap the screen - print only the first 60 chars */
1658                 printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1659                        periph->unit_number, path->device->serial_num);
1660         }
1661         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1662         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1663         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1664         xpt_action((union ccb*)&cts);
1665         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1666                 splx(s);
1667                 return;
1667         }
1668
1669         /* Ask the SIM for its base transfer speed */
1670         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1671         cpi.ccb_h.func_code = XPT_PATH_INQ;
1672         xpt_action((union ccb *)&cpi);
1673
1674         speed = cpi.base_transfer_speed;
1675         freq = 0;
1676         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1677                 struct  ccb_trans_settings_spi *spi;
1678
1679                 spi = &cts.xport_specific.spi;
1680                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1681                   && spi->sync_offset != 0) {
1682                         freq = scsi_calc_syncsrate(spi->sync_period);
1683                         speed = freq;
1684                 }
1685
1686                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1687                         speed *= (0x01 << spi->bus_width);
1688         }
1689
1690         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1691                 struct  ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1692                 if (fc->valid & CTS_FC_VALID_SPEED) {
1693                         speed = fc->bitrate;
1694                 }
1695         }
1696
1697         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1698                 struct  ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1699                 if (sas->valid & CTS_SAS_VALID_SPEED) {
1700                         speed = sas->bitrate;
1701                 }
1702         }
1703
1704         mb = speed / 1000;
1705         if (mb > 0)
1706                 printf("%s%d: %d.%03dMB/s transfers",
1707                        periph->periph_name, periph->unit_number,
1708                        mb, speed % 1000);
1709         else
1710                 printf("%s%d: %dKB/s transfers", periph->periph_name,
1711                        periph->unit_number, speed);
1712         /* Report additional information about SPI connections */
1713         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1714                 struct  ccb_trans_settings_spi *spi;
1715
1716                 spi = &cts.xport_specific.spi;
1717                 if (freq != 0) {
1718                         printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1719                                freq % 1000,
1720                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1721                              ? " DT" : "",
1722                                spi->sync_offset);
1723                 }
1724                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1725                  && spi->bus_width > 0) {
1726                         if (freq != 0) {
1727                                 printf(", ");
1728                         } else {
1729                                 printf(" (");
1730                         }
1731                         printf("%dbit)", 8 * (0x01 << spi->bus_width));
1732                 } else if (freq != 0) {
1733                         printf(")");
1734                 }
1735         }
1736         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1737                 struct  ccb_trans_settings_fc *fc;
1738
1739                 fc = &cts.xport_specific.fc;
1740                 if (fc->valid & CTS_FC_VALID_WWNN)
1741                         printf(" WWNN 0x%llx", (unsigned long long) fc->wwnn);
1742                 if (fc->valid & CTS_FC_VALID_WWPN)
1743                         printf(" WWPN 0x%llx", (unsigned long long) fc->wwpn);
1744                 if (fc->valid & CTS_FC_VALID_PORT)
1745                         printf(" PortID 0x%x", fc->port);
1746         }
1747
1748         if (path->device->inq_flags & SID_CmdQue
1749          || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1750                 printf("\n%s%d: Command Queueing Enabled",
1751                        periph->periph_name, periph->unit_number);
1752         }
1753         printf("\n");
1754
1755         /*
1756          * We only want to print the caller's announce string if they've
1757          * passed one in.
1758          */
1759         if (announce_string != NULL)
1760                 printf("%s%d: %s\n", periph->periph_name,
1761                        periph->unit_number, announce_string);
1762         splx(s);
1763 }
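/*
 * For reference, the code above renders a boot-time announcement
 * along these lines (illustrative values for an Ultra2-wide SPI
 * disk):
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <SEAGATE ST39102LW 0006> Fixed Direct Access SCSI-2 device
 *	da0: 80.000MB/s transfers (40.000MHz, offset 15, 16bit)
 *	da0: Command Queueing Enabled
 */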
1764
1765 static dev_match_ret
1766 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1767             struct cam_eb *bus)
1768 {
1769         dev_match_ret retval;
1770         int i;
1771
1772         retval = DM_RET_NONE;
1773
1774         /*
1775          * If we aren't given something to match against, that's an error.
1776          */
1777         if (bus == NULL)
1778                 return(DM_RET_ERROR);
1779
1780         /*
1781          * If there are no match entries, then this bus matches no
1782          * matter what.
1783          */
1784         if ((patterns == NULL) || (num_patterns == 0))
1785                 return(DM_RET_DESCEND | DM_RET_COPY);
1786
1787         for (i = 0; i < num_patterns; i++) {
1788                 struct bus_match_pattern *cur_pattern;
1789
1790                 /*
1791                  * If the pattern in question isn't for a bus node, we
1792                  * aren't interested.  However, we do indicate to the
1793                  * calling routine that we should continue descending the
1794                  * tree, since the user wants to match against lower-level
1795                  * EDT elements.
1796                  */
1797                 if (patterns[i].type != DEV_MATCH_BUS) {
1798                         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1799                                 retval |= DM_RET_DESCEND;
1800                         continue;
1801                 }
1802
1803                 cur_pattern = &patterns[i].pattern.bus_pattern;
1804
1805                 /*
1806                  * If they want to match any bus node, we give them any
1807                  * bus node.
1808                  */
1809                 if (cur_pattern->flags == BUS_MATCH_ANY) {
1810                         /* set the copy flag */
1811                         retval |= DM_RET_COPY;
1812
1813                         /*
1814                          * If we've already decided on an action, go ahead
1815                          * and return.
1816                          */
1817                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1818                                 return(retval);
1819                 }
1820
1821                 /*
1822                  * A BUS_MATCH_NONE pattern can never match; skip it.
1823                  */
1824                 if (cur_pattern->flags == BUS_MATCH_NONE)
1825                         continue;
1826
1827                 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1828                  && (cur_pattern->path_id != bus->path_id))
1829                         continue;
1830
1831                 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1832                  && (cur_pattern->bus_id != bus->sim->bus_id))
1833                         continue;
1834
1835                 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1836                  && (cur_pattern->unit_number != bus->sim->unit_number))
1837                         continue;
1838
1839                 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1840                  && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1841                              DEV_IDLEN) != 0))
1842                         continue;
1843
1844                 /*
1845                  * If we get to this point, the user definitely wants 
1846                  * information on this bus.  So tell the caller to copy the
1847                  * data out.
1848                  */
1849                 retval |= DM_RET_COPY;
1850
1851                 /*
1852                  * If the return action has been set to descend, then we
1853                  * know that we've already seen a non-bus matching
1854                  * expression, therefore we need to further descend the tree.
1855                  * This won't change by continuing around the loop, so we
1856                  * go ahead and return.  If we haven't seen a non-bus
1857                  * matching expression, we keep going around the loop until
1858                  * we exhaust the matching expressions.  We'll set the stop
1859                  * flag once we fall out of the loop.
1860                  */
1861                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1862                         return(retval);
1863         }
1864
1865         /*
1866          * If the return action hasn't been set to descend yet, that means
1867          * we haven't seen anything other than bus matching patterns.  So
1868          * tell the caller to stop descending the tree -- the user doesn't
1869          * want to match against lower level tree elements.
1870          */
1871         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1872                 retval |= DM_RET_STOP;
1873
1874         return(retval);
1875 }
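/*
 * Sketch of a bus pattern as an XPT_DEV_MATCH consumer might fill it
 * in for the matcher above (illustrative field values):
 *
 *	struct dev_match_pattern pat;
 *
 *	pat.type = DEV_MATCH_BUS;
 *	pat.pattern.bus_pattern.flags = BUS_MATCH_NAME | BUS_MATCH_UNIT;
 *	pat.pattern.bus_pattern.unit_number = 0;
 *	strncpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
 *
 * This copies out only the bus node for ahc0 and, since only bus
 * patterns are present, stops the EDT descent at the bus level.
 */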
1876
1877 static dev_match_ret
1878 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1879                struct cam_ed *device)
1880 {
1881         dev_match_ret retval;
1882         int i;
1883
1884         retval = DM_RET_NONE;
1885
1886         /*
1887          * If we aren't given something to match against, that's an error.
1888          */
1889         if (device == NULL)
1890                 return(DM_RET_ERROR);
1891
1892         /*
1893          * If there are no match entries, then this device matches no
1894          * matter what.
1895          */
1896         if ((patterns == NULL) || (num_patterns == 0))
1897                 return(DM_RET_DESCEND | DM_RET_COPY);
1898
1899         for (i = 0; i < num_patterns; i++) {
1900                 struct device_match_pattern *cur_pattern;
1901
1902                 /*
1903                  * If the pattern in question isn't for a device node, we
1904                  * aren't interested.
1905                  */
1906                 if (patterns[i].type != DEV_MATCH_DEVICE) {
1907                         if ((patterns[i].type == DEV_MATCH_PERIPH)
1908                          && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1909                                 retval |= DM_RET_DESCEND;
1910                         continue;
1911                 }
1912
1913                 cur_pattern = &patterns[i].pattern.device_pattern;
1914
1915                 /*
1916                  * If they want to match any device node, we give them any
1917                  * device node.
1918                  */
1919                 if (cur_pattern->flags == DEV_MATCH_ANY) {
1920                         /* set the copy flag */
1921                         retval |= DM_RET_COPY;
1922
1924                         /*
1925                          * If we've already decided on an action, go ahead
1926                          * and return.
1927                          */
1928                         if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1929                                 return(retval);
1930                 }
1931
1932                 /*
1933                  * A DEV_MATCH_NONE pattern can never match; skip it.
1934                  */
1935                 if (cur_pattern->flags == DEV_MATCH_NONE)
1936                         continue;
1937
1938                 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1939                  && (cur_pattern->path_id != device->target->bus->path_id))
1940                         continue;
1941
1942                 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1943                  && (cur_pattern->target_id != device->target->target_id))
1944                         continue;
1945
1946                 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1947                  && (cur_pattern->target_lun != device->lun_id))
1948                         continue;
1949
1950                 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1951                  && (cam_quirkmatch((caddr_t)&device->inq_data,
1952                                     (caddr_t)&cur_pattern->inq_pat,
1953                                     1, sizeof(cur_pattern->inq_pat),
1954                                     scsi_static_inquiry_match) == NULL))
1955                         continue;
1956
1957                 /*
1958                  * If we get to this point, the user definitely wants 
1959                  * information on this device.  So tell the caller to copy
1960                  * the data out.
1961                  */
1962                 retval |= DM_RET_COPY;
1963
1964                 /*
1965                  * If the return action has been set to descend, then we
1966                  * know that we've already seen a peripheral matching
1967                  * expression, therefore we need to further descend the tree.
1968                  * This won't change by continuing around the loop, so we
1969                  * go ahead and return.  If we haven't seen a peripheral
1970                  * matching expression, we keep going around the loop until
1971                  * we exhaust the matching expressions.  We'll set the stop
1972                  * flag once we fall out of the loop.
1973                  */
1974                 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1975                         return(retval);
1976         }
1977
1978         /*
1979          * If the return action hasn't been set to descend yet, that means
1980          * we haven't seen any peripheral matching patterns.  So tell the
1981          * caller to stop descending the tree -- the user doesn't want to
1982          * match against lower level tree elements.
1983          */
1984         if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1985                 retval |= DM_RET_STOP;
1986
1987         return(retval);
1988 }
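/*
 * Sketch of a device pattern exercising the inquiry-match path above.
 * The static inquiry pattern fields accept shell-style '*' and '?'
 * wildcards (illustrative values):
 *
 *	struct dev_match_pattern pat;
 *
 *	pat.type = DEV_MATCH_DEVICE;
 *	pat.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
 *	pat.pattern.device_pattern.inq_pat.type = T_DIRECT;
 *	pat.pattern.device_pattern.inq_pat.media_type = SIP_MEDIA_FIXED;
 *	strcpy(pat.pattern.device_pattern.inq_pat.vendor, "SEAGATE");
 *	strcpy(pat.pattern.device_pattern.inq_pat.product, "*");
 *	strcpy(pat.pattern.device_pattern.inq_pat.revision, "*");
 */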
1989
1990 /*
1991  * Match a single peripheral against any number of match patterns.
1992  */
1993 static dev_match_ret
1994 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1995                struct cam_periph *periph)
1996 {
1997         dev_match_ret retval;
1998         int i;
1999
2000         /*
2001          * If we aren't given something to match against, that's an error.
2002          */
2003         if (periph == NULL)
2004                 return(DM_RET_ERROR);
2005
2006         /*
2007          * If there are no match entries, then this peripheral matches no
2008          * matter what.
2009          */
2010         if ((patterns == NULL) || (num_patterns == 0))
2011                 return(DM_RET_STOP | DM_RET_COPY);
2012
2013         /*
2014          * There aren't any nodes below a peripheral node, so there's no
2015          * reason to descend the tree any further.
2016          */
2017         retval = DM_RET_STOP;
2018
2019         for (i = 0; i < num_patterns; i++) {
2020                 struct periph_match_pattern *cur_pattern;
2021
2022                 /*
2023                  * If the pattern in question isn't for a peripheral, we
2024                  * aren't interested.
2025                  */
2026                 if (patterns[i].type != DEV_MATCH_PERIPH)
2027                         continue;
2028
2029                 cur_pattern = &patterns[i].pattern.periph_pattern;
2030
2031                 /*
2032                  * If they want to match on anything, then we will do so.
2033                  */
2034                 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2035                         /* set the copy flag */
2036                         retval |= DM_RET_COPY;
2037
2038                         /*
2039                          * We've already set the return action to stop,
2040                          * since there are no nodes below peripherals in
2041                          * the tree.
2042                          */
2043                         return(retval);
2044                 }
2045
2046                 /*
2047                  * A PERIPH_MATCH_NONE pattern can never match; skip it.
2048                  */
2049                 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2050                         continue;
2051
2052                 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2053                  && (cur_pattern->path_id != periph->path->bus->path_id))
2054                         continue;
2055
2056                 /*
2057                  * For the target and lun IDs, we have to make sure the
2058                  * target and lun pointers aren't NULL.  The xpt peripheral
2059                  * has a wildcard target and device.
2060                  */
2061                 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2062                  && ((periph->path->target == NULL)
2063                  ||(cur_pattern->target_id != periph->path->target->target_id)))
2064                         continue;
2065
2066                 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2067                  && ((periph->path->device == NULL)
2068                  || (cur_pattern->target_lun != periph->path->device->lun_id)))
2069                         continue;
2070
2071                 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2072                  && (cur_pattern->unit_number != periph->unit_number))
2073                         continue;
2074
2075                 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2076                  && (strncmp(cur_pattern->periph_name, periph->periph_name,
2077                              DEV_IDLEN) != 0))
2078                         continue;
2079
2080                 /*
2081                  * If we get to this point, the user definitely wants 
2082                  * information on this peripheral.  So tell the caller to
2083                  * copy the data out.
2084                  */
2085                 retval |= DM_RET_COPY;
2086
2087                 /*
2088                  * The return action has already been set to stop, since
2089                  * peripherals don't have any nodes below them in the EDT.
2090                  */
2091                 return(retval);
2092         }
2093
2094         /*
2095          * If we get to this point, the peripheral that was passed in
2096          * doesn't match any of the patterns.
2097          */
2098         return(retval);
2099 }
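/*
 * Sketch of the corresponding peripheral pattern (illustrative
 * values):
 *
 *	struct dev_match_pattern pat;
 *
 *	pat.type = DEV_MATCH_PERIPH;
 *	pat.pattern.periph_pattern.flags =
 *	    PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
 *	pat.pattern.periph_pattern.unit_number = 0;
 *	strncpy(pat.pattern.periph_pattern.periph_name, "pass", DEV_IDLEN);
 *
 * This matches pass0 only; since peripherals are leaves of the EDT,
 * the stop action set above is always returned along with the match.
 */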
2100
2101 static int
2102 xptedtbusfunc(struct cam_eb *bus, void *arg)
2103 {
2104         struct ccb_dev_match *cdm;
2105         dev_match_ret retval;
2106
2107         cdm = (struct ccb_dev_match *)arg;
2108
2109         /*
2110          * If our position is for something deeper in the tree, that means
2111          * that we've already seen this node.  So, we keep going down.
2112          */
2113         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2114          && (cdm->pos.cookie.bus == bus)
2115          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2116          && (cdm->pos.cookie.target != NULL))
2117                 retval = DM_RET_DESCEND;
2118         else
2119                 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2120
2121         /*
2122          * If we got an error, bail out of the search.
2123          */
2124         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2125                 cdm->status = CAM_DEV_MATCH_ERROR;
2126                 return(0);
2127         }
2128
2129         /*
2130          * If the copy flag is set, copy this bus out.
2131          */
2132         if (retval & DM_RET_COPY) {
2133                 int spaceleft, j;
2134
2135                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2136                         sizeof(struct dev_match_result));
2137
2138                 /*
2139                  * If we don't have enough space to put in another
2140                  * match result, save our position and tell the
2141                  * user there are more devices to check.
2142                  */
2143                 if (spaceleft < sizeof(struct dev_match_result)) {
2144                         bzero(&cdm->pos, sizeof(cdm->pos));
2145                         cdm->pos.position_type = 
2146                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2147
2148                         cdm->pos.cookie.bus = bus;
2149                         cdm->pos.generations[CAM_BUS_GENERATION]=
2150                                 bus_generation;
2151                         cdm->status = CAM_DEV_MATCH_MORE;
2152                         return(0);
2153                 }
2154                 j = cdm->num_matches;
2155                 cdm->num_matches++;
2156                 cdm->matches[j].type = DEV_MATCH_BUS;
2157                 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2158                 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2159                 cdm->matches[j].result.bus_result.unit_number =
2160                         bus->sim->unit_number;
2161                 strncpy(cdm->matches[j].result.bus_result.dev_name,
2162                         bus->sim->sim_name, DEV_IDLEN);
2163         }
2164
2165         /*
2166          * If the user is only interested in busses, there's no
2167          * reason to descend to the next level in the tree.
2168          */
2169         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2170                 return(1);
2171
2172         /*
2173          * If there is a target generation recorded, check it to
2174          * make sure the target list hasn't changed.
2175          */
2176         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2177          && (bus == cdm->pos.cookie.bus)
2178          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2179          && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2180          && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2181              bus->generation)) {
2182                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2183                 return(0);
2184         }
2185
2186         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2187          && (cdm->pos.cookie.bus == bus)
2188          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2189          && (cdm->pos.cookie.target != NULL))
2190                 return(xpttargettraverse(bus,
2191                                         (struct cam_et *)cdm->pos.cookie.target,
2192                                          xptedttargetfunc, arg));
2193         else
2194                 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2195 }
2196
2197 static int
2198 xptedttargetfunc(struct cam_et *target, void *arg)
2199 {
2200         struct ccb_dev_match *cdm;
2201
2202         cdm = (struct ccb_dev_match *)arg;
2203
2204         /*
2205          * If there is a device list generation recorded, check it to
2206          * make sure the device list hasn't changed.
2207          */
2208         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2209          && (cdm->pos.cookie.bus == target->bus)
2210          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2211          && (cdm->pos.cookie.target == target)
2212          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2213          && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2214          && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2215              target->generation)) {
2216                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2217                 return(0);
2218         }
2219
2220         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2221          && (cdm->pos.cookie.bus == target->bus)
2222          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2223          && (cdm->pos.cookie.target == target)
2224          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2225          && (cdm->pos.cookie.device != NULL))
2226                 return(xptdevicetraverse(target,
2227                                         (struct cam_ed *)cdm->pos.cookie.device,
2228                                          xptedtdevicefunc, arg));
2229         else
2230                 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2231 }
2232
2233 static int
2234 xptedtdevicefunc(struct cam_ed *device, void *arg)
2235 {
2237         struct ccb_dev_match *cdm;
2238         dev_match_ret retval;
2239
2240         cdm = (struct ccb_dev_match *)arg;
2241
2242         /*
2243          * If our position is for something deeper in the tree, that means
2244          * that we've already seen this node.  So, we keep going down.
2245          */
2246         if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2247          && (cdm->pos.cookie.device == device)
2248          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2249          && (cdm->pos.cookie.periph != NULL))
2250                 retval = DM_RET_DESCEND;
2251         else
2252                 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2253                                         device);
2254
2255         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2256                 cdm->status = CAM_DEV_MATCH_ERROR;
2257                 return(0);
2258         }
2259
2260         /*
2261          * If the copy flag is set, copy this device out.
2262          */
2263         if (retval & DM_RET_COPY) {
2264                 int spaceleft, j;
2265
2266                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2267                         sizeof(struct dev_match_result));
2268
2269                 /*
2270                  * If we don't have enough space to put in another
2271                  * match result, save our position and tell the
2272                  * user there are more devices to check.
2273                  */
2274                 if (spaceleft < sizeof(struct dev_match_result)) {
2275                         bzero(&cdm->pos, sizeof(cdm->pos));
2276                         cdm->pos.position_type = 
2277                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2278                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2279
2280                         cdm->pos.cookie.bus = device->target->bus;
2281                         cdm->pos.generations[CAM_BUS_GENERATION]=
2282                                 bus_generation;
2283                         cdm->pos.cookie.target = device->target;
2284                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2285                                 device->target->bus->generation;
2286                         cdm->pos.cookie.device = device;
2287                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2288                                 device->target->generation;
2289                         cdm->status = CAM_DEV_MATCH_MORE;
2290                         return(0);
2291                 }
2292                 j = cdm->num_matches;
2293                 cdm->num_matches++;
2294                 cdm->matches[j].type = DEV_MATCH_DEVICE;
2295                 cdm->matches[j].result.device_result.path_id =
2296                         device->target->bus->path_id;
2297                 cdm->matches[j].result.device_result.target_id =
2298                         device->target->target_id;
2299                 cdm->matches[j].result.device_result.target_lun =
2300                         device->lun_id;
2301                 bcopy(&device->inq_data,
2302                       &cdm->matches[j].result.device_result.inq_data,
2303                       sizeof(struct scsi_inquiry_data));
2304
2305                 /* Let the user know whether this device is unconfigured */
2306                 if (device->flags & CAM_DEV_UNCONFIGURED)
2307                         cdm->matches[j].result.device_result.flags =
2308                                 DEV_RESULT_UNCONFIGURED;
2309                 else
2310                         cdm->matches[j].result.device_result.flags =
2311                                 DEV_RESULT_NOFLAG;
2312         }
2313
2314         /*
2315          * If the user isn't interested in peripherals, don't descend
2316          * the tree any further.
2317          */
2318         if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2319                 return(1);
2320
2321         /*
2322          * If there is a peripheral list generation recorded, make sure
2323          * it hasn't changed.
2324          */
2325         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2326          && (device->target->bus == cdm->pos.cookie.bus)
2327          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2328          && (device->target == cdm->pos.cookie.target)
2329          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2330          && (device == cdm->pos.cookie.device)
2331          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2332          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2333          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2334              device->generation)){
2335                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2336                 return(0);
2337         }
2338
2339         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2340          && (cdm->pos.cookie.bus == device->target->bus)
2341          && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2342          && (cdm->pos.cookie.target == device->target)
2343          && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2344          && (cdm->pos.cookie.device == device)
2345          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2346          && (cdm->pos.cookie.periph != NULL))
2347                 return(xptperiphtraverse(device,
2348                                 (struct cam_periph *)cdm->pos.cookie.periph,
2349                                 xptedtperiphfunc, arg));
2350         else
2351                 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2352 }
2353
2354 static int
2355 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2356 {
2357         struct ccb_dev_match *cdm;
2358         dev_match_ret retval;
2359
2360         cdm = (struct ccb_dev_match *)arg;
2361
2362         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2363
2364         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2365                 cdm->status = CAM_DEV_MATCH_ERROR;
2366                 return(0);
2367         }
2368
2369         /*
2370          * If the copy flag is set, copy this peripheral out.
2371          */
2372         if (retval & DM_RET_COPY) {
2373                 int spaceleft, j;
2374
2375                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2376                         sizeof(struct dev_match_result));
2377
2378                 /*
2379                  * If we don't have enough space to put in another
2380                  * match result, save our position and tell the
2381                  * user there are more devices to check.
2382                  */
2383                 if (spaceleft < sizeof(struct dev_match_result)) {
2384                         bzero(&cdm->pos, sizeof(cdm->pos));
2385                         cdm->pos.position_type = 
2386                                 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2387                                 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2388                                 CAM_DEV_POS_PERIPH;
2389
2390                         cdm->pos.cookie.bus = periph->path->bus;
2391                         cdm->pos.generations[CAM_BUS_GENERATION]=
2392                                 bus_generation;
2393                         cdm->pos.cookie.target = periph->path->target;
2394                         cdm->pos.generations[CAM_TARGET_GENERATION] =
2395                                 periph->path->bus->generation;
2396                         cdm->pos.cookie.device = periph->path->device;
2397                         cdm->pos.generations[CAM_DEV_GENERATION] = 
2398                                 periph->path->target->generation;
2399                         cdm->pos.cookie.periph = periph;
2400                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2401                                 periph->path->device->generation;
2402                         cdm->status = CAM_DEV_MATCH_MORE;
2403                         return(0);
2404                 }
2405
2406                 j = cdm->num_matches;
2407                 cdm->num_matches++;
2408                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2409                 cdm->matches[j].result.periph_result.path_id =
2410                         periph->path->bus->path_id;
2411                 cdm->matches[j].result.periph_result.target_id =
2412                         periph->path->target->target_id;
2413                 cdm->matches[j].result.periph_result.target_lun =
2414                         periph->path->device->lun_id;
2415                 cdm->matches[j].result.periph_result.unit_number =
2416                         periph->unit_number;
2417                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2418                         periph->periph_name, DEV_IDLEN);
2419         }
2420
2421         return(1);
2422 }
2423
2424 static int
2425 xptedtmatch(struct ccb_dev_match *cdm)
2426 {
2427         int ret;
2428
2429         cdm->num_matches = 0;
2430
2431         /*
2432          * Check the bus list generation.  If it has changed, the user
2433          * needs to reset everything and start over.
2434          */
2435         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2436          && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2437          && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2438                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2439                 return(0);
2440         }
2441
2442         if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2443          && (cdm->pos.cookie.bus != NULL))
2444                 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2445                                      xptedtbusfunc, cdm);
2446         else
2447                 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2448
2449         /*
2450          * If we get back 0, that means that we had to stop before fully
2451          * traversing the EDT.  It also means that one of the subroutines
2452          * has set the status field to the proper value.  If we get back 1,
2453          * we've fully traversed the EDT and copied out any matching entries.
2454          */
2455         if (ret == 1)
2456                 cdm->status = CAM_DEV_MATCH_LAST;
2457
2458         return(ret);
2459 }
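/*
 * Consumers drive xptedtmatch() iteratively.  A hedged sketch of the
 * CAM_DEV_MATCH_MORE resumption protocol, with CCB setup and error
 * handling elided; consume() stands in for the caller's per-match
 * processing:
 *
 *	struct ccb_dev_match *cdm = &ccb->cdm;
 *
 *	do {
 *		xpt_action((union ccb *)ccb);
 *		consume(cdm->matches, cdm->num_matches);
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);
 *
 * The saved cdm->pos cookies and generation counts let the next
 * XPT_DEV_MATCH call resume where the previous one ran out of match
 * buffer space; CAM_DEV_MATCH_LIST_CHANGED tells the caller to start
 * over from scratch.
 */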
2460
2461 static int
2462 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2463 {
2464         struct ccb_dev_match *cdm;
2465
2466         cdm = (struct ccb_dev_match *)arg;
2467
2468         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2469          && (cdm->pos.cookie.pdrv == pdrv)
2470          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2471          && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2472          && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2473              (*pdrv)->generation)) {
2474                 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2475                 return(0);
2476         }
2477
2478         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2479          && (cdm->pos.cookie.pdrv == pdrv)
2480          && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2481          && (cdm->pos.cookie.periph != NULL))
2482                 return(xptpdperiphtraverse(pdrv,
2483                                 (struct cam_periph *)cdm->pos.cookie.periph,
2484                                 xptplistperiphfunc, arg));
2485         else
2486                 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2487 }
2488
2489 static int
2490 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2491 {
2492         struct ccb_dev_match *cdm;
2493         dev_match_ret retval;
2494
2495         cdm = (struct ccb_dev_match *)arg;
2496
2497         retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2498
2499         if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2500                 cdm->status = CAM_DEV_MATCH_ERROR;
2501                 return(0);
2502         }
2503
2504         /*
2505          * If the copy flag is set, copy this peripheral out.
2506          */
2507         if (retval & DM_RET_COPY) {
2508                 int spaceleft, j;
2509
2510                 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2511                         sizeof(struct dev_match_result));
2512
2513                 /*
2514                  * If we don't have enough space to put in another
2515                  * match result, save our position and tell the
2516                  * user there are more devices to check.
2517                  */
2518                 if (spaceleft < sizeof(struct dev_match_result)) {
2519                         struct periph_driver **pdrv;
2520
2521                         pdrv = NULL;
2522                         bzero(&cdm->pos, sizeof(cdm->pos));
2523                         cdm->pos.position_type = 
2524                                 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2525                                 CAM_DEV_POS_PERIPH;
2526
2527                         /*
2528                          * A linear search may look wasteful here, but
2529                          * there are very few peripheral drivers, and
2530                          * bloating every peripheral structure with a
2531                          * pointer back to its parent driver's linker set
2532                          * entry would cost more in the long run than
2533                          * this quick lookup.
2534                          */
2535                         for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2536                                 if (strcmp((*pdrv)->driver_name,
2537                                     periph->periph_name) == 0)
2538                                         break;
2539                         }
2540
2541                         if (*pdrv == NULL) {
2542                                 cdm->status = CAM_DEV_MATCH_ERROR;
2543                                 return(0);
2544                         }
2545
2546                         cdm->pos.cookie.pdrv = pdrv;
2547                         /*
2548                          * The periph generation slot does double duty, as
2549                          * does the periph pointer slot.  They are used for
2550                          * both edt and pdrv lookups and positioning.
2551                          */
2552                         cdm->pos.cookie.periph = periph;
2553                         cdm->pos.generations[CAM_PERIPH_GENERATION] =
2554                                 (*pdrv)->generation;
2555                         cdm->status = CAM_DEV_MATCH_MORE;
2556                         return(0);
2557                 }
2558
2559                 j = cdm->num_matches;
2560                 cdm->num_matches++;
2561                 cdm->matches[j].type = DEV_MATCH_PERIPH;
2562                 cdm->matches[j].result.periph_result.path_id =
2563                         periph->path->bus->path_id;
2564
2565                 /*
2566                  * The transport layer peripheral doesn't have a target or
2567                  * lun.
2568                  */
2569                 if (periph->path->target)
2570                         cdm->matches[j].result.periph_result.target_id =
2571                                 periph->path->target->target_id;
2572                 else
2573                         cdm->matches[j].result.periph_result.target_id = -1;
2574
2575                 if (periph->path->device)
2576                         cdm->matches[j].result.periph_result.target_lun =
2577                                 periph->path->device->lun_id;
2578                 else
2579                         cdm->matches[j].result.periph_result.target_lun = -1;
2580
2581                 cdm->matches[j].result.periph_result.unit_number =
2582                         periph->unit_number;
2583                 strncpy(cdm->matches[j].result.periph_result.periph_name,
2584                         periph->periph_name, DEV_IDLEN);
2585         }
2586
2587         return(1);
2588 }
2589
2590 static int
2591 xptperiphlistmatch(struct ccb_dev_match *cdm)
2592 {
2593         int ret;
2594
2595         cdm->num_matches = 0;
2596
2597         /*
2598          * At the corresponding point in the EDT traversal (xptedtmatch),
2599          * we check the bus list generation to catch busses added or
2600          * removed since the user last sent an XPT_DEV_MATCH ccb through.
2601          * For the peripheral driver list traversal function, however, we
2602          * don't have to worry about new peripheral driver types coming or
2603          * going; they're in a linker set, and therefore can't change
2604          * without a recompile.
2605          */
2606
2607         if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2608          && (cdm->pos.cookie.pdrv != NULL))
2609                 ret = xptpdrvtraverse(
2610                                 (struct periph_driver **)cdm->pos.cookie.pdrv,
2611                                 xptplistpdrvfunc, cdm);
2612         else
2613                 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2614
2615         /*
2616          * If we get back 0, that means that we had to stop before fully
2617          * traversing the peripheral driver tree.  It also means that one of
2618          * the subroutines has set the status field to the proper value.  If
2619          * we get back 1, we've fully traversed the peripheral driver list
2620          * and copied out any matching entries.
2621          */
2622         if (ret == 1)
2623                 cdm->status = CAM_DEV_MATCH_LAST;
2624
2625         return(ret);
2626 }
2627
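/*
 * Traversal helpers for the EDT and the peripheral driver table.
 * Each walks a single level, starting either at the supplied node or
 * at the head of the relevant list, and applies tr_func to every node
 * visited.  A tr_func return of 0 aborts the walk and is propagated
 * to the caller; a return of 1 means the level was traversed to
 * completion.
 */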
2628 static int
2629 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2630 {
2631         struct cam_eb *bus, *next_bus;
2632         int retval;
2633
2634         retval = 1;
2635
2636         for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2637              bus != NULL;
2638              bus = next_bus) {
2639                 next_bus = TAILQ_NEXT(bus, links);
2640
2641                 retval = tr_func(bus, arg);
2642                 if (retval == 0)
2643                         return(retval);
2644         }
2645
2646         return(retval);
2647 }
2648
2649 static int
2650 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2651                   xpt_targetfunc_t *tr_func, void *arg)
2652 {
2653         struct cam_et *target, *next_target;
2654         int retval;
2655
2656         retval = 1;
2657         for (target = (start_target ? start_target :
2658                        TAILQ_FIRST(&bus->et_entries));
2659              target != NULL; target = next_target) {
2660
2661                 next_target = TAILQ_NEXT(target, links);
2662
2663                 retval = tr_func(target, arg);
2664
2665                 if (retval == 0)
2666                         return(retval);
2667         }
2668
2669         return(retval);
2670 }
2671
2672 static int
2673 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2674                   xpt_devicefunc_t *tr_func, void *arg)
2675 {
2676         struct cam_ed *device, *next_device;
2677         int retval;
2678
2679         retval = 1;
2680         for (device = (start_device ? start_device :
2681                        TAILQ_FIRST(&target->ed_entries));
2682              device != NULL;
2683              device = next_device) {
2684
2685                 next_device = TAILQ_NEXT(device, links);
2686
2687                 retval = tr_func(device, arg);
2688
2689                 if (retval == 0)
2690                         return(retval);
2691         }
2692
2693         return(retval);
2694 }
2695
2696 static int
2697 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2698                   xpt_periphfunc_t *tr_func, void *arg)
2699 {
2700         struct cam_periph *periph, *next_periph;
2701         int retval;
2702
2703         retval = 1;
2704
2705         for (periph = (start_periph ? start_periph :
2706                        SLIST_FIRST(&device->periphs));
2707              periph != NULL;
2708              periph = next_periph) {
2709
2710                 next_periph = SLIST_NEXT(periph, periph_links);
2711
2712                 retval = tr_func(periph, arg);
2713                 if (retval == 0)
2714                         return(retval);
2715         }
2716
2717         return(retval);
2718 }
2719
2720 static int
2721 xptpdrvtraverse(struct periph_driver **start_pdrv,
2722                 xpt_pdrvfunc_t *tr_func, void *arg)
2723 {
2724         struct periph_driver **pdrv;
2725         int retval;
2726
2727         retval = 1;
2728
2729         /*
2730          * We don't traverse the peripheral driver list like we do the
2731          * other lists, because it is a linker set, and therefore cannot be
2732          * changed during runtime.  If the peripheral driver list is ever
2733          * re-done to be something other than a linker set (i.e. it can
2734          * change while the system is running), the list traversal should
2735          * be modified to work like the other traversal functions.
2736          */
2737         for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2738              *pdrv != NULL; pdrv++) {
2739                 retval = tr_func(pdrv, arg);
2740
2741                 if (retval == 0)
2742                         return(retval);
2743         }
2744
2745         return(retval);
2746 }
2747
2748 static int
2749 xptpdperiphtraverse(struct periph_driver **pdrv,
2750                     struct cam_periph *start_periph,
2751                     xpt_periphfunc_t *tr_func, void *arg)
2752 {
2753         struct cam_periph *periph, *next_periph;
2754         int retval;
2755
2756         retval = 1;
2757
2758         for (periph = (start_periph ? start_periph :
2759              TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2760              periph = next_periph) {
2761
2762                 next_periph = TAILQ_NEXT(periph, unit_links);
2763
2764                 retval = tr_func(periph, arg);
2765                 if (retval == 0)
2766                         return(retval);
2767         }
2768         return(retval);
2769 }
2770
2771 static int
2772 xptdefbusfunc(struct cam_eb *bus, void *arg)
2773 {
2774         struct xpt_traverse_config *tr_config;
2775
2776         tr_config = (struct xpt_traverse_config *)arg;
2777
2778         if (tr_config->depth == XPT_DEPTH_BUS) {
2779                 xpt_busfunc_t *tr_func;
2780
2781                 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2782
2783                 return(tr_func(bus, tr_config->tr_arg));
2784         } else
2785                 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2786 }
2787
2788 static int
2789 xptdeftargetfunc(struct cam_et *target, void *arg)
2790 {
2791         struct xpt_traverse_config *tr_config;
2792
2793         tr_config = (struct xpt_traverse_config *)arg;
2794
2795         if (tr_config->depth == XPT_DEPTH_TARGET) {
2796                 xpt_targetfunc_t *tr_func;
2797
2798                 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2799
2800                 return(tr_func(target, tr_config->tr_arg));
2801         } else
2802                 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2803 }
2804
2805 static int
2806 xptdefdevicefunc(struct cam_ed *device, void *arg)
2807 {
2808         struct xpt_traverse_config *tr_config;
2809
2810         tr_config = (struct xpt_traverse_config *)arg;
2811
2812         if (tr_config->depth == XPT_DEPTH_DEVICE) {
2813                 xpt_devicefunc_t *tr_func;
2814
2815                 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2816
2817                 return(tr_func(device, tr_config->tr_arg));
2818         } else
2819                 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2820 }
2821
2822 static int
2823 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2824 {
2825         struct xpt_traverse_config *tr_config;
2826         xpt_periphfunc_t *tr_func;
2827
2828         tr_config = (struct xpt_traverse_config *)arg;
2829
2830         tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2831
2832         /*
2833          * Unlike the other default functions, we don't check for depth
2834          * here.  The peripheral driver level is the last level in the EDT,
2835          * so if we're here, we should execute the function in question.
2836          */
2837         return(tr_func(periph, tr_config->tr_arg));
2838 }
2839
2840 /*
2841  * Execute the given function for every bus in the EDT.
2842  */
2843 static int
2844 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2845 {
2846         struct xpt_traverse_config tr_config;
2847
2848         tr_config.depth = XPT_DEPTH_BUS;
2849         tr_config.tr_func = tr_func;
2850         tr_config.tr_arg = arg;
2851
2852         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2853 }
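
/*
 * A minimal sketch of a traversal callback, for illustration only (the
 * function below is hypothetical): the tr_func contract is to return
 * nonzero to continue the walk and zero to stop it early.
 */
#if 0
static int
xptcountbusfunc(struct cam_eb *bus, void *arg)
{
        int *count;

        count = (int *)arg;
        (*count)++;
        return (1);             /* nonzero: keep traversing */
}

/* Usage: int n = 0; xpt_for_all_busses(xptcountbusfunc, &n); */
#endif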
2854
2855 #ifdef notusedyet
2856 /*
2857  * Execute the given function for every target in the EDT.
2858  */
2859 static int
2860 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2861 {
2862         struct xpt_traverse_config tr_config;
2863
2864         tr_config.depth = XPT_DEPTH_TARGET;
2865         tr_config.tr_func = tr_func;
2866         tr_config.tr_arg = arg;
2867
2868         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2869 }
2870 #endif /* notusedyet */
2871
2872 /*
2873  * Execute the given function for every device in the EDT.
2874  */
2875 static int
2876 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2877 {
2878         struct xpt_traverse_config tr_config;
2879
2880         tr_config.depth = XPT_DEPTH_DEVICE;
2881         tr_config.tr_func = tr_func;
2882         tr_config.tr_arg = arg;
2883
2884         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2885 }
2886
2887 #ifdef notusedyet
2888 /*
2889  * Execute the given function for every peripheral in the EDT.
2890  */
2891 static int
2892 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2893 {
2894         struct xpt_traverse_config tr_config;
2895
2896         tr_config.depth = XPT_DEPTH_PERIPH;
2897         tr_config.tr_func = tr_func;
2898         tr_config.tr_arg = arg;
2899
2900         return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2901 }
2902 #endif /* notusedyet */
2903
2904 static int
2905 xptsetasyncfunc(struct cam_ed *device, void *arg)
2906 {
2907         struct cam_path path;
2908         struct ccb_getdev cgd;
2909         struct async_node *cur_entry;
2910
2911         cur_entry = (struct async_node *)arg;
2912
2913         /*
2914          * Don't report unconfigured devices (Wildcard devs,
2915          * devices only for target mode, device instances
2916          * that have been invalidated but are waiting for
2917          * their last reference count to be released).
2918          */
2919         if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2920                 return (1);
2921
2922         xpt_compile_path(&path,
2923                          NULL,
2924                          device->target->bus->path_id,
2925                          device->target->target_id,
2926                          device->lun_id);
2927         xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2928         cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2929         xpt_action((union ccb *)&cgd);
2930         cur_entry->callback(cur_entry->callback_arg,
2931                             AC_FOUND_DEVICE,
2932                             &path, &cgd);
2933         xpt_release_path(&path);
2934
2935         return(1);
2936 }
2937
2938 static int
2939 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2940 {
2941         struct cam_path path;
2942         struct ccb_pathinq cpi;
2943         struct async_node *cur_entry;
2944
2945         cur_entry = (struct async_node *)arg;
2946
2947         xpt_compile_path(&path, /*periph*/NULL,
2948                          bus->sim->path_id,
2949                          CAM_TARGET_WILDCARD,
2950                          CAM_LUN_WILDCARD);
2951         xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2952         cpi.ccb_h.func_code = XPT_PATH_INQ;
2953         xpt_action((union ccb *)&cpi);
2954         cur_entry->callback(cur_entry->callback_arg,
2955                             AC_PATH_REGISTERED,
2956                             &path, &cpi);
2957         xpt_release_path(&path);
2958
2959         return(1);
2960 }
2961
2962 void
2963 xpt_action(union ccb *start_ccb)
2964 {
2965         int iopl;
2966
2967         GIANT_REQUIRED;
2968
2969         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2970
2971         start_ccb->ccb_h.status = CAM_REQ_INPROG;
2972
2973         iopl = splsoftcam();
2974         switch (start_ccb->ccb_h.func_code) {
2975         case XPT_SCSI_IO:
2976         {
2977                 struct cam_ed *device;
2978 #ifdef CAMDEBUG
2979                 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2980                 struct cam_path *path;
2981
2982                 path = start_ccb->ccb_h.path;
2983 #endif
2984
2985                 /*
2986                  * For the sake of compatibility with SCSI-1
2987                  * devices that may not understand the identify
2988                  * message, we include lun information in the
2989                  * second byte of all commands.  SCSI-1 specifies
2990                  * that luns are a 3 bit value and reserves only 3
2991                  * bits for lun information in the CDB.  Later
2992                  * revisions of the SCSI spec allow for more than 8
2993                  * luns, but have deprecated lun information in the
2994          * CDB.  So, if the lun won't fit, we must omit it.
2995                  *
2996                  * Also be aware that during initial probing for devices,
2997                  * the inquiry information is unknown but initialized to 0.
2998                  * This means that this code will be exercised while probing
2999                  * devices with an ANSI revision greater than 2.
3000                  */
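                /*
                 * Worked example (illustrative): for target_lun 2 on a
                 * SCSI-2 device, (2 << 5) == 0x40 is OR'd into the top
                 * three bits of CDB byte 1 below.
                 */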
3001                 device = start_ccb->ccb_h.path->device;
3002                 if (device->protocol_version <= SCSI_REV_2
3003                  && start_ccb->ccb_h.target_lun < 8
3004                  && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3005
3006                         start_ccb->csio.cdb_io.cdb_bytes[1] |=
3007                             start_ccb->ccb_h.target_lun << 5;
3008                 }
3009                 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3010                 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3011                           scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3012                                        &path->device->inq_data),
3013                           scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3014                                           cdb_str, sizeof(cdb_str))));
3015         }
3016         /* FALLTHROUGH */
3017         case XPT_TARGET_IO:
3018         case XPT_CONT_TARGET_IO:
3019                 start_ccb->csio.sense_resid = 0;
3020                 start_ccb->csio.resid = 0;
3021                 /* FALLTHROUGH */
3022         case XPT_RESET_DEV:
3023         case XPT_ENG_EXEC:
3024         {
3025                 struct cam_path *path;
3026                 struct cam_sim *sim;
3027                 int s;
3028                 int runq;
3029
3030                 path = start_ccb->ccb_h.path;
3031                 s = splsoftcam();
3032
3033                 sim = path->bus->sim;
3034                 if (SIM_DEAD(sim)) {
3035                         /* The SIM has gone; just execute the CCB directly. */
3036                         cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3037                         (*(sim->sim_action))(sim, start_ccb);
3038                         splx(s);
3039                         break;
3040                 }
3041
3042                 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3043                 if (path->device->qfrozen_cnt == 0)
3044                         runq = xpt_schedule_dev_sendq(path->bus, path->device);
3045                 else
3046                         runq = 0;
3047                 splx(s);
3048                 if (runq != 0)
3049                         xpt_run_dev_sendq(path->bus);
3050                 break;
3051         }
3052         case XPT_SET_TRAN_SETTINGS:
3053         {
3054                 xpt_set_transfer_settings(&start_ccb->cts,
3055                                           start_ccb->ccb_h.path->device,
3056                                           /*async_update*/FALSE);
3057                 break;
3058         }
3059         case XPT_CALC_GEOMETRY:
3060         {
3061                 struct cam_sim *sim;
3062
3063                 /* Filter out garbage */
3064                 if (start_ccb->ccg.block_size == 0
3065                  || start_ccb->ccg.volume_size == 0) {
3066                         start_ccb->ccg.cylinders = 0;
3067                         start_ccb->ccg.heads = 0;
3068                         start_ccb->ccg.secs_per_track = 0;
3069                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3070                         break;
3071                 }
3072 #ifdef PC98
3073                 /*
3074                  * In a PC-98 system, geometry translation depends on
3075                  * the "real" device geometry obtained from mode page 4.
3076                  * SCSI geometry translation is performed in the
3077                  * initialization routine of the SCSI BIOS and the result
3078                  * stored in host memory.  If the translation is available
3079                  * in host memory, use it.  If not, rely on the default
3080                  * translation the device driver performs.
3081                  */
3082                 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3083                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3084                         break;
3085                 }
3086 #endif
3087                 sim = start_ccb->ccb_h.path->bus->sim;
3088                 (*(sim->sim_action))(sim, start_ccb);
3089                 break;
3090         }
3091         case XPT_ABORT:
3092         {
3093                 union ccb* abort_ccb;
3094                 int s;                          
3095
3096                 abort_ccb = start_ccb->cab.abort_ccb;
3097                 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3098
3099                         if (abort_ccb->ccb_h.pinfo.index >= 0) {
3100                                 struct cam_ccbq *ccbq;
3101
3102                                 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3103                                 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3104                                 abort_ccb->ccb_h.status =
3105                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3106                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3107                                 s = splcam();
3108                                 xpt_done(abort_ccb);
3109                                 splx(s);
3110                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3111                                 break;
3112                         }
3113                         if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3114                          && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3115                                 /*
3116                                  * We've caught this ccb en route to
3117                                  * the SIM.  Flag it for abort and the
3118                                  * SIM will do so just before starting
3119                                  * real work on the CCB.
3120                                  */
3121                                 abort_ccb->ccb_h.status =
3122                                     CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3123                                 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3124                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3125                                 break;
3126                         }
3127                 } 
3128                 if (XPT_FC_IS_QUEUED(abort_ccb)
3129                  && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3130                         /*
3131                          * It's already completed but waiting
3132                          * for our SWI to get to it.
3133                          */
3134                         start_ccb->ccb_h.status = CAM_UA_ABORT;
3135                         break;
3136                 }
3137                 /*
3138                  * If we weren't able to take care of the abort request
3139                  * in the XPT, pass the request down to the SIM for processing.
3140                  */
3141         }
3142         /* FALLTHROUGH */
3143         case XPT_ACCEPT_TARGET_IO:
3144         case XPT_EN_LUN:
3145         case XPT_IMMED_NOTIFY:
3146         case XPT_NOTIFY_ACK:
3147         case XPT_GET_TRAN_SETTINGS:
3148         case XPT_RESET_BUS:
3149         {
3150                 struct cam_sim *sim;
3151
3152                 sim = start_ccb->ccb_h.path->bus->sim;
3153                 (*(sim->sim_action))(sim, start_ccb);
3154                 break;
3155         }
3156         case XPT_PATH_INQ:
3157         {
3158                 struct cam_sim *sim;
3159
3160                 sim = start_ccb->ccb_h.path->bus->sim;
3161                 (*(sim->sim_action))(sim, start_ccb);
3162                 break;
3163         }
3164         case XPT_PATH_STATS:
3165                 start_ccb->cpis.last_reset =
3166                         start_ccb->ccb_h.path->bus->last_reset;
3167                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3168                 break;
3169         case XPT_GDEV_TYPE:
3170         {
3171                 struct cam_ed *dev;
3172                 int s;
3173
3174                 dev = start_ccb->ccb_h.path->device;
3175                 s = splcam();
3176                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3177                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3178                 } else {
3179                         struct ccb_getdev *cgd;
3180                         struct cam_eb *bus;
3181                         struct cam_et *tar;
3182
3183                         cgd = &start_ccb->cgd;
3184                         bus = cgd->ccb_h.path->bus;
3185                         tar = cgd->ccb_h.path->target;
3186                         cgd->inq_data = dev->inq_data;
3187                         cgd->ccb_h.status = CAM_REQ_CMP;
3188                         cgd->serial_num_len = dev->serial_num_len;
3189                         if ((dev->serial_num_len > 0)
3190                          && (dev->serial_num != NULL))
3191                                 bcopy(dev->serial_num, cgd->serial_num,
3192                                       dev->serial_num_len);
3193                 }
3194                 splx(s);
3195                 break; 
3196         }
3197         case XPT_GDEV_STATS:
3198         {
3199                 struct cam_ed *dev;
3200                 int s;
3201
3202                 dev = start_ccb->ccb_h.path->device;
3203                 s = splcam();
3204                 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3205                         start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3206                 } else {
3207                         struct ccb_getdevstats *cgds;
3208                         struct cam_eb *bus;
3209                         struct cam_et *tar;
3210
3211                         cgds = &start_ccb->cgds;
3212                         bus = cgds->ccb_h.path->bus;
3213                         tar = cgds->ccb_h.path->target;
3214                         cgds->dev_openings = dev->ccbq.dev_openings;
3215                         cgds->dev_active = dev->ccbq.dev_active;
3216                         cgds->devq_openings = dev->ccbq.devq_openings;
3217                         cgds->devq_queued = dev->ccbq.queue.entries;
3218                         cgds->held = dev->ccbq.held;
3219                         cgds->last_reset = tar->last_reset;
3220                         cgds->maxtags = dev->quirk->maxtags;
3221                         cgds->mintags = dev->quirk->mintags;
3222                         if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3223                                 cgds->last_reset = bus->last_reset;
3224                         cgds->ccb_h.status = CAM_REQ_CMP;
3225                 }
3226                 splx(s);
3227                 break;
3228         }
3229         case XPT_GDEVLIST:
3230         {
3231                 struct cam_periph       *nperiph;
3232                 struct periph_list      *periph_head;
3233                 struct ccb_getdevlist   *cgdl;
3234                 u_int                   i;
3235                 int                     s;
3236                 struct cam_ed           *device;
3237                 int                     found;
3238
3239
3240                 found = 0;
3241
3242                 /*
3243                  * Don't want anyone mucking with our data.
3244                  */
3245                 s = splcam();
3246                 device = start_ccb->ccb_h.path->device;
3247                 periph_head = &device->periphs;
3248                 cgdl = &start_ccb->cgdl;
3249
3250                 /*
3251                  * Check and see if the list has changed since the user
3252                  * last requested a list member.  If so, tell them that the
3253                  * list has changed, and therefore they need to start over 
3254                  * from the beginning.
3255                  */
3256                 if ((cgdl->index != 0) && 
3257                     (cgdl->generation != device->generation)) {
3258                         cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3259                         splx(s);
3260                         break;
3261                 }
3262
3263                 /*
3264                  * Traverse the list of peripherals and attempt to find 
3265                  * the requested peripheral.
3266                  */
3267                 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3268                      (nperiph != NULL) && (i <= cgdl->index);
3269                      nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3270                         if (i == cgdl->index) {
3271                                 strncpy(cgdl->periph_name,
3272                                         nperiph->periph_name,
3273                                         DEV_IDLEN);
3274                                 cgdl->unit_number = nperiph->unit_number;
3275                                 found = 1;
3276                         }
3277                 }
3278                 if (found == 0) {
3279                         cgdl->status = CAM_GDEVLIST_ERROR;
3280                         splx(s);
3281                         break;
3282                 }
3283
3284                 if (nperiph == NULL)
3285                         cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3286                 else
3287                         cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3288
3289                 cgdl->index++;
3290                 cgdl->generation = device->generation;
3291
3292                 splx(s);
3293                 cgdl->ccb_h.status = CAM_REQ_CMP;
3294                 break;
3295         }
3296         case XPT_DEV_MATCH:
3297         {
3298                 int s;
3299                 dev_pos_type position_type;
3300                 struct ccb_dev_match *cdm;
3301
3302                 cdm = &start_ccb->cdm;
3303
3304                 /*
3305                  * Prevent EDT changes while we traverse it.
3306                  */
3307                 s = splcam();
3308                 /*
3309                  * There are two ways of getting at information in the EDT.
3310                  * The first way is via the primary EDT tree.  It starts
3311                  * with a list of busses, then a list of targets on a bus,
3312                  * then devices/luns on a target, and then peripherals on a
3313                  * device/lun.  The "other" way is by the peripheral driver
3314                  * lists.  The peripheral driver lists are organized by
3315                  * peripheral driver (obviously), so it makes sense to
3316                  * use the peripheral driver list if the user is looking
3317                  * for something like "da1", or all "da" devices.  If the
3318                  * user is looking for something on a particular bus/target
3319                  * or lun, it's generally better to go through the EDT tree.
3320                  */
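
                /*
                 * Caller-side sketch, hedged (such patterns are normally
                 * built in userland and submitted through the xpt(4)
                 * CAMIOCOMMAND ioctl): a single name pattern like this
                 * one steers the match through the peripheral driver
                 * lists rather than the EDT.
                 */
#if 0
                struct dev_match_pattern pattern;

                bzero(&pattern, sizeof(pattern));
                pattern.type = DEV_MATCH_PERIPH;
                strncpy(pattern.pattern.periph_pattern.periph_name, "da",
                        DEV_IDLEN);
                pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
#endif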
3321
3322                 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3323                         position_type = cdm->pos.position_type;
3324                 else {
3325                         u_int i;
3326
3327                         position_type = CAM_DEV_POS_NONE;
3328
3329                         for (i = 0; i < cdm->num_patterns; i++) {
3330                                 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3331                                  ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3332                                         position_type = CAM_DEV_POS_EDT;
3333                                         break;
3334                                 }
3335                         }
3336
3337                         if (cdm->num_patterns == 0)
3338                                 position_type = CAM_DEV_POS_EDT;
3339                         else if (position_type == CAM_DEV_POS_NONE)
3340                                 position_type = CAM_DEV_POS_PDRV;
3341                 }
3342
3343                 switch (position_type & CAM_DEV_POS_TYPEMASK) {
3344                 case CAM_DEV_POS_EDT:
3345                         xptedtmatch(cdm);
3346                         break;
3347                 case CAM_DEV_POS_PDRV:
3348                         xptperiphlistmatch(cdm);
3349                         break;
3350                 default:
3351                         cdm->status = CAM_DEV_MATCH_ERROR;
3352                         break;
3353                 }
3354
3355                 splx(s);
3356
3357                 if (cdm->status == CAM_DEV_MATCH_ERROR)
3358                         start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3359                 else
3360                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3361
3362                 break;
3363         }
3364         case XPT_SASYNC_CB:
3365         {
3366                 struct ccb_setasync *csa;
3367                 struct async_node *cur_entry;
3368                 struct async_list *async_head;
3369                 u_int32_t added;
3370                 int s;
3371
3372                 csa = &start_ccb->csa;
3373                 added = csa->event_enable;
3374                 async_head = &csa->ccb_h.path->device->asyncs;
3375
3376                 /*
3377                  * If there is already an entry for us, simply
3378                  * update it.
3379                  */
3380                 s = splcam();
3381                 cur_entry = SLIST_FIRST(async_head);
3382                 while (cur_entry != NULL) {
3383                         if ((cur_entry->callback_arg == csa->callback_arg)
3384                          && (cur_entry->callback == csa->callback))
3385                                 break;
3386                         cur_entry = SLIST_NEXT(cur_entry, links);
3387                 }
3388
3389                 if (cur_entry != NULL) {
3390                         /*
3391                          * If the request has no flags set,
3392                          * remove the entry.
3393                          */
3394                         added &= ~cur_entry->event_enable;
3395                         if (csa->event_enable == 0) {
3396                                 SLIST_REMOVE(async_head, cur_entry,
3397                                              async_node, links);
3398                                 csa->ccb_h.path->device->refcount--;
3399                                 free(cur_entry, M_CAMXPT);
3400                         } else {
3401                                 cur_entry->event_enable = csa->event_enable;
3402                         }
3403                 } else {
3404                         cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3405                                            M_NOWAIT);
3406                         if (cur_entry == NULL) {
3407                                 splx(s);
3408                                 csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3409                                 break;
3410                         }
3411                         cur_entry->event_enable = csa->event_enable;
3412                         cur_entry->callback_arg = csa->callback_arg;
3413                         cur_entry->callback = csa->callback;
3414                         SLIST_INSERT_HEAD(async_head, cur_entry, links);
3415                         csa->ccb_h.path->device->refcount++;
3416                 }
3417
3418                 if ((added & AC_FOUND_DEVICE) != 0) {
3419                         /*
3420                          * Get this peripheral up to date with all
3421                          * the currently existing devices.
3422                          */
3423                         xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3424                 }
3425                 if ((added & AC_PATH_REGISTERED) != 0) {
3426                         /*
3427                          * Get this peripheral up to date with all
3428                          * the currently existing busses.
3429                          */
3430                         xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3431                 }
3432                 splx(s);
3433                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3434                 break;
3435         }
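        /*
         * Hedged caller-side sketch of the request handled above (assumes
         * "path" is a held path and xxasync is the caller's handler):
         * this is how a peripheral driver asks to hear about newly
         * found devices.
         */
#if 0
        struct ccb_setasync csa;

        xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_FOUND_DEVICE;
        csa.callback = xxasync;
        csa.callback_arg = NULL;
        xpt_action((union ccb *)&csa);
#endif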
3436         case XPT_REL_SIMQ:
3437         {
3438                 struct ccb_relsim *crs;
3439                 struct cam_ed *dev;
3440                 int s;
3441
3442                 crs = &start_ccb->crs;
3443                 dev = crs->ccb_h.path->device;
3444                 if (dev == NULL) {
3445
3446                         crs->ccb_h.status = CAM_DEV_NOT_THERE;
3447                         break;
3448                 }
3449
3450                 s = splcam();
3451
3452                 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3453
3454                         if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3455                                 /* Don't ever go below one opening */
3456                                 if (crs->openings > 0) {
3457                                         xpt_dev_ccbq_resize(crs->ccb_h.path,
3458                                                             crs->openings);
3459
3460                                         if (bootverbose) {
3461                                                 xpt_print(crs->ccb_h.path,
3462                                                     "tagged openings now %d\n",
3463                                                     crs->openings);
3464                                         }
3465                                 }
3466                         }
3467                 }
3468
3469                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3470
3471                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3472
3473                                 /*
3474                                  * Just extend the old timeout and decrement
3475                                  * the freeze count so that a single timeout
3476                                  * is sufficient for releasing the queue.
3477                                  */
3478                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3479                                 untimeout(xpt_release_devq_timeout,
3480                                           dev, dev->c_handle);
3481                         } else {
3482
3483                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3484                         }
3485
3486                         dev->c_handle =
3487                                 timeout(xpt_release_devq_timeout,
3488                                         dev,
3489                                         (crs->release_timeout * hz) / 1000);
3490
3491                         dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3492
3493                 }
3494
3495                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3496
3497                         if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3498                                 /*
3499                                  * Decrement the freeze count so that a single
3500                                  * completion is still sufficient to unfreeze
3501                                  * the queue.
3502                                  */
3503                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3504                         } else {
3505                                 
3506                                 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3507                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3508                         }
3509                 }
3510
3511                 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3512
3513                         if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3514                          || (dev->ccbq.dev_active == 0)) {
3515
3516                                 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3517                         } else {
3518                                 
3519                                 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3520                                 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3521                         }
3522                 }
3523                 splx(s);
3524                 
3525                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3526
3527                         xpt_release_devq(crs->ccb_h.path, /*count*/1,
3528                                          /*run_queue*/TRUE);
3529                 }
3530                 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3531                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3532                 break;
3533         }
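        /*
         * Hedged caller-side sketch of the case above, in the style of
         * periph error recovery: ask for the frozen device queue to be
         * released only after a timeout, leaving the other release
         * conditions unused.
         */
#if 0
        struct ccb_relsim crs;

        xpt_setup_ccb(&crs.ccb_h, path, /*priority*/0);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
        crs.release_timeout = 500;      /* in milliseconds */
        crs.openings = 0;
        crs.qfrozen_cnt = 0;
        xpt_action((union ccb *)&crs);
#endif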
3534         case XPT_SCAN_BUS:
3535                 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3536                 break;
3537         case XPT_SCAN_LUN:
3538                 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3539                              start_ccb->ccb_h.path, start_ccb->crcn.flags,
3540                              start_ccb);
3541                 break;
3542         case XPT_DEBUG: {
3543 #ifdef CAMDEBUG
3544                 int s;
3545                 
3546                 s = splcam();
3547 #ifdef CAM_DEBUG_DELAY
3548                 cam_debug_delay = CAM_DEBUG_DELAY;
3549 #endif
3550                 cam_dflags = start_ccb->cdbg.flags;
3551                 if (cam_dpath != NULL) {
3552                         xpt_free_path(cam_dpath);
3553                         cam_dpath = NULL;
3554                 }
3555
3556                 if (cam_dflags != CAM_DEBUG_NONE) {
3557                         if (xpt_create_path(&cam_dpath, xpt_periph,
3558                                             start_ccb->ccb_h.path_id,
3559                                             start_ccb->ccb_h.target_id,
3560                                             start_ccb->ccb_h.target_lun) !=
3561                                             CAM_REQ_CMP) {
3562                                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3563                                 cam_dflags = CAM_DEBUG_NONE;
3564                         } else {
3565                                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3566                                 xpt_print(cam_dpath, "debugging flags now %x\n",
3567                                     cam_dflags);
3568                         }
3569                 } else {
3570                         cam_dpath = NULL;
3571                         start_ccb->ccb_h.status = CAM_REQ_CMP;
3572                 }
3573                 splx(s);
3574 #else /* !CAMDEBUG */
3575                 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3576 #endif /* CAMDEBUG */
3577                 break;
3578         }
3579         case XPT_NOOP:
3580                 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3581                         xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3582                 start_ccb->ccb_h.status = CAM_REQ_CMP;
3583                 break;
3584         default:
3585         case XPT_SDEV_TYPE:
3586         case XPT_TERM_IO:
3587         case XPT_ENG_INQ:
3588                 /* XXX Implement */
3589                 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3590                 break;
3591         }
3592         splx(iopl);
3593 }
3594
3595 void
3596 xpt_polled_action(union ccb *start_ccb)
3597 {
3598         int       s;
3599         u_int32_t timeout;
3600         struct    cam_sim *sim; 
3601         struct    cam_devq *devq;
3602         struct    cam_ed *dev;
3603
3604         GIANT_REQUIRED;
3605
3606         timeout = start_ccb->ccb_h.timeout;
3607         sim = start_ccb->ccb_h.path->bus->sim;
3608         devq = sim->devq;
3609         dev = start_ccb->ccb_h.path->device;
3610
3611         s = splcam();
3612
3613         /*
3614          * Steal an opening so that no other queued requests
3615          * can get it before us while we simulate interrupts.
3616          */
3617         dev->ccbq.devq_openings--;
3618         dev->ccbq.dev_openings--;       
3619         
3620         while (((devq != NULL && devq->send_openings <= 0) ||
3621            dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3622                 DELAY(1000);
3623                 (*(sim->sim_poll))(sim);
3624                 camisr(&cam_bioq);
3625         }
3626         
3627         dev->ccbq.devq_openings++;
3628         dev->ccbq.dev_openings++;
3629         
3630         if (timeout != 0) {
3631                 xpt_action(start_ccb);
3632                 while (--timeout > 0) {
3633                         (*(sim->sim_poll))(sim);
3634                         camisr(&cam_bioq);
3635                         if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3636                             != CAM_REQ_INPROG)
3637                                 break;
3638                         DELAY(1000);
3639                 }
3640                 if (timeout == 0) {
3641                         /*
3642                          * XXX Is it worth adding a sim_timeout entry
3643                          * point so we can attempt recovery?  If
3644                          * this is only used for dumps, I don't think
3645                          * it is.
3646                          */
3647                         start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3648                 }
3649         } else {
3650                 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3651         }
3652         splx(s);
3653 }
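
/*
 * Usage sketch, hedged: xpt_polled_action() exists for contexts such as
 * crash dumps where interrupts are unavailable, so a caller prepares a
 * CCB as usual and this routine drives the SIM's poll entry point until
 * the CCB completes or the timeout (in milliseconds) expires.
 */
#if 0
        /* ccb prepared with xpt_setup_ccb() and a scsi_*() CDB builder */
        ccb->ccb_h.timeout = 10 * 1000; /* polled in 1ms steps */
        xpt_polled_action(ccb);
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
                printf("polled command failed\n");
#endif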
3654         
3655 /*
3656  * Schedule a peripheral driver to receive a ccb when its
3657  * target device has space for more transactions.
3658  */
3659 void
3660 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3661 {
3662         struct cam_ed *device;
3663         union ccb *work_ccb;
3664         int s;
3665         int runq;
3666
3667         GIANT_REQUIRED;
3668
3669         CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3670         device = perph->path->device;
3671         s = splsoftcam();
3672         if (periph_is_queued(perph)) {
3673                 /* Simply reorder based on new priority */
3674                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3675                           ("   change priority to %d\n", new_priority));
3676                 if (new_priority < perph->pinfo.priority) {
3677                         camq_change_priority(&device->drvq,
3678                                              perph->pinfo.index,
3679                                              new_priority);
3680                 }
3681                 runq = 0;
3682         } else if (SIM_DEAD(perph->path->bus->sim)) {
3683                 /* The SIM is gone so just call periph_start directly. */
3684                 work_ccb = xpt_get_ccb(perph->path->device);
3685                 splx(s);
3686                 if (work_ccb == NULL)
3687                         return; /* XXX */
3688                 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3689                 perph->pinfo.priority = new_priority;
3690                 perph->periph_start(perph, work_ccb);
3691                 return;
3692         } else {
3693                 /* New entry on the queue */
3694                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3695                           ("   added periph to queue\n"));
3696                 perph->pinfo.priority = new_priority;
3697                 perph->pinfo.generation = ++device->drvq.generation;
3698                 camq_insert(&device->drvq, &perph->pinfo);
3699                 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3700         }
3701         splx(s);
3702         if (runq != 0) {
3703                 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3704                           ("   calling xpt_run_devq\n"));
3705                 xpt_run_dev_allocq(perph->path->bus);
3706         }
3707 }
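
/*
 * Producer-side sketch (hypothetical "xx" driver): a peripheral queues
 * its work internally and then calls xpt_schedule(); once the device
 * can accept another transaction, xpt_run_dev_allocq() below hands the
 * driver's periph_start routine a CCB to fill and send.
 */
#if 0
static void
xxschedule(struct cam_periph *periph)
{
        /* ... enqueue the work on the driver's softc ... */
        xpt_schedule(periph, /*new_priority*/1);
}
#endif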
3708
3709
3710 /*
3711  * Schedule a device to run on a given queue.
3712  * If the device was inserted as a new entry on the queue,
3713  * return 1 meaning the device queue should be run. If we
3714  * were already queued, implying someone else has already
3715  * started the queue, return 0 so the caller doesn't attempt
3716  * to run the queue.  Must be run at splsoftcam or splcam
3717  * (since splcam encompasses splsoftcam).
3718  */
3719 static int
3720 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3721                  u_int32_t new_priority)
3722 {
3723         int retval;
3724         u_int32_t old_priority;
3725
3726         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3727
3728         old_priority = pinfo->priority;
3729
3730         /*
3731          * Are we already queued?
3732          */
3733         if (pinfo->index != CAM_UNQUEUED_INDEX) {
3734                 /* Simply reorder based on new priority */
3735                 if (new_priority < old_priority) {
3736                         camq_change_priority(queue, pinfo->index,
3737                                              new_priority);
3738                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3739                                         ("changed priority to %d\n",
3740                                          new_priority));
3741                 }
3742                 retval = 0;
3743         } else {
3744                 /* New entry on the queue */
3745                 if (new_priority < old_priority)
3746                         pinfo->priority = new_priority;
3747
3748                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3749                                 ("Inserting onto queue\n"));
3750                 pinfo->generation = ++queue->generation;
3751                 camq_insert(queue, pinfo);
3752                 retval = 1;
3753         }
3754         return (retval);
3755 }
3756
3757 static void
3758 xpt_run_dev_allocq(struct cam_eb *bus)
3759 {
3760         struct  cam_devq *devq;
3761         int     s;
3762
3763         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3764         devq = bus->sim->devq;
3765
3766         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3767                         ("   qfrozen_cnt == 0x%x, entries == %d, "
3768                          "openings == %d, active == %d\n",
3769                          devq->alloc_queue.qfrozen_cnt,
3770                          devq->alloc_queue.entries,
3771                          devq->alloc_openings,
3772                          devq->alloc_active));
3773
3774         s = splsoftcam();
3775         devq->alloc_queue.qfrozen_cnt++;
3776         while ((devq->alloc_queue.entries > 0)
3777             && (devq->alloc_openings > 0)
3778             && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3779                 struct  cam_ed_qinfo *qinfo;
3780                 struct  cam_ed *device;
3781                 union   ccb *work_ccb;
3782                 struct  cam_periph *drv;
3783                 struct  camq *drvq;
3784                 
3785                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3786                                                            CAMQ_HEAD);
3787                 device = qinfo->device;
3788
3789                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3790                                 ("running device %p\n", device));
3791
3792                 drvq = &device->drvq;
3793
3794 #ifdef CAMDEBUG
3795                 if (drvq->entries <= 0) {
3796                         panic("xpt_run_dev_allocq: "
3797                               "Device on queue without any work to do");
3798                 }
3799 #endif
3800                 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3801                         devq->alloc_openings--;
3802                         devq->alloc_active++;
3803                         drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3804                         splx(s);
3805                         xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3806                                       drv->pinfo.priority);
3807                         CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3808                                         ("calling periph start\n"));
3809                         drv->periph_start(drv, work_ccb);
3810                 } else {
3811                         /*
3812                          * Malloc failure in alloc_ccb
3813                          */
3814                         /*
3815                          * XXX add us to a list to be run from free_ccb
3816                          * if we don't have any ccbs active on this
3817                          * device queue otherwise we may never get run
3818                          * again.
3819                          */
3820                         break;
3821                 }
3822         
3823                 /* Raise IPL for possible insertion and test at top of loop */
3824                 s = splsoftcam();
3825
3826                 if (drvq->entries > 0) {
3827                         /* We have more work.  Attempt to reschedule */
3828                         xpt_schedule_dev_allocq(bus, device);
3829                 }
3830         }
3831         devq->alloc_queue.qfrozen_cnt--;
3832         splx(s);
3833 }
3834
3835 static void
3836 xpt_run_dev_sendq(struct cam_eb *bus)
3837 {
3838         struct  cam_devq *devq;
3839         int     s;
3840
3841         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3842         
3843         devq = bus->sim->devq;
3844
3845         s = splcam();
3846         devq->send_queue.qfrozen_cnt++;
3847         splx(s);
3848         s = splsoftcam();
3849         while ((devq->send_queue.entries > 0)
3850             && (devq->send_openings > 0)) {
3851                 struct  cam_ed_qinfo *qinfo;
3852                 struct  cam_ed *device;
3853                 union ccb *work_ccb;
3854                 struct  cam_sim *sim;
3855                 int     ospl;
3856
3857                 ospl = splcam();
3858                 if (devq->send_queue.qfrozen_cnt > 1) {
3859                         splx(ospl);
3860                         break;
3861                 }
3862
3863                 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3864                                                            CAMQ_HEAD);
3865                 device = qinfo->device;
3866
3867                 /*
3868                  * If the device has been "frozen", don't attempt
3869                  * to run it.
3870                  */
3871                 if (device->qfrozen_cnt > 0) {
3872                         splx(ospl);
3873                         continue;
3874                 }
3875
3876                 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3877                                 ("running device %p\n", device));
3878
3879                 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3880                 if (work_ccb == NULL) {
3881                         printf("device on run queue with no ccbs???\n");
3882                         splx(ospl);
3883                         continue;
3884                 }
3885
3886                 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3887
3888                         if (num_highpower <= 0) {
3889                                 /*
3890                                  * We got a high power command, but we
3891                                  * don't have any available slots.  Freeze
3892                                  * the device queue until we have a slot
3893                                  * available.
3894                                  */
3895                                 device->qfrozen_cnt++;
3896                                 STAILQ_INSERT_TAIL(&highpowerq, 
3897                                                    &work_ccb->ccb_h, 
3898                                                    xpt_links.stqe);
3899
3900                                 splx(ospl);
3901                                 continue;
3902                         } else {
3903                                 /*
3904                                  * Consume a high power slot while
3905                                  * this ccb runs.
3906                                  */
3907                                 num_highpower--;
3908                         }
3909                 }
3910                 devq->active_dev = device;
3911                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3912
3913                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3914                 splx(ospl);
3915
3916                 devq->send_openings--;
3917                 devq->send_active++;            
3918                 
3919                 if (device->ccbq.queue.entries > 0)
3920                         xpt_schedule_dev_sendq(bus, device);
3921
3922                 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3923                         /*
3924                          * The client wants to freeze the queue
3925                          * after this CCB is sent.
3926                          */
3927                         ospl = splcam();
3928                         device->qfrozen_cnt++;
3929                         splx(ospl);
3930                 }
3931                 
3932                 splx(s);
3933
3934                 /* In Target mode, the peripheral driver knows best... */
3935                 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3936                         if ((device->inq_flags & SID_CmdQue) != 0
3937                          && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3938                                 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3939                         else
3940                                 /*
3941                                  * Clear this in case of a retried CCB that
3942                                  * failed due to a rejected tag.
3943                                  */
3944                                 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3945                 }
3946
3947                 /*
3948                  * Device queues can be shared among multiple sim instances
3949                  * that reside on different busses.  Use the SIM in the queue
3950                  * CCB's path, rather than the one in the bus that was passed
3951                  * into this function.
3952                  */
3953                 sim = work_ccb->ccb_h.path->bus->sim;
3954                 (*(sim->sim_action))(sim, work_ccb);
3955
3956                 ospl = splcam();
3957                 devq->active_dev = NULL;
3958                 splx(ospl);
3959                 /* Raise IPL for possible insertion and test at top of loop */
3960                 s = splsoftcam();
3961         }
3962         splx(s);
3963         s = splcam();
3964         devq->send_queue.qfrozen_cnt--;
3965         splx(s);
3966 }
3967
3968 /*
3969  * This function merges fields from the slave ccb into the master ccb, while
3970  * keeping important fields in the master ccb constant.
3971  */
3972 void
3973 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3974 {
3975         GIANT_REQUIRED;
3976
3977         /*
3978          * Pull fields that are valid for peripheral drivers to set
3979          * into the master CCB along with the CCB "payload".
3980          */
3981         master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3982         master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3983         master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3984         master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
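        /*
         * &(&ccb_h)[1] points just past the header, so this copies the
         * function-code-specific payload from the slave while the header
         * fields merged above and the master's path remain intact.
         */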
3985         bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3986               sizeof(union ccb) - sizeof(struct ccb_hdr));
3987 }
3988
3989 void
3990 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3991 {
3992         GIANT_REQUIRED;
3993
3994         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3995         ccb_h->pinfo.priority = priority;
3996         ccb_h->path = path;
3997         ccb_h->path_id = path->bus->path_id;
3998         if (path->target)
3999                 ccb_h->target_id = path->target->target_id;
4000         else
4001                 ccb_h->target_id = CAM_TARGET_WILDCARD;
4002         if (path->device) {
4003                 ccb_h->target_lun = path->device->lun_id;
4004                 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
4005         } else {
4006                 ccb_h->target_lun = CAM_LUN_WILDCARD;
4007         }
4008         ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
4009         ccb_h->flags = 0;
4010 }
4011
4012 /* Path manipulation functions */
4013 cam_status
4014 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
4015                 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4016 {
4017         struct     cam_path *path;
4018         cam_status status;
4019
4020         GIANT_REQUIRED;
4021
4022         path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
4023
4024         if (path == NULL) {
4025                 status = CAM_RESRC_UNAVAIL;
4026                 return(status);
4027         }
4028         status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
4029         if (status != CAM_REQ_CMP) {
4030                 free(path, M_CAMXPT);
4031                 path = NULL;
4032         }
4033         *new_path_ptr = path;
4034         return (status);
4035 }
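/*
 * Usage sketch built from functions in this file: create a path, use it,
 * then release it with xpt_free_path().  The id values are placeholders,
 * and the periph argument is NULL here only for brevity.
 *
 *      struct cam_path *path;
 *      cam_status status;
 *
 *      status = xpt_create_path(&path, NULL, path_id, target_id, lun_id);
 *      if (status != CAM_REQ_CMP)
 *              return (status);
 *      // ... issue CCBs against "path" ...
 *      xpt_free_path(path);
 */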
4036
4037 static cam_status
4038 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
4039                  path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4040 {
4041         struct       cam_eb *bus;
4042         struct       cam_et *target;
4043         struct       cam_ed *device;
4044         cam_status   status;
4045         int          s;
4046
4047         status = CAM_REQ_CMP;   /* Completed without error */
4048         target = NULL;          /* Wildcarded */
4049         device = NULL;          /* Wildcarded */
4050
4051         /*
4052          * We will potentially modify the EDT, so block interrupts
4053          * that may attempt to create cam paths.
4054          */
4055         s = splcam();
4056         bus = xpt_find_bus(path_id);
4057         if (bus == NULL) {
4058                 status = CAM_PATH_INVALID;
4059         } else {
4060                 target = xpt_find_target(bus, target_id);
4061                 if (target == NULL) {
4062                         /* Create one */
4063                         struct cam_et *new_target;
4064
4065                         new_target = xpt_alloc_target(bus, target_id);
4066                         if (new_target == NULL) {
4067                                 status = CAM_RESRC_UNAVAIL;
4068                         } else {
4069                                 target = new_target;
4070                         }
4071                 }
4072                 if (target != NULL) {
4073                         device = xpt_find_device(target, lun_id);
4074                         if (device == NULL) {
4075                                 /* Create one */
4076                                 struct cam_ed *new_device;
4077
4078                                 new_device = xpt_alloc_device(bus,
4079                                                               target,
4080                                                               lun_id);
4081                                 if (new_device == NULL) {
4082                                         status = CAM_RESRC_UNAVAIL;
4083                                 } else {
4084                                         device = new_device;
4085                                 }
4086                         }
4087                 }
4088         }
4089         splx(s);
4090
4091         /*
4092          * Only touch the user's data if we are successful.
4093          */
4094         if (status == CAM_REQ_CMP) {
4095                 new_path->periph = perph;
4096                 new_path->bus = bus;
4097                 new_path->target = target;
4098                 new_path->device = device;
4099                 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4100         } else {
4101                 if (device != NULL)
4102                         xpt_release_device(bus, target, device);
4103                 if (target != NULL)
4104                         xpt_release_target(bus, target);
4105                 if (bus != NULL)
4106                         xpt_release_bus(bus);
4107         }
4108         return (status);
4109 }
4110
4111 static void
4112 xpt_release_path(struct cam_path *path)
4113 {
4114         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4115         if (path->device != NULL) {
4116                 xpt_release_device(path->bus, path->target, path->device);
4117                 path->device = NULL;
4118         }
4119         if (path->target != NULL) {
4120                 xpt_release_target(path->bus, path->target);
4121                 path->target = NULL;
4122         }
4123         if (path->bus != NULL) {
4124                 xpt_release_bus(path->bus);
4125                 path->bus = NULL;
4126         }
4127 }
4128
4129 void
4130 xpt_free_path(struct cam_path *path)
4131 {
4132         GIANT_REQUIRED;
4133
4134         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4135         xpt_release_path(path);
4136         free(path, M_CAMXPT);
4137 }
4138
4139
4140 /*
4141  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4142  * in path1, 2 for match with wildcards in path2.
4143  */
4144 int
4145 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4146 {
4147         int retval = 0;
4148
4149         GIANT_REQUIRED;
4150
4151         if (path1->bus != path2->bus) {
4152                 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4153                         retval = 1;
4154                 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4155                         retval = 2;
4156                 else
4157                         return (-1);
4158         }
4159         if (path1->target != path2->target) {
4160                 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4161                         if (retval == 0)
4162                                 retval = 1;
4163                 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4164                         retval = 2;
4165                 else
4166                         return (-1);
4167         }
4168         if (path1->device != path2->device) {
4169                 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4170                         if (retval == 0)
4171                                 retval = 1;
4172                 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4173                         retval = 2;
4174                 else
4175                         return (-1);
4176         }
4177         return (retval);
4178 }
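/*
 * Usage sketch: an async callback can use the return value to decide
 * whether an event path overlaps the path it cares about (handle_event()
 * is hypothetical).
 *
 *      switch (xpt_path_comp(event_path, my_path)) {
 *      case -1:        // paths do not match
 *              break;
 *      case 0:         // exact match
 *      case 1:         // match via wildcards in event_path
 *      case 2:         // match via wildcards in my_path
 *              handle_event();
 *              break;
 *      }
 */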
4179
4180 void
4181 xpt_print_path(struct cam_path *path)
4182 {
4183         GIANT_REQUIRED;
4184
4185         if (path == NULL)
4186                 printf("(nopath): ");
4187         else {
4188                 if (path->periph != NULL)
4189                         printf("(%s%d:", path->periph->periph_name,
4190                                path->periph->unit_number);
4191                 else
4192                         printf("(noperiph:");
4193
4194                 if (path->bus != NULL)
4195                         printf("%s%d:%d:", path->bus->sim->sim_name,
4196                                path->bus->sim->unit_number,
4197                                path->bus->sim->bus_id);
4198                 else
4199                         printf("nobus:");
4200
4201                 if (path->target != NULL)
4202                         printf("%d:", path->target->target_id);
4203                 else
4204                         printf("X:");
4205
4206                 if (path->device != NULL)
4207                         printf("%d): ", path->device->lun_id);
4208                 else
4209                         printf("X): ");
4210         }
4211 }
4212
4213 void
4214 xpt_print(struct cam_path *path, const char *fmt, ...)
4215 {
4216         va_list ap;
4217         xpt_print_path(path);
4218         va_start(ap, fmt);
4219         vprintf(fmt, ap);
4220         va_end(ap);
4221 }
4222
4223 int
4224 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4225 {
4226         struct sbuf sb;
4227
4228         GIANT_REQUIRED;
4229
4230         sbuf_new(&sb, str, str_len, 0);
4231
4232         if (path == NULL)
4233                 sbuf_printf(&sb, "(nopath): ");
4234         else {
4235                 if (path->periph != NULL)
4236                         sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4237                                     path->periph->unit_number);
4238                 else
4239                         sbuf_printf(&sb, "(noperiph:");
4240
4241                 if (path->bus != NULL)
4242                         sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4243                                     path->bus->sim->unit_number,
4244                                     path->bus->sim->bus_id);
4245                 else
4246                         sbuf_printf(&sb, "nobus:");
4247
4248                 if (path->target != NULL)
4249                         sbuf_printf(&sb, "%d:", path->target->target_id);
4250                 else
4251                         sbuf_printf(&sb, "X:");
4252
4253                 if (path->device != NULL)
4254                         sbuf_printf(&sb, "%d): ", path->device->lun_id);
4255                 else
4256                         sbuf_printf(&sb, "X): ");
4257         }
4258         sbuf_finish(&sb);
4259
4260         return(sbuf_len(&sb));
4261 }
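/*
 * Sketch: unlike xpt_print_path(), this variant formats into a caller
 * supplied buffer, which suits messages assembled elsewhere.
 *
 *      char buf[64];
 *
 *      xpt_path_string(path, buf, sizeof(buf));
 *      printf("%scommand timed out\n", buf);
 */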
4262
4263 path_id_t
4264 xpt_path_path_id(struct cam_path *path)
4265 {
4266         GIANT_REQUIRED;
4267
4268         return(path->bus->path_id);
4269 }
4270
4271 target_id_t
4272 xpt_path_target_id(struct cam_path *path)
4273 {
4274         GIANT_REQUIRED;
4275
4276         if (path->target != NULL)
4277                 return (path->target->target_id);
4278         else
4279                 return (CAM_TARGET_WILDCARD);
4280 }
4281
4282 lun_id_t
4283 xpt_path_lun_id(struct cam_path *path)
4284 {
4285         GIANT_REQUIRED;
4286
4287         if (path->device != NULL)
4288                 return (path->device->lun_id);
4289         else
4290                 return (CAM_LUN_WILDCARD);
4291 }
4292
4293 struct cam_sim *
4294 xpt_path_sim(struct cam_path *path)
4295 {
4296         GIANT_REQUIRED;
4297
4298         return (path->bus->sim);
4299 }
4300
4301 struct cam_periph*
4302 xpt_path_periph(struct cam_path *path)
4303 {
4304         GIANT_REQUIRED;
4305
4306         return (path->periph);
4307 }
4308
4309 /*
4310  * Release a CAM control block for the caller.  Remit the cost of the structure
4311  * to the device referenced by the path.  If this device had no 'credits'
4312  * and peripheral drivers have registered async callbacks for this
4313  * notification, call them now.
4314  */
4315 void
4316 xpt_release_ccb(union ccb *free_ccb)
4317 {
4318         int      s;
4319         struct   cam_path *path;
4320         struct   cam_ed *device;
4321         struct   cam_eb *bus;
4322
4323         GIANT_REQUIRED;
4324
4325         CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4326         path = free_ccb->ccb_h.path;
4327         device = path->device;
4328         bus = path->bus;
4329         s = splsoftcam();
4330         cam_ccbq_release_opening(&device->ccbq);
4331         if (xpt_ccb_count > xpt_max_ccbs) {
4332                 xpt_free_ccb(free_ccb);
4333                 xpt_ccb_count--;
4334         } else {
4335                 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4336         }
4337         if (bus->sim->devq == NULL) {
4338                 splx(s);
4339                 return;
4340         }
4341         bus->sim->devq->alloc_openings++;
4342         bus->sim->devq->alloc_active--;
4343         /* XXX Turn this into an inline function - xpt_run_device?? */
4344         if ((device_is_alloc_queued(device) == 0)
4345          && (device->drvq.entries > 0)) {
4346                 xpt_schedule_dev_allocq(bus, device);
4347         }
4348         splx(s);
4349         if (dev_allocq_is_runnable(bus->sim->devq))
4350                 xpt_run_dev_allocq(bus);
4351 }
4352
4353 /* Functions accessed by SIM drivers */
4354
4355 /*
4356  * A sim structure, listing the SIM entry points and instance
4357  * identification info, is passed to xpt_bus_register to hook the SIM
4358  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4359  * for this new bus, places it in the list of busses, and assigns
4360  * it a path_id.  The path_id may be influenced by "hard wiring"
4361  * information specified by the user.  Once interrupt services are
4362  * available, the bus will be probed.
4363  */
4364 int32_t
4365 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4366 {
4367         struct cam_eb *new_bus;
4368         struct cam_eb *old_bus;
4369         struct ccb_pathinq cpi;
4370         int s;
4371
4372         GIANT_REQUIRED;
4373
4374         sim->bus_id = bus;
4375         new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4376                                           M_CAMXPT, M_NOWAIT);
4377         if (new_bus == NULL) {
4378                 /* Couldn't satisfy request */
4379                 return (CAM_RESRC_UNAVAIL);
4380         }
4381
4382         if (strcmp(sim->sim_name, "xpt") != 0) {
4383
4384                 sim->path_id =
4385                     xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4386         }
4387
4388         TAILQ_INIT(&new_bus->et_entries);
4389         new_bus->path_id = sim->path_id;
4390         new_bus->sim = sim;
4391         timevalclear(&new_bus->last_reset);
4392         new_bus->flags = 0;
4393         new_bus->refcount = 1;  /* Held until a bus_deregister event */
4394         new_bus->generation = 0;
4395         s = splcam();
4396         old_bus = TAILQ_FIRST(&xpt_busses);
4397         while (old_bus != NULL
4398             && old_bus->path_id < new_bus->path_id)
4399                 old_bus = TAILQ_NEXT(old_bus, links);
4400         if (old_bus != NULL)
4401                 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4402         else
4403                 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4404         bus_generation++;
4405         splx(s);
4406
4407         /* Notify interested parties */
4408         if (sim->path_id != CAM_XPT_PATH_ID) {
4409                 struct cam_path path;
4410
4411                 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4412                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4413                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4414                 cpi.ccb_h.func_code = XPT_PATH_INQ;
4415                 xpt_action((union ccb *)&cpi);
4416                 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4417                 xpt_release_path(&path);
4418         }
4419         return (CAM_SUCCESS);
4420 }
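/*
 * Sketch of a SIM attach path.  The surrounding driver code and error
 * handling are assumptions, but cam_sim_free() is the standard
 * counterpart for an allocated SIM:
 *
 *      if (xpt_bus_register(sim, 0) != CAM_SUCCESS) {
 *              cam_sim_free(sim, TRUE);        // also frees the devq
 *              return (ENXIO);
 *      }
 */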
4421
4422 int32_t
4423 xpt_bus_deregister(path_id_t pathid)
4424 {
4425         struct cam_path bus_path;
4426         struct cam_ed *device;
4427         struct cam_ed_qinfo *qinfo;
4428         struct cam_devq *devq;
4429         struct cam_periph *periph;
4430         struct cam_sim *ccbsim;
4431         union ccb *work_ccb;
4432         cam_status status;
4433
4434         GIANT_REQUIRED;
4435
4436         status = xpt_compile_path(&bus_path, NULL, pathid,
4437                                   CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4438         if (status != CAM_REQ_CMP)
4439                 return (status);
4440
4441         xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4442         xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4443
4444         /* The SIM may be gone, so use a dummy SIM for any stray operations. */
4445         devq = bus_path.bus->sim->devq;
4446         bus_path.bus->sim = &cam_dead_sim;
4447
4448         /* Execute any pending operations now. */
4449         while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4450             CAMQ_HEAD)) != NULL ||
4451             (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4452             CAMQ_HEAD)) != NULL) {
4453                 do {
4454                         device = qinfo->device;
4455                         work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4456                         if (work_ccb != NULL) {
4457                                 devq->active_dev = device;
4458                                 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4459                                 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4460                                 ccbsim = work_ccb->ccb_h.path->bus->sim;
4461                                 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4462                         }
4463
4464                         periph = (struct cam_periph *)camq_remove(&device->drvq,
4465                             CAMQ_HEAD);
4466                         if (periph != NULL)
4467                                 xpt_schedule(periph, periph->pinfo.priority);
4468                 } while (work_ccb != NULL || periph != NULL);
4469         }
4470
4471         /* Make sure all completed CCBs are processed. */
4472         while (!TAILQ_EMPTY(&cam_bioq)) {
4473                 camisr(&cam_bioq);
4474
4475                 /* Repeat the async's for the benefit of any new devices. */
4476                 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4477                 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4478         }
4479
4480         /* Release the reference count held while registered. */
4481         xpt_release_bus(bus_path.bus);
4482         xpt_release_path(&bus_path);
4483
4484         /* Recheck for more completed CCBs. */
4485         while (!TAILQ_EMPTY(&cam_bioq))
4486                 camisr(&cam_bioq);
4487
4488         return (CAM_REQ_CMP);
4489 }
4490
4491 static path_id_t
4492 xptnextfreepathid(void)
4493 {
4494         struct cam_eb *bus;
4495         path_id_t pathid;
4496         const char *strval;
4497
4498         pathid = 0;
4499         bus = TAILQ_FIRST(&xpt_busses);
4500 retry:
4501         /* Find an unoccupied pathid */
4502         while (bus != NULL && bus->path_id <= pathid) {
4503                 if (bus->path_id == pathid)
4504                         pathid++;
4505                 bus = TAILQ_NEXT(bus, links);
4506         }
4507
4508         /*
4509          * Ensure that this pathid is not reserved for
4510          * a bus that may be registered in the future.
4511          */
4512         if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4513                 ++pathid;
4514                 /* Start the search over */
4515                 goto retry;
4516         }
4517         return (pathid);
4518 }
4519
4520 static path_id_t
4521 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4522 {
4523         path_id_t pathid;
4524         int i, dunit, val;
4525         char buf[32];
4526         const char *dname;
4527
4528         pathid = CAM_XPT_PATH_ID;
4529         snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4530         i = 0;
4531         while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4532                 if (strcmp(dname, "scbus")) {
4533                         /* Avoid a bit of foot shooting. */
4534                         continue;
4535                 }
4536                 if (dunit < 0)          /* unwired?! */
4537                         continue;
4538                 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4539                         if (sim_bus == val) {
4540                                 pathid = dunit;
4541                                 break;
4542                         }
4543                 } else if (sim_bus == 0) {
4544                         /* Unspecified matches bus 0 */
4545                         pathid = dunit;
4546                         break;
4547                 } else {
4548                         printf("Ambiguous scbus configuration for %s%d "
4549                                "bus %d, cannot wire down.  The kernel "
4550                                "config entry for scbus%d should "
4551                                "specify a controller bus.\n"
4552                                "Scbus will be assigned dynamically.\n",
4553                                sim_name, sim_unit, sim_bus, dunit);
4554                         break;
4555                 }
4556         }
4557
4558         if (pathid == CAM_XPT_PATH_ID)
4559                 pathid = xptnextfreepathid();
4560         return (pathid);
4561 }
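/*
 * The "hard wiring" consulted above comes from kernel configuration
 * hints, e.g. (device names illustrative):
 *
 *      device scbus0 at ahc0 bus 0
 *
 * which pins path id 0 to bus 0 of the ahc0 controller.
 */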
4562
4563 void
4564 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4565 {
4566         struct cam_eb *bus;
4567         struct cam_et *target, *next_target;
4568         struct cam_ed *device, *next_device;
4569         int s;
4570
4571         GIANT_REQUIRED;
4572
4573         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4574
4575         /*
4576          * Most async events come from a CAM interrupt context.  In
4577          * a few cases, the error recovery code at the peripheral layer,
4578          * which may run from our SWI or a process context, may signal
4579          * deferred events with a call to xpt_async. Ensure async
4580          * notifications are serialized by blocking cam interrupts.
4581          */
4582         s = splcam();
4583
4584         bus = path->bus;
4585
4586         if (async_code == AC_BUS_RESET) { 
4587                 int s;
4588
4589                 s = splclock();
4590                 /* Update our notion of when the last reset occurred */
4591                 microtime(&bus->last_reset);
4592                 splx(s);
4593         }
4594
4595         for (target = TAILQ_FIRST(&bus->et_entries);
4596              target != NULL;
4597              target = next_target) {
4598
4599                 next_target = TAILQ_NEXT(target, links);
4600
4601                 if (path->target != target
4602                  && path->target->target_id != CAM_TARGET_WILDCARD
4603                  && target->target_id != CAM_TARGET_WILDCARD)
4604                         continue;
4605
4606                 if (async_code == AC_SENT_BDR) {
4607                         int s;
4608
4609                         /* Update our notion of when the last reset occurred */
4610                         s = splclock();
4611                         microtime(&path->target->last_reset);
4612                         splx(s);
4613                 }
4614
4615                 for (device = TAILQ_FIRST(&target->ed_entries);
4616                      device != NULL;
4617                      device = next_device) {
4618
4619                         next_device = TAILQ_NEXT(device, links);
4620
4621                         if (path->device != device 
4622                          && path->device->lun_id != CAM_LUN_WILDCARD
4623                          && device->lun_id != CAM_LUN_WILDCARD)
4624                                 continue;
4625
4626                         xpt_dev_async(async_code, bus, target,
4627                                       device, async_arg);
4628
4629                         xpt_async_bcast(&device->asyncs, async_code,
4630                                         path, async_arg);
4631                 }
4632         }
4633         
4634         /*
4635          * If this wasn't a fully wildcarded async, tell all
4636          * clients that want all async events.
4637          */
4638         if (bus != xpt_periph->path->bus)
4639                 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4640                                 path, async_arg);
4641         splx(s);
4642 }
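/*
 * Sketch: a SIM that detects a bus reset posts the event through this
 * function; a wildcard path for the bus (here "bus_path", hypothetical)
 * is typical.
 *
 *      xpt_async(AC_BUS_RESET, bus_path, NULL);
 */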
4643
4644 static void
4645 xpt_async_bcast(struct async_list *async_head,
4646                 u_int32_t async_code,
4647                 struct cam_path *path, void *async_arg)
4648 {
4649         struct async_node *cur_entry;
4650
4651         cur_entry = SLIST_FIRST(async_head);
4652         while (cur_entry != NULL) {
4653                 struct async_node *next_entry;
4654                 /*
4655                  * Grab the next list entry before we call the current
4656                  * entry's callback.  This is because the callback function
4657                  * can delete its async callback entry.
4658                  */
4659                 next_entry = SLIST_NEXT(cur_entry, links);
4660                 if ((cur_entry->event_enable & async_code) != 0)
4661                         cur_entry->callback(cur_entry->callback_arg,
4662                                             async_code, path,
4663                                             async_arg);
4664                 cur_entry = next_entry;
4665         }
4666 }
4667
4668 /*
4669  * Handle any per-device event notifications that require action by the XPT.
4670  */
4671 static void
4672 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4673               struct cam_ed *device, void *async_arg)
4674 {
4675         cam_status status;
4676         struct cam_path newpath;
4677
4678         /*
4679          * We only need to handle events for real devices.
4680          */
4681         if (target->target_id == CAM_TARGET_WILDCARD
4682          || device->lun_id == CAM_LUN_WILDCARD)
4683                 return;
4684
4685         /*
4686          * We need our own path with wildcards expanded to
4687          * handle certain types of events.
4688          */
4689         if ((async_code == AC_SENT_BDR)
4690          || (async_code == AC_BUS_RESET)
4691          || (async_code == AC_INQ_CHANGED))
4692                 status = xpt_compile_path(&newpath, NULL,
4693                                           bus->path_id,
4694                                           target->target_id,
4695                                           device->lun_id);
4696         else
4697                 status = CAM_REQ_CMP_ERR;
4698
4699         if (status == CAM_REQ_CMP) {
4700
4701                 /*
4702                  * Allow transfer negotiation to occur in a
4703                  * tag free environment.
4704                  */
4705                 if (async_code == AC_SENT_BDR
4706                  || async_code == AC_BUS_RESET)
4707                         xpt_toggle_tags(&newpath);
4708
4709                 if (async_code == AC_INQ_CHANGED) {
4710                         /*
4711                          * We've sent a start unit command, or
4712                          * something similar, to a device that
4713                          * may have caused its inquiry data to
4714                          * change.  So we re-scan the device to
4715                          * refresh its inquiry data.
4716                          */
4717                         xpt_scan_lun(newpath.periph, &newpath,
4718                                      CAM_EXPECT_INQ_CHANGE, NULL);
4719                 }
4720                 xpt_release_path(&newpath);
4721         } else if (async_code == AC_LOST_DEVICE) {
4722                 device->flags |= CAM_DEV_UNCONFIGURED;
4723         } else if (async_code == AC_TRANSFER_NEG) {
4724                 struct ccb_trans_settings *settings;
4725
4726                 settings = (struct ccb_trans_settings *)async_arg;
4727                 xpt_set_transfer_settings(settings, device,
4728                                           /*async_update*/TRUE);
4729         }
4730 }
4731
4732 u_int32_t
4733 xpt_freeze_devq(struct cam_path *path, u_int count)
4734 {
4735         int s;
4736         struct ccb_hdr *ccbh;
4737
4738         GIANT_REQUIRED;
4739
4740         s = splcam();
4741         path->device->qfrozen_cnt += count;
4742
4743         /*
4744          * Mark the last CCB in the queue as needing
4745          * to be requeued if the driver hasn't
4746          * changed its state yet.  This fixes a race
4747          * where a ccb is just about to be queued to
4748          * a controller driver when its interrupt routine
4749          * freezes the queue.  To completely close the
4750          * hole, controller drivers must check to see
4751          * if a ccb's status is still CAM_REQ_INPROG
4752          * under spl protection just before they queue
4753          * the CCB.  See ahc_action/ahc_freeze_devq for
4754          * an example.
4755          */
4756         ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4757         if (ccbh && ccbh->status == CAM_REQ_INPROG)
4758                 ccbh->status = CAM_REQUEUE_REQ;
4759         splx(s);
4760         return (path->device->qfrozen_cnt);
4761 }
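/*
 * Usage sketch pairing this with xpt_release_devq() below: error
 * recovery code typically freezes the device queue, runs its recovery
 * action, and then unfreezes with run_queue set so held transactions
 * restart.
 *
 *      xpt_freeze_devq(path, 1);
 *      // ... perform recovery action ...
 *      xpt_release_devq(path, 1, TRUE);
 */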
4762
4763 u_int32_t
4764 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4765 {
4766         GIANT_REQUIRED;
4767
4768         sim->devq->send_queue.qfrozen_cnt += count;
4769         if (sim->devq->active_dev != NULL) {
4770                 struct ccb_hdr *ccbh;
4771                 
4772                 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4773                                   ccb_hdr_tailq);
4774                 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4775                         ccbh->status = CAM_REQUEUE_REQ;
4776         }
4777         return (sim->devq->send_queue.qfrozen_cnt);
4778 }
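/*
 * Sketch (a common driver pattern, not code from this file): a SIM that
 * runs out of controller resources freezes its queue, asks for the CCB
 * to be requeued, and releases the queue once resources return (see
 * xpt_release_simq() below).
 *
 *      xpt_freeze_simq(sim, 1);
 *      ccb->ccb_h.status = CAM_REQUEUE_REQ;
 *      xpt_done(ccb);
 *      ...
 *      xpt_release_simq(sim, TRUE);    // rerun the send queue
 */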
4779
4780 static void
4781 xpt_release_devq_timeout(void *arg)
4782 {
4783         struct cam_ed *device;
4784
4785         device = (struct cam_ed *)arg;
4786
4787         xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4788 }
4789
4790 void
4791 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4792 {
4793         GIANT_REQUIRED;
4794
4795         xpt_release_devq_device(path->device, count, run_queue);
4796 }
4797
4798 static void
4799 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4800 {
4801         int     rundevq;
4802         int     s0, s1;
4803
4804         rundevq = 0;
4805         s0 = splsoftcam();
4806         s1 = splcam();
4807         if (dev->qfrozen_cnt > 0) {
4808
4809                 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4810                 dev->qfrozen_cnt -= count;
4811                 if (dev->qfrozen_cnt == 0) {
4812
4813                         /*
4814                          * No longer need to wait for a successful
4815                          * command completion.
4816                          */
4817                         dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4818
4819                         /*
4820                          * Remove any timeouts that might be scheduled
4821                          * to release this queue.
4822                          */
4823                         if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4824                                 untimeout(xpt_release_devq_timeout, dev,
4825                                           dev->c_handle);
4826                                 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4827                         }
4828
4829                         /*
4830                          * Now that we are unfrozen, schedule the
4831                          * device so any pending transactions are
4832                          * run.
4833                          */
4834                         if ((dev->ccbq.queue.entries > 0)
4835                          && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4836                          && (run_queue != 0)) {
4837                                 rundevq = 1;
4838                         }
4839                 }
4840         }
4841         splx(s1);
4842         if (rundevq != 0)
4843                 xpt_run_dev_sendq(dev->target->bus);
4844         splx(s0);
4845 }
4846
4847 void
4848 xpt_release_simq(struct cam_sim *sim, int run_queue)
4849 {
4850         int     s;
4851         struct  camq *sendq;
4852
4853         GIANT_REQUIRED;
4854
4855         sendq = &(sim->devq->send_queue);
4856         s = splcam();
4857         if (sendq->qfrozen_cnt > 0) {
4858
4859                 sendq->qfrozen_cnt--;
4860                 if (sendq->qfrozen_cnt == 0) {
4861                         struct cam_eb *bus;
4862
4863                         /*
4864                          * If there is a timeout scheduled to release this
4865                          * sim queue, remove it.  The queue frozen count is
4866                          * already at 0.
4867                          */
4868                         if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4869                                 untimeout(xpt_release_simq_timeout, sim,
4870                                           sim->c_handle);
4871                                 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4872                         }
4873                         bus = xpt_find_bus(sim->path_id);
4874                         splx(s);
4875
4876                         if (run_queue) {
4877                                 /*
4878                                  * Now that we are unfrozen run the send queue.
4879                                  * Now that we are unfrozen, run the send queue.
4880                                 xpt_run_dev_sendq(bus);
4881                         }
4882                         xpt_release_bus(bus);
4883                 } else
4884                         splx(s);
4885         } else
4886                 splx(s);
4887 }
4888
4889 static void
4890 xpt_release_simq_timeout(void *arg)
4891 {
4892         struct cam_sim *sim;
4893
4894         sim = (struct cam_sim *)arg;
4895         xpt_release_simq(sim, /* run_queue */ TRUE);
4896 }
4897
4898 void
4899 xpt_done(union ccb *done_ccb)
4900 {
4901         int s;
4902
4903         s = splcam();
4904
4905         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4906         if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4907                 /*
4908                  * Queue up any "non-immediate" type of ccb for
4909                  * handling by our SWI handler.
4910                  */
4911                 switch (done_ccb->ccb_h.path->periph->type) {
4912                 case CAM_PERIPH_BIO:
4913                         mtx_lock(&cam_bioq_lock);
4914                         TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4915                                           sim_links.tqe);
4916                         done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4917                         mtx_unlock(&cam_bioq_lock);
4918                         swi_sched(cambio_ih, 0);
4919                         break;
4920                 default:
4921                         panic("unknown periph type %d",
4922                             done_ccb->ccb_h.path->periph->type);
4923                 }
4924         }
4925         splx(s);
4926 }
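/*
 * Sketch of the SIM side: once the hardware finishes a request, the SIM
 * sets the completion status and hands the CCB back through xpt_done().
 *
 *      ccb->ccb_h.status = CAM_REQ_CMP;
 *      xpt_done(ccb);
 */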
4927
4928 union ccb *
4929 xpt_alloc_ccb(void)
4930 {
4931         union ccb *new_ccb;
4932
4933         GIANT_REQUIRED;
4934
4935         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_WAITOK);
4936         return (new_ccb);
4937 }
4938
4939 union ccb *
4940 xpt_alloc_ccb_nowait(void)
4941 {
4942         union ccb *new_ccb;
4943
4944         GIANT_REQUIRED;
4945
4946         new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_NOWAIT);
4947         return (new_ccb);
4948 }
4949
4950 void
4951 xpt_free_ccb(union ccb *free_ccb)
4952 {
4953         free(free_ccb, M_CAMXPT);
4954 }
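/*
 * Sketch: callers outside the transport pair these allocators with
 * xpt_free_ccb(), as xpt_scan_bus() does later in this file.
 *
 *      union ccb *ccb = xpt_alloc_ccb();
 *      // ... set up and execute the ccb ...
 *      xpt_free_ccb(ccb);
 */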
4955
4956
4957
4958 /* Private XPT functions */
4959
4960 /*
4961  * Get a CAM control block for the caller. Charge the structure to the device
4962  * referenced by the path.  If this device has no 'credits' then the
4963  * device already has the maximum number of outstanding operations under way
4964  * and we return NULL. If we don't have sufficient resources to allocate more
4965  * ccbs, we also return NULL.
4966  */
4967 static union ccb *
4968 xpt_get_ccb(struct cam_ed *device)
4969 {
4970         union ccb *new_ccb;
4971         int s;
4972
4973         s = splsoftcam();
4974         if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4975                 new_ccb = xpt_alloc_ccb_nowait();
4976                 if (new_ccb == NULL) {
4977                         splx(s);
4978                         return (NULL);
4979                 }
4980                 callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4981                 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4982                                   xpt_links.sle);
4983                 xpt_ccb_count++;
4984         }
4985         cam_ccbq_take_opening(&device->ccbq);
4986         SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4987         splx(s);
4988         return (new_ccb);
4989 }
4990
4991 static void
4992 xpt_release_bus(struct cam_eb *bus)
4993 {
4994         int s;
4995
4996         s = splcam();
4997         if ((--bus->refcount == 0)
4998          && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4999                 TAILQ_REMOVE(&xpt_busses, bus, links);
5000                 bus_generation++;
5001                 splx(s);
5002                 free(bus, M_CAMXPT);
5003         } else
5004                 splx(s);
5005 }
5006
5007 static struct cam_et *
5008 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
5009 {
5010         struct cam_et *target;
5011
5012         target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
5013         if (target != NULL) {
5014                 struct cam_et *cur_target;
5015
5016                 TAILQ_INIT(&target->ed_entries);
5017                 target->bus = bus;
5018                 target->target_id = target_id;
5019                 target->refcount = 1;
5020                 target->generation = 0;
5021                 timevalclear(&target->last_reset);
5022                 /*
5023                  * Hold a reference to our parent bus so it
5024                  * will not go away before we do.
5025                  */
5026                 bus->refcount++;
5027
5028                 /* Insertion sort into our bus's target list */
5029                 cur_target = TAILQ_FIRST(&bus->et_entries);
5030                 while (cur_target != NULL && cur_target->target_id < target_id)
5031                         cur_target = TAILQ_NEXT(cur_target, links);
5032
5033                 if (cur_target != NULL) {
5034                         TAILQ_INSERT_BEFORE(cur_target, target, links);
5035                 } else {
5036                         TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
5037                 }
5038                 bus->generation++;
5039         }
5040         return (target);
5041 }
5042
5043 static void
5044 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
5045 {
5046         int s;
5047
5048         s = splcam();
5049         if ((--target->refcount == 0)
5050          && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
5051                 TAILQ_REMOVE(&bus->et_entries, target, links);
5052                 bus->generation++;
5053                 splx(s);
5054                 free(target, M_CAMXPT);
5055                 xpt_release_bus(bus);
5056         } else
5057                 splx(s);
5058 }
5059
5060 static struct cam_ed *
5061 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5062 {
5063         struct     cam_path path;
5064         struct     cam_ed *device;
5065         struct     cam_devq *devq;
5066         cam_status status;
5067
5068         if (SIM_DEAD(bus->sim))
5069                 return (NULL);
5070
5071         /* Make space for us in the device queue on our bus */
5072         devq = bus->sim->devq;
5073         status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5074
5075         if (status != CAM_REQ_CMP) {
5076                 device = NULL;
5077         } else {
5078                 device = (struct cam_ed *)malloc(sizeof(*device),
5079                                                  M_CAMXPT, M_NOWAIT);
5080         }
5081
5082         if (device != NULL) {
5083                 struct cam_ed *cur_device;
5084
5085                 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5086                 device->alloc_ccb_entry.device = device;
5087                 cam_init_pinfo(&device->send_ccb_entry.pinfo);
5088                 device->send_ccb_entry.device = device;
5089                 device->target = target;
5090                 device->lun_id = lun_id;
5091                 /* Initialize our queues */
5092                 if (camq_init(&device->drvq, 0) != 0) {
5093                         free(device, M_CAMXPT);
5094                         return (NULL);
5095                 }
5096                 if (cam_ccbq_init(&device->ccbq,
5097                                   bus->sim->max_dev_openings) != 0) {
5098                         camq_fini(&device->drvq);
5099                         free(device, M_CAMXPT);
5100                         return (NULL);
5101                 }
5102                 SLIST_INIT(&device->asyncs);
5103                 SLIST_INIT(&device->periphs);
5104                 device->generation = 0;
5105                 device->owner = NULL;
5106                 /*
5107                  * Take the default quirk entry until we have inquiry
5108                  * data and can determine a better quirk to use.
5109                  */
5110                 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5111                 bzero(&device->inq_data, sizeof(device->inq_data));
5112                 device->inq_flags = 0;
5113                 device->queue_flags = 0;
5114                 device->serial_num = NULL;
5115                 device->serial_num_len = 0;
5116                 device->qfrozen_cnt = 0;
5117                 device->flags = CAM_DEV_UNCONFIGURED;
5118                 device->tag_delay_count = 0;
5119                 device->tag_saved_openings = 0;
5120                 device->refcount = 1;
5121                 callout_handle_init(&device->c_handle);
5122
5123                 /*
5124                  * Hold a reference to our parent target so it
5125                  * will not go away before we do.
5126                  */
5127                 target->refcount++;
5128
5129                 /*
5130                  * XXX should be limited by number of CCBs this bus can
5131                  * do.
5132                  */
5133                 xpt_max_ccbs += device->ccbq.devq_openings;
5134                 /* Insertion sort into our target's device list */
5135                 cur_device = TAILQ_FIRST(&target->ed_entries);
5136                 while (cur_device != NULL && cur_device->lun_id < lun_id)
5137                         cur_device = TAILQ_NEXT(cur_device, links);
5138                 if (cur_device != NULL) {
5139                         TAILQ_INSERT_BEFORE(cur_device, device, links);
5140                 } else {
5141                         TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5142                 }
5143                 target->generation++;
5144                 if (lun_id != CAM_LUN_WILDCARD) {
5145                         xpt_compile_path(&path,
5146                                          NULL,
5147                                          bus->path_id,
5148                                          target->target_id,
5149                                          lun_id);
5150                         xpt_devise_transport(&path);
5151                         xpt_release_path(&path);
5152                 }
5153         }
5154         return (device);
5155 }
5156
5157 static void
5158 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5159                    struct cam_ed *device)
5160 {
5161         int s;
5162
5163         s = splcam();
5164         if ((--device->refcount == 0)
5165          && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5166                 struct cam_devq *devq;
5167
5168                 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5169                  || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5170                         panic("Removing device while still queued for ccbs");
5171
5172                 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5173                                 untimeout(xpt_release_devq_timeout, device,
5174                                           device->c_handle);
5175
5176                 TAILQ_REMOVE(&target->ed_entries, device, links);
5177                 target->generation++;
5178                 xpt_max_ccbs -= device->ccbq.devq_openings;
5179                 if (!SIM_DEAD(bus->sim)) {
5180                         /* Release our slot in the devq */
5181                         devq = bus->sim->devq;
5182                         cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5183                 }
5184                 splx(s);
5185                 camq_fini(&device->drvq);
5186                 camq_fini(&device->ccbq.queue);
5187                 free(device, M_CAMXPT);
5188                 xpt_release_target(bus, target);
5189         } else
5190                 splx(s);
5191 }
5192
5193 static u_int32_t
5194 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5195 {
5196         int     s;
5197         int     diff;
5198         int     result;
5199         struct  cam_ed *dev;
5200
5201         dev = path->device;
5202         s = splsoftcam();
5203
5204         diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5205         result = cam_ccbq_resize(&dev->ccbq, newopenings);
5206         if (result == CAM_REQ_CMP && (diff < 0)) {
5207                 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5208         }
5209         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5210          || (dev->inq_flags & SID_CmdQue) != 0)
5211                 dev->tag_saved_openings = newopenings;
5212         /* Adjust the global limit */
5213         xpt_max_ccbs += diff;
5214         splx(s);
5215         return (result);
5216 }
5217
5218 static struct cam_eb *
5219 xpt_find_bus(path_id_t path_id)
5220 {
5221         struct cam_eb *bus;
5222
5223         for (bus = TAILQ_FIRST(&xpt_busses);
5224              bus != NULL;
5225              bus = TAILQ_NEXT(bus, links)) {
5226                 if (bus->path_id == path_id) {
5227                         bus->refcount++;
5228                         break;
5229                 }
5230         }
5231         return (bus);
5232 }
5233
5234 static struct cam_et *
5235 xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5236 {
5237         struct cam_et *target;
5238
5239         for (target = TAILQ_FIRST(&bus->et_entries);
5240              target != NULL;
5241              target = TAILQ_NEXT(target, links)) {
5242                 if (target->target_id == target_id) {
5243                         target->refcount++;
5244                         break;
5245                 }
5246         }
5247         return (target);
5248 }
5249
5250 static struct cam_ed *
5251 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5252 {
5253         struct cam_ed *device;
5254
5255         for (device = TAILQ_FIRST(&target->ed_entries);
5256              device != NULL;
5257              device = TAILQ_NEXT(device, links)) {
5258                 if (device->lun_id == lun_id) {
5259                         device->refcount++;
5260                         break;
5261                 }
5262         }
5263         return (device);
5264 }
5265
5266 typedef struct {
5267         union   ccb *request_ccb;
5268         struct  ccb_pathinq *cpi;
5269         int     counter;
5270 } xpt_scan_bus_info;
5271
5272 /*
5273  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5274  * As the scan progresses, xpt_scan_bus is used as the
5275  * completion callback function.
5276  */
5277 static void
5278 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5279 {
5280         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5281                   ("xpt_scan_bus\n"));
5282         switch (request_ccb->ccb_h.func_code) {
5283         case XPT_SCAN_BUS:
5284         {
5285                 xpt_scan_bus_info *scan_info;
5286                 union   ccb *work_ccb;
5287                 struct  cam_path *path;
5288                 u_int   i;
5289                 u_int   max_target;
5290                 u_int   initiator_id;
5291
5292                 /* Find out the characteristics of the bus */
5293                 work_ccb = xpt_alloc_ccb();
5294                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5295                               request_ccb->ccb_h.pinfo.priority);
5296                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5297                 xpt_action(work_ccb);
5298                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5299                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5300                         xpt_free_ccb(work_ccb);
5301                         xpt_done(request_ccb);
5302                         return;
5303                 }
5304
5305                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5306                         /*
5307                          * Can't scan the bus on an adapter that
5308                          * cannot perform the initiator role.
5309                          */
5310                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5311                         xpt_free_ccb(work_ccb);
5312                         xpt_done(request_ccb);
5313                         return;
5314                 }
5315
5316                 /* Save some state for use while we probe for devices */
5317                 scan_info = (xpt_scan_bus_info *)
5318                     malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
5319                 scan_info->request_ccb = request_ccb;
5320                 scan_info->cpi = &work_ccb->cpi;
5321
5322                 /* Cache on our stack so we can work asynchronously */
5323                 max_target = scan_info->cpi->max_target;
5324                 initiator_id = scan_info->cpi->initiator_id;
5325
5326
5327                 /*
5328                  * We can scan all targets in parallel, or do it sequentially.
5329                  */
5330                 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5331                         max_target = 0;
5332                         scan_info->counter = 0;
5333                 } else {
5334                         scan_info->counter = scan_info->cpi->max_target + 1;
5335                         if (scan_info->cpi->initiator_id < scan_info->counter) {
5336                                 scan_info->counter--;
5337                         }
5338                 }
5339                 
5340                 for (i = 0; i <= max_target; i++) {
5341                         cam_status status;
5342                         if (i == initiator_id)
5343                                 continue;
5344
5345                         status = xpt_create_path(&path, xpt_periph,
5346                                                  request_ccb->ccb_h.path_id,
5347                                                  i, 0);
5348                         if (status != CAM_REQ_CMP) {
5349                                 printf("xpt_scan_bus: xpt_create_path failed"
5350                                        " with status %#x, bus scan halted\n",
5351                                        status);
5352                                 free(scan_info, M_TEMP);
5353                                 request_ccb->ccb_h.status = status;
5354                                 xpt_free_ccb(work_ccb);
5355                                 xpt_done(request_ccb);
5356                                 break;
5357                         }
5358                         work_ccb = xpt_alloc_ccb();
5359                         xpt_setup_ccb(&work_ccb->ccb_h, path,
5360                                       request_ccb->ccb_h.pinfo.priority);
5361                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5362                         work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5363                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5364                         work_ccb->crcn.flags = request_ccb->crcn.flags;
5365                         xpt_action(work_ccb);
5366                 }
5367                 break;
5368         }
5369         case XPT_SCAN_LUN:
5370         {
5371                 cam_status status;
5372                 struct cam_path *path;
5373                 xpt_scan_bus_info *scan_info;
5374                 path_id_t path_id;
5375                 target_id_t target_id;
5376                 lun_id_t lun_id;
5377
5378                 /* Reuse the same CCB to query if a device was really found */
5379                 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5380                 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5381                               request_ccb->ccb_h.pinfo.priority);
5382                 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5383
5384                 path_id = request_ccb->ccb_h.path_id;
5385                 target_id = request_ccb->ccb_h.target_id;
5386                 lun_id = request_ccb->ccb_h.target_lun;
5387                 xpt_action(request_ccb);
5388
5389                 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5390                         struct cam_ed *device;
5391                         struct cam_et *target;
5392                         int s, phl;
5393
5394                         /*
5395                          * If we already probed lun 0 successfully, or
5396                          * we have additional configured luns on this
5397                          * target that might have "gone away", go on to
5398                          * the next lun.
5399                          */
5400                         target = request_ccb->ccb_h.path->target;
5401                         /*
5402                          * We may touch devices that we don't
5403                          * hold references to, so ensure they
5404                          * don't disappear out from under us.
5405                          * The target above is referenced by the
5406                          * path in the request ccb.
5407                          */
5408                         phl = 0;
5409                         s = splcam();
5410                         device = TAILQ_FIRST(&target->ed_entries);
5411                         if (device != NULL) {
5412                                 phl = CAN_SRCH_HI_SPARSE(device);
5413                                 if (device->lun_id == 0)
5414                                         device = TAILQ_NEXT(device, links);
5415                         }
5416                         splx(s);
5417                         if ((lun_id != 0) || (device != NULL)) {
5418                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5419                                         lun_id++;
5420                         }
5421                 } else {
5422                         struct cam_ed *device;
5423                         
5424                         device = request_ccb->ccb_h.path->device;
5425
5426                         if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5427                                 /* Try the next lun */
5428                                 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5429                                   || CAN_SRCH_HI_DENSE(device))
5430                                         lun_id++;
5431                         }
5432                 }
5433
5434                 /*
5435                  * Free the current request path; we're done with it.
5436                  */
5437                 xpt_free_path(request_ccb->ccb_h.path);
5438
5439                 /*
5440                  * Check to see if we should scan any further luns.
5441                  */
5442                 if (lun_id == request_ccb->ccb_h.target_lun
5443                  || lun_id > scan_info->cpi->max_lun) {
5444                         int done;
5445
5446  hop_again:
5447                         done = 0;
5448                         if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5449                                 scan_info->counter++;
5450                                 if (scan_info->counter == 
5451                                     scan_info->cpi->initiator_id) {
5452                                         scan_info->counter++;
5453                                 }
5454                                 if (scan_info->counter >=
5455                                     scan_info->cpi->max_target+1) {
5456                                         done = 1;
5457                                 }
5458                         } else {
5459                                 scan_info->counter--;
5460                                 if (scan_info->counter == 0) {
5461                                         done = 1;
5462                                 }
5463                         }
5464                         if (done) {
5465                                 xpt_free_ccb(request_ccb);
5466                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5467                                 request_ccb = scan_info->request_ccb;
5468                                 free(scan_info, M_TEMP);
5469                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
5470                                 xpt_done(request_ccb);
5471                                 break;
5472                         }
5473
5474                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5475                                 break;
5476                         }
5477                         status = xpt_create_path(&path, xpt_periph,
5478                             scan_info->request_ccb->ccb_h.path_id,
5479                             scan_info->counter, 0);
5480                         if (status != CAM_REQ_CMP) {
5481                                 printf("xpt_scan_bus: xpt_create_path failed"
5482                                     " with status %#x, bus scan halted\n",
5483                                     status);
5484                                 xpt_free_ccb(request_ccb);
5485                                 xpt_free_ccb((union ccb *)scan_info->cpi);
5486                                 request_ccb = scan_info->request_ccb;
5487                                 free(scan_info, M_TEMP);
5488                                 request_ccb->ccb_h.status = status;
5489                                 xpt_done(request_ccb);
5490                                 break;
5491                         }
5492                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5493                             request_ccb->ccb_h.pinfo.priority);
5494                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5495                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5496                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5497                         request_ccb->crcn.flags =
5498                             scan_info->request_ccb->crcn.flags;
5499                 } else {
5500                         status = xpt_create_path(&path, xpt_periph,
5501                                                  path_id, target_id, lun_id);
5502                         if (status != CAM_REQ_CMP) {
5503                                 printf("xpt_scan_bus: xpt_create_path failed "
5504                                        "with status %#x, halting LUN scan\n",
5505                                        status);
5506                                 goto hop_again;
5507                         }
5508                         xpt_setup_ccb(&request_ccb->ccb_h, path,
5509                                       request_ccb->ccb_h.pinfo.priority);
5510                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5511                         request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5512                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5513                         request_ccb->crcn.flags =
5514                                 scan_info->request_ccb->crcn.flags;
5515                 }
5516                 xpt_action(request_ccb);
5517                 break;
5518         }
5519         default:
5520                 break;
5521         }
5522 }
5523
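/*
 * Probe state machine.  A sketch of the usual flow for a responding
 * device, as driven by probestart()/probedone() below (error and
 * retry paths omitted):
 *
 *   PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
 *     -> PROBE_MODE_SENSE (only if the device enables tagged queuing)
 *     -> PROBE_SERIAL_NUM -> PROBE_TUR_FOR_NEGOTIATION
 *     [-> PROBE_INQUIRY_BASIC_DV1 -> PROBE_INQUIRY_BASIC_DV2
 *         -> PROBE_DV_EXIT, for lun 0 on sync-capable SPI devices]
 */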
5524 typedef enum {
5525         PROBE_TUR,
5526         PROBE_INQUIRY,  /* this counts as DV0 for Basic Domain Validation */
5527         PROBE_FULL_INQUIRY,
5528         PROBE_MODE_SENSE,
5529         PROBE_SERIAL_NUM,
5530         PROBE_TUR_FOR_NEGOTIATION,
5531         PROBE_INQUIRY_BASIC_DV1,
5532         PROBE_INQUIRY_BASIC_DV2,
5533         PROBE_DV_EXIT
5534 } probe_action;
5535
5536 typedef enum {
5537         PROBE_INQUIRY_CKSUM     = 0x01,
5538         PROBE_SERIAL_CKSUM      = 0x02,
5539         PROBE_NO_ANNOUNCE       = 0x04
5540 } probe_flags;
5541
5542 typedef struct {
5543         TAILQ_HEAD(, ccb_hdr) request_ccbs;
5544         probe_action    action;
5545         union ccb       saved_ccb;
5546         probe_flags     flags;
5547         MD5_CTX         context;
5548         u_int8_t        digest[16];
5549 } probe_softc;
5550
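/*
 * Scan a single lun.  If request_ccb is NULL, this routine allocates
 * its own CCB and path and arranges for xptscandone() to free them on
 * completion.  The request is then either queued to an existing probe
 * periph for this path or used to create a new one.
 */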
5551 static void
5552 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5553              cam_flags flags, union ccb *request_ccb)
5554 {
5555         struct ccb_pathinq cpi;
5556         cam_status status;
5557         struct cam_path *new_path;
5558         struct cam_periph *old_periph;
5559         int s;
5560         
5561         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5562                   ("xpt_scan_lun\n"));
5563         
5564         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5565         cpi.ccb_h.func_code = XPT_PATH_INQ;
5566         xpt_action((union ccb *)&cpi);
5567
5568         if (cpi.ccb_h.status != CAM_REQ_CMP) {
5569                 if (request_ccb != NULL) {
5570                         request_ccb->ccb_h.status = cpi.ccb_h.status;
5571                         xpt_done(request_ccb);
5572                 }
5573                 return;
5574         }
5575
5576         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5577                 /*
5578                  * Can't scan the bus on an adapter that
5579                  * cannot perform the initiator role.
5580                  */
5581                 if (request_ccb != NULL) {
5582                         request_ccb->ccb_h.status = CAM_REQ_CMP;
5583                         xpt_done(request_ccb);
5584                 }
5585                 return;
5586         }
5587
5588         if (request_ccb == NULL) {
5589                 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5590                 if (request_ccb == NULL) {
5591                         xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
5592                             "can't continue\n");
5593                         return;
5594                 }
5595                 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5596                 if (new_path == NULL) {
5597                         xpt_print(path, "xpt_scan_lun: can't allocate path, "
5598                             "can't continue\n");
5599                         free(request_ccb, M_TEMP);
5600                         return;
5601                 }
5602                 status = xpt_compile_path(new_path, xpt_periph,
5603                                           path->bus->path_id,
5604                                           path->target->target_id,
5605                                           path->device->lun_id);
5606
5607                 if (status != CAM_REQ_CMP) {
5608                         xpt_print(path, "xpt_scan_lun: can't compile path, "
5609                             "can't continue\n");
5610                         free(request_ccb, M_TEMP);
5611                         free(new_path, M_TEMP);
5612                         return;
5613                 }
5614                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5615                 request_ccb->ccb_h.cbfcnp = xptscandone;
5616                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5617                 request_ccb->crcn.flags = flags;
5618         }
5619
5620         s = splsoftcam();
5621         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5622                 probe_softc *softc;
5623
5624                 softc = (probe_softc *)old_periph->softc;
5625                 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5626                                   periph_links.tqe);
5627         } else {
5628                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5629                                           probestart, "probe",
5630                                           CAM_PERIPH_BIO,
5631                                           request_ccb->ccb_h.path, NULL, 0,
5632                                           request_ccb);
5633
5634                 if (status != CAM_REQ_CMP) {
5635                         xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
5636                             "returned an error, can't continue probe\n");
5637                         request_ccb->ccb_h.status = status;
5638                         xpt_done(request_ccb);
5639                 }
5640         }
5641         splx(s);
5642 }
5643
5644 static void
5645 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5646 {
5647         xpt_release_path(done_ccb->ccb_h.path);
5648         free(done_ccb->ccb_h.path, M_TEMP);
5649         free(done_ccb, M_TEMP);
5650 }
5651
5652 static cam_status
5653 proberegister(struct cam_periph *periph, void *arg)
5654 {
5655         union ccb *request_ccb; /* CCB representing the probe request */
5656         cam_status status;
5657         probe_softc *softc;
5658
5659         request_ccb = (union ccb *)arg;
5660         if (periph == NULL) {
5661                 printf("proberegister: periph was NULL!!\n");
5662                 return(CAM_REQ_CMP_ERR);
5663         }
5664
5665         if (request_ccb == NULL) {
5666                 printf("proberegister: no probe CCB, "
5667                        "can't register device\n");
5668                 return(CAM_REQ_CMP_ERR);
5669         }
5670
5671         softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5672
5673         if (softc == NULL) {
5674                 printf("proberegister: Unable to probe new device. "
5675                        "Unable to allocate softc\n");                           
5676                 return(CAM_REQ_CMP_ERR);
5677         }
5678         TAILQ_INIT(&softc->request_ccbs);
5679         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5680                           periph_links.tqe);
5681         softc->flags = 0;
5682         periph->softc = softc;
5683         status = cam_periph_acquire(periph);
5684         if (status != CAM_REQ_CMP) {
5685                 return (status);
5686         }
5687
5688
5689         /*
5690          * Ensure we've waited at least a bus settle
5691          * delay before attempting to probe the device.
5692          * For HBAs that don't do bus resets, this won't make a difference.
5693          */
5694         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5695                                       scsi_delay);
5696         probeschedule(periph);
5697         return(CAM_REQ_CMP);
5698 }
5699
5700 static void
5701 probeschedule(struct cam_periph *periph)
5702 {
5703         struct ccb_pathinq cpi;
5704         union ccb *ccb;
5705         probe_softc *softc;
5706
5707         softc = (probe_softc *)periph->softc;
5708         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5709
5710         xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5711         cpi.ccb_h.func_code = XPT_PATH_INQ;
5712         xpt_action((union ccb *)&cpi);
5713
5714         /*
5715          * If a device has gone away and another device, or the same one,
5716          * is back in the same place, it should have a unit attention
5717          * condition pending.  It will not report the unit attention in
5718          * response to an inquiry, which may leave invalid transfer
5719          * negotiations in effect.  The TUR will reveal the unit attention
5720          * condition.  Only send the TUR for lun 0, since some devices 
5721          * will get confused by commands other than inquiry to non-existent
5722                          * luns.  If you think a device has gone away, start your scan from
5723                          * lun 0.  This will ensure that any bogus transfer settings are
5724          * invalidated.
5725          *
5726          * If we haven't seen the device before and the controller supports
5727          * some kind of transfer negotiation, negotiate with the first
5728          * sent command if no bus reset was performed at startup.  This
5729          * ensures that the device is not confused by transfer negotiation
5730          * settings left over by loader or BIOS action.
5731          */
5732         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5733          && (ccb->ccb_h.target_lun == 0)) {
5734                 softc->action = PROBE_TUR;
5735         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5736               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5737                 proberequestdefaultnegotiation(periph);
5738                 softc->action = PROBE_INQUIRY;
5739         } else {
5740                 softc->action = PROBE_INQUIRY;
5741         }
5742
5743         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5744                 softc->flags |= PROBE_NO_ANNOUNCE;
5745         else
5746                 softc->flags &= ~PROBE_NO_ANNOUNCE;
5747
5748         xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5749 }
5750
5751 static void
5752 probestart(struct cam_periph *periph, union ccb *start_ccb)
5753 {
5754         /* Probe the device that our peripheral driver points to */
5755         struct ccb_scsiio *csio;
5756         probe_softc *softc;
5757
5758         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5759
5760         softc = (probe_softc *)periph->softc;
5761         csio = &start_ccb->csio;
5762
5763         switch (softc->action) {
5764         case PROBE_TUR:
5765         case PROBE_TUR_FOR_NEGOTIATION:
5766         case PROBE_DV_EXIT:
5767         {
5768                 scsi_test_unit_ready(csio,
5769                                      /*retries*/4,
5770                                      probedone,
5771                                      MSG_SIMPLE_Q_TAG,
5772                                      SSD_FULL_SIZE,
5773                                      /*timeout*/60000);
5774                 break;
5775         }
5776         case PROBE_INQUIRY:
5777         case PROBE_FULL_INQUIRY:
5778         case PROBE_INQUIRY_BASIC_DV1:
5779         case PROBE_INQUIRY_BASIC_DV2:
5780         {
5781                 u_int inquiry_len;
5782                 struct scsi_inquiry_data *inq_buf;
5783
5784                 inq_buf = &periph->path->device->inq_data;
5785
5786                 /*
5787                  * If the device is currently configured, we calculate an
5788                  * MD5 checksum of the inquiry data, and if the serial number
5789                  * length is greater than 0, add the serial number data
5790                  * into the checksum as well.  Once the inquiry and the
5791                  * serial number check finish, we attempt to figure out
5792                  * whether we still have the same device.
5793                  */
5794                 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5795                         
5796                         MD5Init(&softc->context);
5797                         MD5Update(&softc->context, (unsigned char *)inq_buf,
5798                                   sizeof(struct scsi_inquiry_data));
5799                         softc->flags |= PROBE_INQUIRY_CKSUM;
5800                         if (periph->path->device->serial_num_len > 0) {
5801                                 MD5Update(&softc->context,
5802                                           periph->path->device->serial_num,
5803                                           periph->path->device->serial_num_len);
5804                                 softc->flags |= PROBE_SERIAL_CKSUM;
5805                         }
5806                         MD5Final(softc->digest, &softc->context);
5807                 } 
5808
5809                 if (softc->action == PROBE_INQUIRY)
5810                         inquiry_len = SHORT_INQUIRY_LENGTH;
5811                 else
5812                         inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5813
5814                 /*
5815                  * Some parallel SCSI devices fail to send an
5816                  * ignore wide residue message when dealing with
5817                  * odd length inquiry requests.  Round up to be
5818                  * safe.
5819                  */
5820                 inquiry_len = roundup2(inquiry_len, 2);
5821         
5822                 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5823                  || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5824                         inq_buf = malloc(inquiry_len, M_TEMP, M_NOWAIT);
5825                 }
5826                 if (inq_buf == NULL) {
5827                         xpt_print(periph->path, "malloc failure - skipping Basic "
5828                             "Domain Validation\n");
5829                         softc->action = PROBE_DV_EXIT;
5830                         scsi_test_unit_ready(csio,
5831                                              /*retries*/4,
5832                                              probedone,
5833                                              MSG_SIMPLE_Q_TAG,
5834                                              SSD_FULL_SIZE,
5835                                              /*timeout*/60000);
5836                         break;
5837                 }
5838                 scsi_inquiry(csio,
5839                              /*retries*/4,
5840                              probedone,
5841                              MSG_SIMPLE_Q_TAG,
5842                              (u_int8_t *)inq_buf,
5843                              inquiry_len,
5844                              /*evpd*/FALSE,
5845                              /*page_code*/0,
5846                              SSD_MIN_SIZE,
5847                              /*timeout*/60 * 1000);
5848                 break;
5849         }
5850         case PROBE_MODE_SENSE:
5851         {
5852                 void  *mode_buf;
5853                 int    mode_buf_len;
5854
5855                 mode_buf_len = sizeof(struct scsi_mode_header_6)
5856                              + sizeof(struct scsi_mode_blk_desc)
5857                              + sizeof(struct scsi_control_page);
5858                 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5859                 if (mode_buf != NULL) {
5860                         scsi_mode_sense(csio,
5861                                         /*retries*/4,
5862                                         probedone,
5863                                         MSG_SIMPLE_Q_TAG,
5864                                         /*dbd*/FALSE,
5865                                         SMS_PAGE_CTRL_CURRENT,
5866                                         SMS_CONTROL_MODE_PAGE,
5867                                         mode_buf,
5868                                         mode_buf_len,
5869                                         SSD_FULL_SIZE,
5870                                         /*timeout*/60000);
5871                         break;
5872                 }
5873                 xpt_print(periph->path, "Unable to mode sense control page - "
5874                     "malloc failure\n");
5875                 softc->action = PROBE_SERIAL_NUM;
5876         }
5877         /* FALLTHROUGH */
5878         case PROBE_SERIAL_NUM:
5879         {
5880                 struct scsi_vpd_unit_serial_number *serial_buf;
5881                 struct cam_ed* device;
5882
5883                 serial_buf = NULL;
5884                 device = periph->path->device;
5885                 device->serial_num = NULL;
5886                 device->serial_num_len = 0;
5887
5888                 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5889                         serial_buf = (struct scsi_vpd_unit_serial_number *)
5890                                 malloc(sizeof(*serial_buf), M_TEMP,
5891                                         M_NOWAIT | M_ZERO);
5892
5893                 if (serial_buf != NULL) {
5894                         scsi_inquiry(csio,
5895                                      /*retries*/4,
5896                                      probedone,
5897                                      MSG_SIMPLE_Q_TAG,
5898                                      (u_int8_t *)serial_buf,
5899                                      sizeof(*serial_buf),
5900                                      /*evpd*/TRUE,
5901                                      SVPD_UNIT_SERIAL_NUMBER,
5902                                      SSD_MIN_SIZE,
5903                                      /*timeout*/60 * 1000);
5904                         break;
5905                 }
5906                 /*
5907                  * We'll have to do without; let our probedone
5908                  * routine finish up for us.
5909                  */
5910                 start_ccb->csio.data_ptr = NULL;
5911                 probedone(periph, start_ccb);
5912                 return;
5913         }
5914         }
5915         xpt_action(start_ccb);
5916 }
5917
5918 static void
5919 proberequestdefaultnegotiation(struct cam_periph *periph)
5920 {
5921         struct ccb_trans_settings cts;
5922
5923         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5924         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5925         cts.type = CTS_TYPE_USER_SETTINGS;
5926         xpt_action((union ccb *)&cts);
5927         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5928                 return;
5929         }
5930         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5931         cts.type = CTS_TYPE_CURRENT_SETTINGS;
5932         xpt_action((union ccb *)&cts);
5933 }
5934
5935 /*
5936  * Backoff Negotiation Code: only pertinent for SPI devices.
5937  */
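/*
 * Returns 1 if a slower sync rate was successfully requested, so the
 * caller should retry Domain Validation at the new rate; returns 0 if
 * no further backoff is possible and DV should be abandoned.
 */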
5938 static int
5939 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
5940 {
5941         struct ccb_trans_settings cts;
5942         struct ccb_trans_settings_spi *spi;
5943
5944         memset(&cts, 0, sizeof (cts));
5945         xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5946         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5947         cts.type = CTS_TYPE_CURRENT_SETTINGS;
5948         xpt_action((union ccb *)&cts);
5949         if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5950                 if (bootverbose) {
5951                         xpt_print(periph->path,
5952                             "failed to get current device settings\n");
5953                 }
5954                 return (0);
5955         }
5956         if (cts.transport != XPORT_SPI) {
5957                 if (bootverbose) {
5958                         xpt_print(periph->path, "not SPI transport\n");
5959                 }
5960                 return (0);
5961         }
5962         spi = &cts.xport_specific.spi;
5963
5964         /*
5965          * We cannot renegotiate sync rate if we don't have one.
5966          */
5967         if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
5968                 if (bootverbose) {
5969                         xpt_print(periph->path, "no sync rate known\n");
5970                 }
5971                 return (0);
5972         }
5973
5974         /*
5975          * We'll assert that we don't have to touch PPR options; the
5976          * SIM will see what we do with period and offset and adjust
5977          * the PPR options as appropriate.
5978          */
5979
5980         /*
5981          * A sync rate with unknown or zero offset is nonsensical.
5982          * A sync period of zero means Async.
5983          */
5984         if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
5985          || spi->sync_offset == 0 || spi->sync_period == 0) {
5986                 if (bootverbose) {
5987                         xpt_print(periph->path, "no sync rate available\n");
5988                 }
5989                 return (0);
5990         }
5991
5992         if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
5993                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5994                     ("hit async: giving up on DV\n"));
5995                 return (0);
5996         }
5997
5998
5999         /*
6000          * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
6001          * We don't try to remember 'last' settings to see if the SIM actually
6002          * us that a requested speed is bad, but otherwise don't try to
6003          * us that a requested speed is bad, but otherwise don't try and
6004          * check the speed due to the asynchronous and handshake nature
6005          * of speed setting.
6006          */
6007         spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
6008         for (;;) {
6009                 spi->sync_period++;
6010                 if (spi->sync_period >= 0xf) {
6011                         spi->sync_period = 0;
6012                         spi->sync_offset = 0;
6013                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6014                             ("setting to async for DV\n"));
6015                         /*
6016                          * Once we hit async, we don't want to try
6017                          * any more settings.
6018                          */
6019                         device->flags |= CAM_DEV_DV_HIT_BOTTOM;
6020                 } else if (bootverbose) {
6021                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6022                             ("DV: period 0x%x\n", spi->sync_period));
6023                         printf("setting period to 0x%x\n", spi->sync_period);
6024                 }
6025                 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6026                 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6027                 xpt_action((union ccb *)&cts);
6028                 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6029                         break;
6030                 }
6031                 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6032                     ("DV: failed to set period 0x%x\n", spi->sync_period));
6033                 if (spi->sync_period == 0) {
6034                         return (0);
6035                 }
6036         }
6037         return (1);
6038 }
6039
6040 static void
6041 probedone(struct cam_periph *periph, union ccb *done_ccb)
6042 {
6043         probe_softc *softc;
6044         struct cam_path *path;
6045         u_int32_t  priority;
6046
6047         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
6048
6049         softc = (probe_softc *)periph->softc;
6050         path = done_ccb->ccb_h.path;
6051         priority = done_ccb->ccb_h.pinfo.priority;
6052
6053         switch (softc->action) {
6054         case PROBE_TUR:
6055         {
6056                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6057
6058                         if (cam_periph_error(done_ccb, 0,
6059                                              SF_NO_PRINT, NULL) == ERESTART)
6060                                 return;
6061                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
6062                                 /* Don't wedge the queue */
6063                                 xpt_release_devq(done_ccb->ccb_h.path,
6064                                                  /*count*/1,
6065                                                  /*run_queue*/TRUE);
6066                 }
6067                 softc->action = PROBE_INQUIRY;
6068                 xpt_release_ccb(done_ccb);
6069                 xpt_schedule(periph, priority);
6070                 return;
6071         }
6072         case PROBE_INQUIRY:
6073         case PROBE_FULL_INQUIRY:
6074         {
6075                 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6076                         struct scsi_inquiry_data *inq_buf;
6077                         u_int8_t periph_qual;
6078
6079                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6080                         inq_buf = &path->device->inq_data;
6081
6082                         periph_qual = SID_QUAL(inq_buf);
6083                         
6084                         switch(periph_qual) {
6085                         case SID_QUAL_LU_CONNECTED:
6086                         {
6087                                 u_int8_t len;
6088
6089                                 /*
6090                                  * We conservatively request only
6091                                  * SHORT_INQUIRY_LEN bytes of inquiry
6092                                  * information during our first try
6093                                  * at sending an INQUIRY. If the device
6094                                  * has more information to give,
6095                                  * perform a second request specifying
6096                                  * the amount of information the device
6097                                  * is willing to give.
6098                                  */
6099                                 len = inq_buf->additional_length
6100                                     + offsetof(struct scsi_inquiry_data,
6101                                                additional_length) + 1;
6102                                 if (softc->action == PROBE_INQUIRY
6103                                  && len > SHORT_INQUIRY_LENGTH) {
6104                                         softc->action = PROBE_FULL_INQUIRY;
6105                                         xpt_release_ccb(done_ccb);
6106                                         xpt_schedule(periph, priority);
6107                                         return;
6108                                 }
6109
6110                                 xpt_find_quirk(path->device);
6111
6112                                 xpt_devise_transport(path);
6113                                 if (INQ_DATA_TQ_ENABLED(inq_buf))
6114                                         softc->action = PROBE_MODE_SENSE;
6115                                 else
6116                                         softc->action = PROBE_SERIAL_NUM;
6117
6118                                 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6119
6120                                 xpt_release_ccb(done_ccb);
6121                                 xpt_schedule(periph, priority);
6122                                 return;
6123                         }
6124                         default:
6125                                 break;
6126                         }
6127                 } else if (cam_periph_error(done_ccb, 0,
6128                                             done_ccb->ccb_h.target_lun > 0
6129                                             ? SF_RETRY_UA|SF_QUIET_IR
6130                                             : SF_RETRY_UA,
6131                                             &softc->saved_ccb) == ERESTART) {
6132                         return;
6133                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6134                         /* Don't wedge the queue */
6135                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6136                                          /*run_queue*/TRUE);
6137                 }
6138                 /*
6139                  * If we get to this point, we got an error status back
6140                  * from the inquiry and the error status doesn't require
6141                  * automatically retrying the command.  Therefore, the
6142                  * inquiry failed.  If we had inquiry information before
6143                  * for this device, but this latest inquiry command failed,
6144                  * the device has probably gone away.  If this device isn't
6145                  * already marked unconfigured, notify the peripheral
6146                  * drivers that this device is no more.
6147                  */
6148                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6149                         /* Send the async notification. */
6150                         xpt_async(AC_LOST_DEVICE, path, NULL);
6151
6152                 xpt_release_ccb(done_ccb);
6153                 break;
6154         }
6155         case PROBE_MODE_SENSE:
6156         {
6157                 struct ccb_scsiio *csio;
6158                 struct scsi_mode_header_6 *mode_hdr;
6159
6160                 csio = &done_ccb->csio;
6161                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6162                 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6163                         struct scsi_control_page *page;
6164                         u_int8_t *offset;
6165
6166                         offset = ((u_int8_t *)&mode_hdr[1])
6167                             + mode_hdr->blk_desc_len;
6168                         page = (struct scsi_control_page *)offset;
6169                         path->device->queue_flags = page->queue_flags;
6170                 } else if (cam_periph_error(done_ccb, 0,
6171                                             SF_RETRY_UA|SF_NO_PRINT,
6172                                             &softc->saved_ccb) == ERESTART) {
6173                         return;
6174                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6175                         /* Don't wedge the queue */
6176                         xpt_release_devq(done_ccb->ccb_h.path,
6177                                          /*count*/1, /*run_queue*/TRUE);
6178                 }
6179                 xpt_release_ccb(done_ccb);
6180                 free(mode_hdr, M_TEMP);
6181                 softc->action = PROBE_SERIAL_NUM;
6182                 xpt_schedule(periph, priority);
6183                 return;
6184         }
6185         case PROBE_SERIAL_NUM:
6186         {
6187                 struct ccb_scsiio *csio;
6188                 struct scsi_vpd_unit_serial_number *serial_buf;
6189                 u_int32_t  priority;
6190                 int changed;
6191                 int have_serialnum;
6192
6193                 changed = 1;
6194                 have_serialnum = 0;
6195                 csio = &done_ccb->csio;
6196                 priority = done_ccb->ccb_h.pinfo.priority;
6197                 serial_buf =
6198                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6199
6200                 /* Clean up from previous instance of this device */
6201                 if (path->device->serial_num != NULL) {
6202                         free(path->device->serial_num, M_CAMXPT);
6203                         path->device->serial_num = NULL;
6204                         path->device->serial_num_len = 0;
6205                 }
6206
6207                 if (serial_buf == NULL) {
6208                         /*
6209                          * Don't process the command as it was never sent
6210                          */
6211                 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6212                         && (serial_buf->length > 0)) {
6213
6214                         have_serialnum = 1;
6215                         path->device->serial_num =
6216                                 (u_int8_t *)malloc((serial_buf->length + 1),
6217                                                    M_CAMXPT, M_NOWAIT);
6218                         if (path->device->serial_num != NULL) {
6219                                 bcopy(serial_buf->serial_num,
6220                                       path->device->serial_num,
6221                                       serial_buf->length);
6222                                 path->device->serial_num_len =
6223                                     serial_buf->length;
6224                                 path->device->serial_num[serial_buf->length]
6225                                     = '\0';
6226                         }
6227                 } else if (cam_periph_error(done_ccb, 0,
6228                                             SF_RETRY_UA|SF_NO_PRINT,
6229                                             &softc->saved_ccb) == ERESTART) {
6230                         return;
6231                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6232                         /* Don't wedge the queue */
6233                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6234                                          /*run_queue*/TRUE);
6235                 }
6236                 
6237                 /*
6238                  * Let's see if we have seen this device before.
6239                  */
6240                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6241                         MD5_CTX context;
6242                         u_int8_t digest[16];
6243
6244                         MD5Init(&context);
6245                         
6246                         MD5Update(&context,
6247                                   (unsigned char *)&path->device->inq_data,
6248                                   sizeof(struct scsi_inquiry_data));
6249
6250                         if (have_serialnum)
6251                                 MD5Update(&context, serial_buf->serial_num,
6252                                           serial_buf->length);
6253
6254                         MD5Final(digest, &context);
6255                         if (bcmp(softc->digest, digest, 16) == 0)
6256                                 changed = 0;
6257
6258                         /*
6259                          * XXX Do we need to do a TUR in order to ensure
6260                          *     that the device really hasn't changed???
6261                          */
6262                         if ((changed != 0)
6263                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6264                                 xpt_async(AC_LOST_DEVICE, path, NULL);
6265                 }
6266                 if (serial_buf != NULL)
6267                         free(serial_buf, M_TEMP);
6268
6269                 if (changed != 0) {
6270                         /*
6271                          * Now that we have all the necessary
6272                          * information to safely perform transfer
6273                          * negotiations, do so.  Controllers don't perform
6274                          * any negotiation or tagged queuing until
6275                          * after the first XPT_SET_TRAN_SETTINGS ccb is
6276                          * received.  So, on a new device, just retrieve
6277                          * the user settings, and set them as the current
6278                          * settings to set the device up.
6279                          */
6280                         proberequestdefaultnegotiation(periph);
6281                         xpt_release_ccb(done_ccb);
6282
6283                         /*
6284                          * Perform a TUR to allow the controller to
6285                          * perform any necessary transfer negotiation.
6286                          */
6287                         softc->action = PROBE_TUR_FOR_NEGOTIATION;
6288                         xpt_schedule(periph, priority);
6289                         return;
6290                 }
6291                 xpt_release_ccb(done_ccb);
6292                 break;
6293         }
6294         case PROBE_TUR_FOR_NEGOTIATION:
6295         case PROBE_DV_EXIT:
6296                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6297                         /* Don't wedge the queue */
6298                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6299                                          /*run_queue*/TRUE);
6300                 }
6301                 /*
6302                  * Do Domain Validation for lun 0 on devices that claim
6303                  * to support Synchronous Transfer modes.
6304                  */
6305                 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
6306                  && done_ccb->ccb_h.target_lun == 0
6307                  && (path->device->inq_data.flags & SID_Sync) != 0
6308                  && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6309                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6310                             ("Begin Domain Validation\n"));
6311                         path->device->flags |= CAM_DEV_IN_DV;
6312                         xpt_release_ccb(done_ccb);
6313                         softc->action = PROBE_INQUIRY_BASIC_DV1;
6314                         xpt_schedule(periph, priority);
6315                         return;
6316                 }
6317                 if (softc->action == PROBE_DV_EXIT) {
6318                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6319                             ("Leave Domain Validation\n"));
6320                 }
6321                 path->device->flags &=
6322                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6323                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6324                         /* Inform the XPT that a new device has been found */
6325                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6326                         xpt_action(done_ccb);
6327                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6328                                   done_ccb);
6329                 }
6330                 xpt_release_ccb(done_ccb);
6331                 break;
6332         case PROBE_INQUIRY_BASIC_DV1:
6333         case PROBE_INQUIRY_BASIC_DV2:
6334         {
6335                 struct scsi_inquiry_data *nbuf;
6336                 struct ccb_scsiio *csio;
6337
6338                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6339                         /* Don't wedge the queue */
6340                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6341                                          /*run_queue*/TRUE);
6342                 }
6343                 csio = &done_ccb->csio;
6344                 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
6345                 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6346                         xpt_print(path,
6347                             "inquiry data fails comparison at DV%d step\n",
6348                             softc->action == PROBE_INQUIRY_BASIC_DV1? 1 : 2);
6349                         if (proberequestbackoff(periph, path->device)) {
6350                                 path->device->flags &= ~CAM_DEV_IN_DV;
6351                                 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6352                         } else {
6353                                 /* give up */
6354                                 softc->action = PROBE_DV_EXIT;
6355                         }
6356                         free(nbuf, M_TEMP);
6357                         xpt_release_ccb(done_ccb);
6358                         xpt_schedule(periph, priority);
6359                         return;
6360                 }
6361                 free(nbuf, M_TEMP);
6362                 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6363                         softc->action = PROBE_INQUIRY_BASIC_DV2;
6364                         xpt_release_ccb(done_ccb);
6365                         xpt_schedule(periph, priority);
6366                         return;
6367                 }
6368                 if (softc->action == PROBE_DV_EXIT) {
6369                         CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6370                             ("Leave Domain Validation Successfully\n"));
6371                 }
6372                 path->device->flags &=
6373                     ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6374                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6375                         /* Inform the XPT that a new device has been found */
6376                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6377                         xpt_action(done_ccb);
6378                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6379                                   done_ccb);
6380                 }
6381                 xpt_release_ccb(done_ccb);
6382                 break;
6383         }
6384         }
6385         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6386         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6387         done_ccb->ccb_h.status = CAM_REQ_CMP;
6388         xpt_done(done_ccb);
6389         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6390                 cam_periph_invalidate(periph);
6391                 cam_periph_release(periph);
6392         } else {
6393                 probeschedule(periph);
6394         }
6395 }
6396
6397 static void
6398 probecleanup(struct cam_periph *periph)
6399 {
6400         free(periph->softc, M_TEMP);
6401 }
6402
6403 static void
6404 xpt_find_quirk(struct cam_ed *device)
6405 {
6406         caddr_t match;
6407
6408         match = cam_quirkmatch((caddr_t)&device->inq_data,
6409                                (caddr_t)xpt_quirk_table,
6410                                sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6411                                sizeof(*xpt_quirk_table), scsi_inquiry_match);
6412
6413         if (match == NULL)
6414                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6415
6416         device->quirk = (struct xpt_quirk_entry *)match;
6417 }
6418
6419 static int
6420 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6421 {
6422         int error, bool;
6423
6424         bool = cam_srch_hi;
6425         error = sysctl_handle_int(oidp, &bool, sizeof(bool), req);
6426         if (error != 0 || req->newptr == NULL)
6427                 return (error);
6428         if (bool == 0 || bool == 1) {
6429                 cam_srch_hi = bool;
6430                 return (0);
6431         } else {
6432                 return (EINVAL);
6433         }
6434 }
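/*
 * The handler above backs an integer sysctl that only accepts 0 or 1.
 * A sketch of the hookup (the actual SYSCTL_PROC declaration lives
 * elsewhere in this file; the OID name here is an assumption):
 *
 *   SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi,
 *       CTLTYPE_INT | CTLFLAG_RW, 0, 0, sysctl_cam_search_luns, "I",
 *       "allow search above LUN 7 for SCSI3 and greater devices");
 *
 * which would then be toggled at runtime with, e.g.:
 *
 *   # sysctl kern.cam.cam_srch_hi=1
 */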
6435
6436
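/*
 * Derive the protocol and transport (and their versions) for a device
 * from the SIM's path inquiry data and, when valid, the device's own
 * inquiry data, then inform the SIM of the result via an
 * XPT_SET_TRAN_SETTINGS request.
 */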
6437 static void
6438 xpt_devise_transport(struct cam_path *path)
6439 {
6440         struct ccb_pathinq cpi;
6441         struct ccb_trans_settings cts;
6442         struct scsi_inquiry_data *inq_buf;
6443
6444         /* Get transport information from the SIM */
6445         xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6446         cpi.ccb_h.func_code = XPT_PATH_INQ;
6447         xpt_action((union ccb *)&cpi);
6448
6449         inq_buf = NULL;
6450         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6451                 inq_buf = &path->device->inq_data;
6452         path->device->protocol = PROTO_SCSI;
6453         path->device->protocol_version =
6454             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6455         path->device->transport = cpi.transport;
6456         path->device->transport_version = cpi.transport_version;
6457
6458         /*
6459          * Any device not using SPI3 features should
6460          * be considered SPI2 or lower.
6461          */
6462         if (inq_buf != NULL) {
6463                 if (path->device->transport == XPORT_SPI
6464                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
6465                  && path->device->transport_version > 2)
6466                         path->device->transport_version = 2;
6467         } else {
6468                 struct cam_ed* otherdev;
6469
6470                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6471                      otherdev != NULL;
6472                      otherdev = TAILQ_NEXT(otherdev, links)) {
6473                         if (otherdev != path->device)
6474                                 break;
6475                 }
6476                     
6477                 if (otherdev != NULL) {
6478                         /*
6479                          * Initially assume the same versioning as
6480                          * prior luns for this target.
6481                          */
6482                         path->device->protocol_version =
6483                             otherdev->protocol_version;
6484                         path->device->transport_version =
6485                             otherdev->transport_version;
6486                 } else {
6487                         /* Until we know better, opt for safety */
6488                         path->device->protocol_version = 2;
6489                         if (path->device->transport == XPORT_SPI)
6490                                 path->device->transport_version = 2;
6491                         else
6492                                 path->device->transport_version = 0;
6493                 }
6494         }
6495
6496         /*
6497          * XXX
6498          * For a device compliant with SPC-2 we should be able
6499          * to determine the transport version supported by
6500          * scrutinizing the version descriptors in the
6501          * inquiry buffer.
6502          */
6503
6504         /* Tell the controller what we think */
6505         xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6506         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6507         cts.type = CTS_TYPE_CURRENT_SETTINGS;
6508         cts.transport = path->device->transport;
6509         cts.transport_version = path->device->transport_version;
6510         cts.protocol = path->device->protocol;
6511         cts.protocol_version = path->device->protocol_version;
6512         cts.proto_specific.valid = 0;
6513         cts.xport_specific.valid = 0;
6514         xpt_action((union ccb *)&cts);
6515 }
6516
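/*
 * Sanity check a transfer settings request against what the controller
 * and device can do, filling in unspecified protocol and transport
 * fields from the device's known values, and, unless this is merely an
 * asynchronous update of our own notion of the settings, pass the
 * result on to the SIM.
 */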
6517 static void
6518 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6519                           int async_update)
6520 {
6521         struct  ccb_pathinq cpi;
6522         struct  ccb_trans_settings cur_cts;
6523         struct  ccb_trans_settings_scsi *scsi;
6524         struct  ccb_trans_settings_scsi *cur_scsi;
6525         struct  cam_sim *sim;
6526         struct  scsi_inquiry_data *inq_data;
6527
6528         if (device == NULL) {
6529                 cts->ccb_h.status = CAM_PATH_INVALID;
6530                 xpt_done((union ccb *)cts);
6531                 return;
6532         }
6533
6534         if (cts->protocol == PROTO_UNKNOWN
6535          || cts->protocol == PROTO_UNSPECIFIED) {
6536                 cts->protocol = device->protocol;
6537                 cts->protocol_version = device->protocol_version;
6538         }
6539
6540         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6541          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6542                 cts->protocol_version = device->protocol_version;
6543
6544         if (cts->protocol != device->protocol) {
6545                 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6546                        cts->protocol, device->protocol);
6547                 cts->protocol = device->protocol;
6548         }
6549
6550         if (cts->protocol_version > device->protocol_version) {
6551                 if (bootverbose) {
6552                         xpt_print(cts->ccb_h.path, "Down revving Protocol "
6553                             "Version from %d to %d?\n", cts->protocol_version,
6554                             device->protocol_version);
6555                 }
6556                 cts->protocol_version = device->protocol_version;
6557         }
6558
6559         if (cts->transport == XPORT_UNKNOWN
6560          || cts->transport == XPORT_UNSPECIFIED) {
6561                 cts->transport = device->transport;
6562                 cts->transport_version = device->transport_version;
6563         }
6564
6565         if (cts->transport_version == XPORT_VERSION_UNKNOWN
6566          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6567                 cts->transport_version = device->transport_version;
6568
6569         if (cts->transport != device->transport) {
6570                 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6571                     cts->transport, device->transport);
6572                 cts->transport = device->transport;
6573         }
6574
6575         if (cts->transport_version > device->transport_version) {
6576                 if (bootverbose) {
6577                         xpt_print(cts->ccb_h.path, "Down revving Transport "
6578                             "Version from %d to %d?\n", cts->transport_version,
6579                             device->transport_version);
6580                 }
6581                 cts->transport_version = device->transport_version;
6582         }
6583
6584         sim = cts->ccb_h.path->bus->sim;
6585
6586         /*
6587          * Nothing more of interest to do unless
6588          * this is a device connected via the
6589          * SCSI protocol.
6590          */
6591         if (cts->protocol != PROTO_SCSI) {
6592                 if (async_update == FALSE) 
6593                         (*(sim->sim_action))(sim, (union ccb *)cts);
6594                 return;
6595         }
6596
6597         inq_data = &device->inq_data;
6598         scsi = &cts->proto_specific.scsi;
6599         xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6600         cpi.ccb_h.func_code = XPT_PATH_INQ;
6601         xpt_action((union ccb *)&cpi);
6602
6603         /* SCSI specific sanity checking */
6604         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6605          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6606          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6607          || (device->quirk->mintags == 0)) {
6608                 /*
6609                  * Can't tag on hardware that doesn't support tags,
6610                  * doesn't have it enabled, or has broken tag support.
6611                  */
6612                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6613         }
6614
6615         if (async_update == FALSE) {
6616                 /*
6617                  * Perform sanity checking against what the
6618                  * controller and device can do.
6619                  */
6620                 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6621                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6622                 cur_cts.type = cts->type;
6623                 xpt_action((union ccb *)&cur_cts);
6624                 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6625                         return;
6626                 }
6627                 cur_scsi = &cur_cts.proto_specific.scsi;
6628                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6629                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6630                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6631                 }
6632                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6633                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6634         }
6635
6636         /* SPI specific sanity checking */
6637         if (cts->transport == XPORT_SPI && async_update == FALSE) {
6638                 u_int spi3caps;
6639                 struct ccb_trans_settings_spi *spi;
6640                 struct ccb_trans_settings_spi *cur_spi;
6641
6642                 spi = &cts->xport_specific.spi;
6643
6644                 cur_spi = &cur_cts.xport_specific.spi;
6645
6646                 /* Fill in any gaps in what the user gave us */
6647                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6648                         spi->sync_period = cur_spi->sync_period;
6649                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6650                         spi->sync_period = 0;
6651                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6652                         spi->sync_offset = cur_spi->sync_offset;
6653                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6654                         spi->sync_offset = 0;
6655                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6656                         spi->ppr_options = cur_spi->ppr_options;
6657                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6658                         spi->ppr_options = 0;
6659                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6660                         spi->bus_width = cur_spi->bus_width;
6661                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6662                         spi->bus_width = 0;
6663                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6664                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6665                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6666                 }
6667                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6668                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6669                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6670                   && (inq_data->flags & SID_Sync) == 0
6671                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6672                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6673                  || (spi->sync_offset == 0)
6674                  || (spi->sync_period == 0)) {
6675                         /* Force async */
6676                         spi->sync_period = 0;
6677                         spi->sync_offset = 0;
6678                 }
6679
6680                 switch (spi->bus_width) {
6681                 case MSG_EXT_WDTR_BUS_32_BIT:
6682                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6683                           || (inq_data->flags & SID_WBus32) != 0
6684                           || cts->type == CTS_TYPE_USER_SETTINGS)
6685                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6686                                 break;
6687                         /* Fall Through to 16-bit */
6688                 case MSG_EXT_WDTR_BUS_16_BIT:
6689                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6690                           || (inq_data->flags & SID_WBus16) != 0
6691                           || cts->type == CTS_TYPE_USER_SETTINGS)
6692                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6693                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6694                                 break;
6695                         }
6696                         /* Fall Through to 8-bit */
6697                 default: /* New bus width?? */
6698                 case MSG_EXT_WDTR_BUS_8_BIT:
6699                         /* All targets can do this */
6700                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6701                         break;
6702                 }
6703
6704                 spi3caps = cpi.xport_specific.spi.ppr_options;
6705                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6706                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6707                         spi3caps &= inq_data->spi3data;
6708
6709                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6710                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6711
6712                 if ((spi3caps & SID_SPI_IUS) == 0)
6713                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6714
6715                 if ((spi3caps & SID_SPI_QAS) == 0)
6716                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6717
6718                 /* PPR protocol options are only allowed on wide busses */
6719                 if (spi->bus_width == 0)
6720                         spi->ppr_options = 0;
6721
6722                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6723                         /*
6724                          * Can't tag queue without disconnection.
6725                          */
6726                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6727                         scsi->valid |= CTS_SCSI_VALID_TQ;
6728                 }
6729
6730                 /*
6731                  * If we are currently performing tagged transactions to
6732                  * this device and want to change its negotiation parameters,
6733                  * go non-tagged for a bit to give the controller a chance to
6734                  * negotiate unhampered by tag messages.
6735                  */
6736                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6737                  && (device->inq_flags & SID_CmdQue) != 0
6738                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6739                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
6740                                    CTS_SPI_VALID_SYNC_OFFSET|
6741                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
6742                         xpt_toggle_tags(cts->ccb_h.path);
6743         }
6744
6745         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6746          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6747                 int device_tagenb;
6748
6749                 /*
6750                  * If we are transitioning from tags to no-tags or
6751                  * vice-versa, we need to carefully freeze and restart
6752                  * the queue so that we don't overlap tagged and non-tagged
6753                  * commands.  We also temporarily stop tags if there is
6754                  * a change in transfer negotiation settings to allow
6755                  * "tag-less" negotiation.
6756                  */
6757                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6758                  || (device->inq_flags & SID_CmdQue) != 0)
6759                         device_tagenb = TRUE;
6760                 else
6761                         device_tagenb = FALSE;
6762
6763                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6764                   && device_tagenb == FALSE)
6765                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6766                   && device_tagenb == TRUE)) {
6767
6768                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6769                                 /*
6770                                  * Delay change to use tags until after a
6771                                  * few commands have gone to this device so
6772                                  * the controller has time to perform transfer
6773                                  * negotiations without tagged messages getting
6774                                  * in the way.
6775                                  */
6776                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6777                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6778                         } else {
6779                                 struct ccb_relsim crs;
6780
6781                                 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6782                                 device->inq_flags &= ~SID_CmdQue;
6783                                 xpt_dev_ccbq_resize(cts->ccb_h.path,
6784                                                     sim->max_dev_openings);
6785                                 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6786                                 device->tag_delay_count = 0;
6787
6788                                 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6789                                               /*priority*/1);
6790                                 crs.ccb_h.func_code = XPT_REL_SIMQ;
6791                                 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6792                                 crs.openings
6793                                     = crs.release_timeout 
6794                                     = crs.qfrozen_cnt
6795                                     = 0;
6796                                 xpt_action((union ccb *)&crs);
6797                         }
6798                 }
6799         }
6800         if (async_update == FALSE) 
6801                 (*(sim->sim_action))(sim, (union ccb *)cts);
6802 }
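/*
 * Example (editor's sketch, not part of the original file): peripheral
 * drivers reach xpt_set_transfer_settings() above by issuing an
 * XPT_SET_TRAN_SETTINGS ccb through xpt_action(), just as
 * xpt_toggle_tags() below does internally.  Disabling tagged queueing
 * for a device might look like the following; the wrapper function and
 * its path argument are hypothetical.
 */
#if 0
static void
example_disable_tags(struct cam_path *path)
{
        struct ccb_trans_settings cts;

        xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
        cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
        cts.type = CTS_TYPE_CURRENT_SETTINGS;
        cts.protocol = PROTO_SCSI;
        cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
        cts.transport = XPORT_UNSPECIFIED;
        cts.transport_version = XPORT_VERSION_UNSPECIFIED;
        cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
        cts.proto_specific.scsi.flags = 0;      /* CTS_SCSI_FLAGS_TAG_ENB off */
        xpt_action((union ccb *)&cts);
}
#endif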
6803
6804
6805 static void
6806 xpt_toggle_tags(struct cam_path *path)
6807 {
6808         struct cam_ed *dev;
6809
6810         /*
6811          * Give controllers a chance to renegotiate
6812          * before starting tag operations.  We
6813          * "toggle" tagged queuing off then on
6814          * which causes the tag enable command delay
6815          * counter to come into effect.
6816          */
6817         dev = path->device;
6818         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6819          || ((dev->inq_flags & SID_CmdQue) != 0
6820           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6821                 struct ccb_trans_settings cts;
6822
6823                 xpt_setup_ccb(&cts.ccb_h, path, 1);
6824                 cts.protocol = PROTO_SCSI;
6825                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6826                 cts.transport = XPORT_UNSPECIFIED;
6827                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6828                 cts.proto_specific.scsi.flags = 0;
6829                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6830                 xpt_set_transfer_settings(&cts, path->device,
6831                                           /*async_update*/TRUE);
6832                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6833                 xpt_set_transfer_settings(&cts, path->device,
6834                                           /*async_update*/TRUE);
6835         }
6836 }
6837
6838 static void
6839 xpt_start_tags(struct cam_path *path)
6840 {
6841         struct ccb_relsim crs;
6842         struct cam_ed *device;
6843         struct cam_sim *sim;
6844         int    newopenings;
6845
6846         device = path->device;
6847         sim = path->bus->sim;
6848         device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6849         xpt_freeze_devq(path, /*count*/1);
6850         device->inq_flags |= SID_CmdQue;
6851         if (device->tag_saved_openings != 0)
6852                 newopenings = device->tag_saved_openings;
6853         else
6854                 newopenings = min(device->quirk->maxtags,
6855                                   sim->max_tagged_dev_openings);
6856         xpt_dev_ccbq_resize(path, newopenings);
6857         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6858         crs.ccb_h.func_code = XPT_REL_SIMQ;
6859         crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6860         crs.openings
6861             = crs.release_timeout 
6862             = crs.qfrozen_cnt
6863             = 0;
6864         xpt_action((union ccb *)&crs);
6865 }
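/*
 * Editor's sketch (not in the original): the same XPT_REL_SIMQ ccb used
 * above can also resize the opening count directly via the
 * RELSIM_ADJUST_OPENINGS release flag, e.g. when a peripheral reacts to
 * a queue-full condition.  The path and new_openings values below are
 * hypothetical.
 */
#if 0
        struct ccb_relsim crs;

        xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
        crs.ccb_h.func_code = XPT_REL_SIMQ;
        crs.release_flags = RELSIM_ADJUST_OPENINGS;
        crs.openings = new_openings;
        crs.release_timeout = 0;
        crs.qfrozen_cnt = 0;
        xpt_action((union ccb *)&crs);
#endif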
6866
6867 static int busses_to_config;
6868 static int busses_to_reset;
6869
6870 static int
6871 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6872 {
6873         if (bus->path_id != CAM_XPT_PATH_ID) {
6874                 struct cam_path path;
6875                 struct ccb_pathinq cpi;
6876                 int can_negotiate;
6877
6878                 busses_to_config++;
6879                 xpt_compile_path(&path, NULL, bus->path_id,
6880                                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6881                 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6882                 cpi.ccb_h.func_code = XPT_PATH_INQ;
6883                 xpt_action((union ccb *)&cpi);
6884                 can_negotiate = cpi.hba_inquiry;
6885                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6886                 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6887                  && can_negotiate)
6888                         busses_to_reset++;
6889                 xpt_release_path(&path);
6890         }
6891
6892         return(1);
6893 }
6894
6895 static int
6896 xptconfigfunc(struct cam_eb *bus, void *arg)
6897 {
6898         struct  cam_path *path;
6899         union   ccb *work_ccb;
6900
6901         if (bus->path_id != CAM_XPT_PATH_ID) {
6902                 cam_status status;
6903                 int can_negotiate;
6904
6905                 work_ccb = xpt_alloc_ccb();
6906                 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6907                                               CAM_TARGET_WILDCARD,
6908                                               CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6909                         printf("xptconfigfunc: xpt_create_path failed with "
6910                                "status %#x for bus %d\n", status, bus->path_id);
6911                         printf("xptconfigfunc: halting bus configuration\n");
6912                         xpt_free_ccb(work_ccb);
6913                         busses_to_config--;
6914                         xpt_finishconfig(xpt_periph, NULL);
6915                         return(0);
6916                 }
6917                 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6918                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6919                 xpt_action(work_ccb);
6920                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6921                         printf("xptconfigfunc: CPI failed on bus %d "
6922                                "with status %#x\n", bus->path_id,
6923                                work_ccb->ccb_h.status);
6924                         xpt_finishconfig(xpt_periph, work_ccb);
6925                         return(1);
6926                 }
6927
6928                 can_negotiate = work_ccb->cpi.hba_inquiry;
6929                 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6930                 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6931                  && (can_negotiate != 0)) {
6932                         xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6933                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6934                         work_ccb->ccb_h.cbfcnp = NULL;
6935                         CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6936                                   ("Resetting Bus\n"));
6937                         xpt_action(work_ccb);
6938                         xpt_finishconfig(xpt_periph, work_ccb);
6939                 } else {
6940                         /* Act as though we performed a successful BUS RESET */
6941                         work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6942                         xpt_finishconfig(xpt_periph, work_ccb);
6943                 }
6944         }
6945
6946         return(1);
6947 }
6948
6949 static void
6950 xpt_config(void *arg)
6951 {
6952         /*
6953          * Now that interrupts are enabled, go find our devices
6954          */
6955
6956 #ifdef CAMDEBUG
6957         /* Setup debugging flags and path */
6958 #ifdef CAM_DEBUG_FLAGS
6959         cam_dflags = CAM_DEBUG_FLAGS;
6960 #else /* !CAM_DEBUG_FLAGS */
6961         cam_dflags = CAM_DEBUG_NONE;
6962 #endif /* CAM_DEBUG_FLAGS */
6963 #ifdef CAM_DEBUG_BUS
6964         if (cam_dflags != CAM_DEBUG_NONE) {
6965                 if (xpt_create_path(&cam_dpath, xpt_periph,
6966                                     CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6967                                     CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6968                         printf("xpt_config: xpt_create_path() failed for debug"
6969                                " target %d:%d:%d, debugging disabled\n",
6970                                CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6971                         cam_dflags = CAM_DEBUG_NONE;
6972                 }
6973         } else
6974                 cam_dpath = NULL;
6975 #else /* !CAM_DEBUG_BUS */
6976         cam_dpath = NULL;
6977 #endif /* CAM_DEBUG_BUS */
6978 #endif /* CAMDEBUG */
6979
6980         /*
6981          * Scan all installed busses.
6982          */
6983         xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6984
6985         if (busses_to_config == 0) {
6986                 /* Call manually because we don't have any busses */
6987                 xpt_finishconfig(xpt_periph, NULL);
6988         } else  {
6989                 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6990                         printf("Waiting %d seconds for SCSI "
6991                                "devices to settle\n", scsi_delay/1000);
6992                 }
6993                 xpt_for_all_busses(xptconfigfunc, NULL);
6994         }
6995 }
6996
6997 /*
6998  * If the given device only has one peripheral attached to it, and if that
6999  * peripheral is the passthrough driver, announce it.  This ensures that the
7000  * user sees some sort of announcement for every peripheral in their system.
7001  */
7002 static int
7003 xptpassannouncefunc(struct cam_ed *device, void *arg)
7004 {
7005         struct cam_periph *periph;
7006         int i;
7007
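        /* Walk the peripheral list solely to count the attached instances. */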
7008         for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
7009              periph = SLIST_NEXT(periph, periph_links), i++);
7010
7011         periph = SLIST_FIRST(&device->periphs);
7012         if ((i == 1)
7013          && (strncmp(periph->periph_name, "pass", 4) == 0))
7014                 xpt_announce_periph(periph, NULL);
7015
7016         return(1);
7017 }
7018
7019 static void
7020 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
7021 {
7022         struct  periph_driver **p_drv;
7023         int     i;
7024
7025         if (done_ccb != NULL) {
7026                 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7027                           ("xpt_finishconfig\n"));
7028                 switch(done_ccb->ccb_h.func_code) {
7029                 case XPT_RESET_BUS:
7030                         if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7031                                 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7032                                 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7033                                 done_ccb->crcn.flags = 0;
7034                                 xpt_action(done_ccb);
7035                                 return;
7036                         }
7037                         /* FALLTHROUGH */
7038                 case XPT_SCAN_BUS:
7039                 default:
7040                         xpt_free_path(done_ccb->ccb_h.path);
7041                         busses_to_config--;
7042                         break;
7043                 }
7044         }
7045
7046         if (busses_to_config == 0) {
7047                 /* Register all the peripheral drivers */
7048                 /* XXX This will have to change when we have loadable modules */
7049                 p_drv = periph_drivers;
7050                 for (i = 0; p_drv[i] != NULL; i++) {
7051                         (*p_drv[i]->init)();
7052                 }
7053
7054                 /*
7055                  * Check for devices with no "standard" peripheral driver
7056                  * attached.  For any devices like that, announce the
7057                  * passthrough driver so the user will see something.
7058                  */
7059                 xpt_for_all_devices(xptpassannouncefunc, NULL);
7060
7061                 /* Release our hook so that the boot can continue. */
7062                 config_intrhook_disestablish(xpt_config_hook);
7063                 free(xpt_config_hook, M_TEMP);
7064                 xpt_config_hook = NULL;
7065         }
7066         if (done_ccb != NULL)
7067                 xpt_free_ccb(done_ccb);
7068 }
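/*
 * Editor's note (sketch): the hook torn down above follows the standard
 * config_intrhook(9) protocol.  The registration side lives in
 * xpt_init(), outside this excerpt, and looks roughly like:
 */
#if 0
        xpt_config_hook = malloc(sizeof(struct intr_config_hook),
                                 M_TEMP, M_NOWAIT | M_ZERO);
        xpt_config_hook->ich_func = xpt_config;
        if (config_intrhook_establish(xpt_config_hook) != 0)
                printf("config_intrhook_establish failed\n");
#endif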
7069
7070 static void
7071 xptaction(struct cam_sim *sim, union ccb *work_ccb)
7072 {
7073         CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7074
7075         switch (work_ccb->ccb_h.func_code) {
7076         /* Common cases first */
7077         case XPT_PATH_INQ:              /* Path routing inquiry */
7078         {
7079                 struct ccb_pathinq *cpi;
7080
7081                 cpi = &work_ccb->cpi;
7082                 cpi->version_num = 1; /* XXX??? */
7083                 cpi->hba_inquiry = 0;
7084                 cpi->target_sprt = 0;
7085                 cpi->hba_misc = 0;
7086                 cpi->hba_eng_cnt = 0;
7087                 cpi->max_target = 0;
7088                 cpi->max_lun = 0;
7089                 cpi->initiator_id = 0;
7090                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7091                 strncpy(cpi->hba_vid, "", HBA_IDLEN);
7092                 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7093                 cpi->unit_number = sim->unit_number;
7094                 cpi->bus_id = sim->bus_id;
7095                 cpi->base_transfer_speed = 0;
7096                 cpi->protocol = PROTO_UNSPECIFIED;
7097                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7098                 cpi->transport = XPORT_UNSPECIFIED;
7099                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7100                 cpi->ccb_h.status = CAM_REQ_CMP;
7101                 xpt_done(work_ccb);
7102                 break;
7103         }
7104         default:
7105                 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7106                 xpt_done(work_ccb);
7107                 break;
7108         }
7109 }
7110
7111 /*
7112  * The xpt as a "controller" has no interrupt sources, so polling
7113  * is a no-op.
7114  */
7115 static void
7116 xptpoll(struct cam_sim *sim)
7117 {
7118 }
7119
7120 static void
7121 camisr(void *V_queue)
7122 {
7123         cam_isrq_t *oqueue = V_queue;
7124         cam_isrq_t queue;
7125         int     s;
7126         struct  ccb_hdr *ccb_h;
7127
7128         /*
7129          * Transfer the ccb_bioq list to a temporary list so we can operate
7130          * on it without needing to lock/unlock on every loop.  The concat
7131          * function will re-init the real list for us.
7132          */
7133         s = splcam();
7134         mtx_lock(&cam_bioq_lock);
7135         TAILQ_INIT(&queue);
7136         TAILQ_CONCAT(&queue, oqueue, sim_links.tqe);
7137         mtx_unlock(&cam_bioq_lock);
7138
7139         while ((ccb_h = TAILQ_FIRST(&queue)) != NULL) {
7140                 int     runq;
7141
7142                 TAILQ_REMOVE(&queue, ccb_h, sim_links.tqe);
7143                 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7144                 splx(s);
7145
7146                 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7147                           ("camisr\n"));
7148
7149                 runq = FALSE;
7150
7151                 if (ccb_h->flags & CAM_HIGH_POWER) {
7152                         struct highpowerlist    *hphead;
7153                         union ccb               *send_ccb;
7154
7155                         hphead = &highpowerq;
7156
7157                         send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7158
7159                         /*
7160                          * This high powered command is done; free its slot in the high power budget.
7161                          */
7162                         num_highpower++;
7163
7164                         /* 
7165                          * Any high powered commands queued up?
7166                          */
7167                         if (send_ccb != NULL) {
7168
7169                                 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7170
7171                                 xpt_release_devq(send_ccb->ccb_h.path,
7172                                                  /*count*/1, /*runqueue*/TRUE);
7173                         }
7174                 }
7175                 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7176                         struct cam_ed *dev;
7177
7178                         dev = ccb_h->path->device;
7179
7180                         s = splcam();
7181                         cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7182
7183                         if (!SIM_DEAD(ccb_h->path->bus->sim)) {
7184                                 ccb_h->path->bus->sim->devq->send_active--;
7185                                 ccb_h->path->bus->sim->devq->send_openings++;
7186                         }
7187                         splx(s);
7188                         
7189                         if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7190                           && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7191                          || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7192                           && (dev->ccbq.dev_active == 0))) {
7193                                 
7194                                 xpt_release_devq(ccb_h->path, /*count*/1,
7195                                                  /*run_queue*/TRUE);
7196                         }
7197
7198                         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7199                          && (--dev->tag_delay_count == 0))
7200                                 xpt_start_tags(ccb_h->path);
7201
7202                         if ((dev->ccbq.queue.entries > 0)
7203                          && (dev->qfrozen_cnt == 0)
7204                          && (device_is_send_queued(dev) == 0)) {
7205                                 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7206                                                               dev);
7207                         }
7208                 }
7209
7210                 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7211                         xpt_release_simq(ccb_h->path->bus->sim,
7212                                          /*run_queue*/TRUE);
7213                         ccb_h->status &= ~CAM_RELEASE_SIMQ;
7214                         runq = FALSE;
7215                 } 
7216
7217                 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7218                  && (ccb_h->status & CAM_DEV_QFRZN)) {
7219                         xpt_release_devq(ccb_h->path, /*count*/1,
7220                                          /*run_queue*/TRUE);
7221                         ccb_h->status &= ~CAM_DEV_QFRZN;
7222                 } else if (runq) {
7223                         xpt_run_dev_sendq(ccb_h->path->bus);
7224                 }
7225
7226                 /* Call the peripheral driver's callback */
7227                 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7228
7229                 /* Raise IPL for the while-loop test */
7230                 s = splcam();
7231         }
7232         splx(s);
7233 }
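/*
 * Editor's sketch of the queue-stealing idiom camisr() uses above,
 * reduced to its essentials.  TAILQ_CONCAT() moves every entry onto the
 * local head and re-initializes the shared one, so the lock is only
 * held for the hand-off (the function name below is hypothetical):
 */
#if 0
static void
example_drain(cam_isrq_t *shared)
{
        cam_isrq_t local;
        struct ccb_hdr *ccb_h;

        mtx_lock(&cam_bioq_lock);
        TAILQ_INIT(&local);
        TAILQ_CONCAT(&local, shared, sim_links.tqe);
        mtx_unlock(&cam_bioq_lock);

        while ((ccb_h = TAILQ_FIRST(&local)) != NULL) {
                TAILQ_REMOVE(&local, ccb_h, sim_links.tqe);
                /* process ccb_h without holding cam_bioq_lock */
        }
}
#endif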
7234
7235 static void
7236 dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7237 {
7238
7239         ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7240         xpt_done(ccb);
7241 }
7242  
7243 static void
7244 dead_sim_poll(struct cam_sim *sim)
7245 {
7246 }