1 /*-
2  * Implementation of the SCSI Transport
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include <sys/cdefs.h>
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/time.h>
40 #include <sys/conf.h>
41 #include <sys/fcntl.h>
42 #include <sys/md5.h>
43 #include <sys/sbuf.h>
44
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/sysctl.h>
48
49 #include <cam/cam.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_periph.h>
53 #include <cam/cam_sim.h>
54 #include <cam/cam_xpt.h>
55 #include <cam/cam_xpt_sim.h>
56 #include <cam/cam_xpt_periph.h>
57 #include <cam/cam_xpt_internal.h>
58 #include <cam/cam_debug.h>
59
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_message.h>
62 #include <cam/scsi/scsi_pass.h>
63 #include <machine/stdarg.h>     /* for xpt_print below */
64 #include "opt_cam.h"
65
66 struct scsi_quirk_entry {
67         struct scsi_inquiry_pattern inq_pat;
68         uint8_t quirks;
69 #define CAM_QUIRK_NOLUNS        0x01
70 #define CAM_QUIRK_NOVPDS        0x02
71 #define CAM_QUIRK_HILUNS        0x04
72 #define CAM_QUIRK_NOHILUNS      0x08
73 #define CAM_QUIRK_NORPTLUNS     0x10
74         u_int mintags;
75         u_int maxtags;
76 };
77 #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
78
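/*
 * Illustrative (hypothetical) example of a quirk entry; the real table is
 * defined further below and is the authoritative list.  An entry matching
 * every revision of a fictitious "ACME ROADRUNNER" direct-access device
 * and disabling LUN probing would look like:
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROADRUNNER*", "*" },
 *		CAM_QUIRK_NOLUNS, 0, 0		(quirks, mintags, maxtags)
 *	},
 */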
79 static int cam_srch_hi = 0;
80 SYSCTL_INT(_kern_cam, OID_AUTO, cam_srch_hi, CTLFLAG_RWTUN,
81     &cam_srch_hi, 0, "Search above LUN 7 for SCSI3 and greater devices");
82
83 #define CAM_SCSI2_MAXLUN        8
84 #define CAM_CAN_GET_SIMPLE_LUN(x, i)                            \
85         ((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==  \
86         RPL_LUNDATA_ATYP_PERIPH) ||                             \
87         (((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==   \
88         RPL_LUNDATA_ATYP_FLAT))
89 #define CAM_GET_SIMPLE_LUN(lp, i, lval)                                 \
90         if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==     \
91             RPL_LUNDATA_ATYP_PERIPH) {                                  \
92                 (lval) = (lp)->luns[(i)].lundata[1];                    \
93         } else {                                                        \
94                 (lval) = (lp)->luns[(i)].lundata[0];                    \
95                 (lval) &= RPL_LUNDATA_FLAT_LUN_MASK;                    \
96                 (lval) <<= 8;                                           \
97                 (lval) |=  (lp)->luns[(i)].lundata[1];                  \
98         }
99 #define CAM_GET_LUN(lp, i, lval)                                        \
100         (lval) = scsi_8btou64((lp)->luns[(i)].lundata);                 \
101         (lval) = CAM_EXTLUN_BYTE_SWIZZLE(lval);
102
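/*
 * Worked example of the simple-LUN decode above (values illustrate the
 * peripheral and flat addressing methods handled by the macro, they are
 * not taken from any particular device):
 *
 *	lundata[0] = 0x00, lundata[1] = 0x05	peripheral addressing -> LUN 5
 *	lundata[0] = 0x41, lundata[1] = 0x23	flat addressing -> LUN 0x123
 *
 * CAM_GET_LUN() instead keeps all eight LUN bytes, byte-swizzled into
 * CAM's extended lun_id_t representation.
 */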
103 /*
104  * If we're not quirked to limit the search to the first 8 luns,
105  * and we are either quirked to search above lun 8,
106  * or we're > SCSI-2 and we've enabled hilun searching,
107  * or we're > SCSI-2 and the last lun was a success,
108  * we can look for luns above lun 8.
109  */
110 #define CAN_SRCH_HI_SPARSE(dv)                                  \
111   (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0)         \
112   && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)               \
113   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
114
115 #define CAN_SRCH_HI_DENSE(dv)                                   \
116   (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0)         \
117   && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)               \
118   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
119
120 static periph_init_t probe_periph_init;
121
122 static struct periph_driver probe_driver =
123 {
124         probe_periph_init, "probe",
125         TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0,
126         CAM_PERIPH_DRV_EARLY
127 };
128
129 PERIPHDRIVER_DECLARE(probe, probe_driver);
130
131 typedef enum {
132         PROBE_TUR,
133         PROBE_INQUIRY,  /* this counts as DV0 for Basic Domain Validation */
134         PROBE_FULL_INQUIRY,
135         PROBE_REPORT_LUNS,
136         PROBE_MODE_SENSE,
137         PROBE_SUPPORTED_VPD_LIST,
138         PROBE_DEVICE_ID,
139         PROBE_EXTENDED_INQUIRY,
140         PROBE_SERIAL_NUM,
141         PROBE_TUR_FOR_NEGOTIATION,
142         PROBE_INQUIRY_BASIC_DV1,
143         PROBE_INQUIRY_BASIC_DV2,
144         PROBE_DV_EXIT,
145         PROBE_DONE,
146         PROBE_INVALID
147 } probe_action;
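/*
 * A probe normally walks these states in roughly the order listed above:
 * TUR and INQUIRY first, then REPORT LUNS, MODE SENSE and the VPD pages,
 * and finally the TUR/INQUIRY based Basic Domain Validation states.
 * States that do not apply to a given device (unsupported VPD pages,
 * non-SPI transports, etc.) are skipped as probedone() advances the
 * state machine.
 */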
148
149 static char *probe_action_text[] = {
150         "PROBE_TUR",
151         "PROBE_INQUIRY",
152         "PROBE_FULL_INQUIRY",
153         "PROBE_REPORT_LUNS",
154         "PROBE_MODE_SENSE",
155         "PROBE_SUPPORTED_VPD_LIST",
156         "PROBE_DEVICE_ID",
157         "PROBE_EXTENDED_INQUIRY",
158         "PROBE_SERIAL_NUM",
159         "PROBE_TUR_FOR_NEGOTIATION",
160         "PROBE_INQUIRY_BASIC_DV1",
161         "PROBE_INQUIRY_BASIC_DV2",
162         "PROBE_DV_EXIT",
163         "PROBE_DONE",
164         "PROBE_INVALID"
165 };
166
167 #define PROBE_SET_ACTION(softc, newaction)      \
168 do {                                                                    \
169         char **text;                                                    \
170         text = probe_action_text;                                       \
171         CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,               \
172             ("Probe %s to %s\n", text[(softc)->action],                 \
173             text[(newaction)]));                                        \
174         (softc)->action = (newaction);                                  \
175 } while(0)
176
177 typedef enum {
178         PROBE_INQUIRY_CKSUM     = 0x01,
179         PROBE_NO_ANNOUNCE       = 0x04,
180         PROBE_EXTLUN            = 0x08
181 } probe_flags;
182
183 typedef struct {
184         TAILQ_HEAD(, ccb_hdr) request_ccbs;
185         probe_action    action;
186         probe_flags     flags;
187         MD5_CTX         context;
188         uint8_t digest[16];
189         struct cam_periph *periph;
190 } probe_softc;
191
192 static const char quantum[] = "QUANTUM";
193 static const char sony[] = "SONY";
194 static const char west_digital[] = "WDIGTL";
195 static const char samsung[] = "SAMSUNG";
196 static const char seagate[] = "SEAGATE";
197 static const char microp[] = "MICROP";
198
199 static struct scsi_quirk_entry scsi_quirk_table[] =
200 {
201         {
202                 /* Reports QUEUE FULL for temporary resource shortages */
203                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
204                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
205         },
206         {
207                 /* Reports QUEUE FULL for temporary resource shortages */
208                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
209                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
210         },
211         {
212                 /* Reports QUEUE FULL for temporary resource shortages */
213                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
214                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
215         },
216         {
217                 /* Broken tagged queuing drive */
218                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
219                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
220         },
221         {
222                 /* Broken tagged queuing drive */
223                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
224                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
225         },
226         {
227                 /* Broken tagged queuing drive */
228                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
229                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
230         },
231         {
232                 /*
233                  * Unfortunately, the Quantum Atlas III has the same
234                  * problem as the Atlas II drives above.
235                  * Reported by: "Johan Granlund" <johan@granlund.nu>
236                  *
237                  * For future reference, the drive with the problem was:
238                  * QUANTUM QM39100TD-SW N1B0
239                  *
240                  * It's possible that Quantum will fix the problem in later
241                  * firmware revisions.  If that happens, the quirk entry
242                  * will need to be made specific to the firmware revisions
243                  * with the problem.
244                  *
245                  */
246                 /* Reports QUEUE FULL for temporary resource shortages */
247                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
248                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
249         },
250         {
251                 /*
252                  * 18 Gig Atlas III, same problem as the 9G version.
253                  * Reported by: Andre Albsmeier
254                  *              <andre.albsmeier@mchp.siemens.de>
255                  *
256                  * For future reference, the drive with the problem was:
257                  * QUANTUM QM318000TD-S N491
258                  */
259                 /* Reports QUEUE FULL for temporary resource shortages */
260                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
261                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
262         },
263         {
264                 /*
265                  * Broken tagged queuing drive
266                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
267                  *         and: Martin Renters <martin@tdc.on.ca>
268                  */
269                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
270                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
271         },
272                 /*
273                  * The Seagate Medalist Pro drives have very poor write
274                  * performance with anything more than 2 tags.
275                  *
276                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
277                  * Drive:  <SEAGATE ST36530N 1444>
278                  *
279                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
280                  * Drive:  <SEAGATE ST34520W 1281>
281                  *
282                  * No one has actually reported that the 9G version
283                  * (ST39140*) of the Medalist Pro has the same problem, but
284                  * we're assuming that it does because the 4G and 6.5G
285                  * versions of the drive are broken.
286                  */
287         {
288                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
289                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
290         },
291         {
292                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
293                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
294         },
295         {
296                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
297                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
298         },
299         {
300                 /*
301                  * Experiences command timeouts under load with a
302                  * tag count higher than 55.
303                  */
304                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST3146855LW", "*"},
305                 /*quirks*/0, /*mintags*/2, /*maxtags*/55
306         },
307         {
308                 /*
309                  * Slow when tagged queueing is enabled.  Write performance
310                  * steadily drops off with more and more concurrent
311                  * transactions.  Best sequential write performance with
312                  * tagged queueing turned off and write caching turned on.
313                  *
314                  * PR:  kern/10398
315                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
316                  * Drive:  DCAS-34330 w/ "S65A" firmware.
317                  *
318                  * The drive with the problem had the "S65A" firmware
319                  * revision, and has also been reported (by Stephen J.
320                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
321                  * firmware revision.
322                  *
323                  * Although no one has reported problems with the 2 gig
324                  * version of the DCAS drive, the assumption is that it
325                  * has the same problems as the 4 gig version.  Therefore
326                  * this quirk entry disables tagged queueing for all
327                  * DCAS drives.
328                  */
329                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
330                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
331         },
332         {
333                 /* Broken tagged queuing drive */
334                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
335                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
336         },
337         {
338                 /* Broken tagged queuing drive */
339                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
340                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
341         },
342         {
343                 /* Does not support LUNs other than 0 */
344                 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
345                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
346         },
347         {
348                 /*
349                  * Broken tagged queuing drive.
350                  * Submitted by:
351                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
352                  * in PR kern/9535
353                  */
354                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
355                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
356         },
357         {
358                 /*
359                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
360                  * 8MB/sec.)
361                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
362                  * Best performance with these drives is achieved with
363                  * tagged queueing turned off, and write caching turned on.
364                  */
365                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
366                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
367         },
368         {
369                 /*
370                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
371                  * 8MB/sec.)
372                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
373                  * Best performance with these drives is achieved with
374                  * tagged queueing turned off, and write caching turned on.
375                  */
376                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
377                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
378         },
379         {
380                 /*
381                  * Doesn't handle queue full condition correctly,
382                  * so we need to limit maxtags to what the device
383                  * can handle instead of determining this automatically.
384                  */
385                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
386                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
387         },
388         {
389                 /* Really only one LUN */
390                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
391                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
392         },
393         {
394                 /* I can't believe we need a quirk for DPT volumes. */
395                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
396                 CAM_QUIRK_NOLUNS,
397                 /*mintags*/0, /*maxtags*/255
398         },
399         {
400                 /*
401                  * Many Sony CDROM drives don't like multi-LUN probing.
402                  */
403                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
404                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
405         },
406         {
407                 /*
408                  * This drive doesn't like multiple LUN probing.
409                  * Submitted by:  Parag Patel <parag@cgt.com>
410                  */
411                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
412                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
413         },
414         {
415                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
416                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
417         },
418         {
419                 /*
420                  * The 8200 doesn't like multi-lun probing, and probably
421                  * doesn't like serial number requests either.
422                  */
423                 {
424                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
425                         "EXB-8200*", "*"
426                 },
427                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
428         },
429         {
430                 /*
431                  * Let's try the same as above, but for a drive that says
432                  * it's an IPL-6860 but is actually an EXB 8200.
433                  */
434                 {
435                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
436                         "IPL-6860*", "*"
437                 },
438                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
439         },
440         {
441                 /*
442                  * These Hitachi drives don't like multi-lun probing.
443                  * The PR submitter has a DK319H, but says that the Linux
444                  * kernel has a similar work-around for the DK312 and DK314,
445                  * so all DK31* drives are quirked here.
446                  * PR:            misc/18793
447                  * Submitted by:  Paul Haddad <paul@pth.com>
448                  */
449                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
450                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
451         },
452         {
453                 /*
454                  * The Hitachi CJ series with J8A8 firmware apparently has
455                  * problems with tagged commands.
456                  * PR: 23536
457                  * Reported by: amagai@nue.org
458                  */
459                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
460                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
461         },
462         {
463                 /*
464                  * These are the large storage arrays.
465                  * Submitted by:  William Carrel <william.carrel@infospace.com>
466                  */
467                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
468                 CAM_QUIRK_HILUNS, 2, 1024
469         },
470         {
471                 /*
472                  * This old revision of the TDC3600 is also SCSI-1, and
473                  * hangs upon serial number probing.
474                  */
475                 {
476                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
477                         " TDC 3600", "U07:"
478                 },
479                 CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0
480         },
481         {
482                 /*
483                  * Would respond to all LUNs if asked for.
484                  */
485                 {
486                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
487                         "CP150", "*"
488                 },
489                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
490         },
491         {
492                 /*
493                  * Would respond to all LUNs if asked for.
494                  */
495                 {
496                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
497                         "96X2*", "*"
498                 },
499                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
500         },
501         {
502                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
503                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
504                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
505         },
506         {
507                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
508                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
509                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
510         },
511         {
512                 /* TeraSolutions special settings for TRC-22 RAID */
513                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
514                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
515         },
516         {
517                 /* Veritas Storage Appliance */
518                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
519                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
520         },
521         {
522                 /*
523                  * Would respond to all LUNs.  Device type and removable
524                  * flag are jumper-selectable.
525                  */
526                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
527                   "Tahiti 1", "*"
528                 },
529                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
530         },
531         {
532                 /* EasyRAID E5A aka. areca ARC-6010 */
533                 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
534                   CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
535         },
536         {
537                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
538                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
539         },
540         {
541                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Garmin", "*", "*" },
542                 CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255
543         },
544         {
545                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "STORAGE DEVICE*", "120?" },
546                 CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255
547         },
548         {
549                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "MassStorageClass", "1533" },
550                 CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255
551         },
552         {
553                 /* Default tagged queuing parameters for all devices */
554                 {
555                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
556                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
557                 },
558                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
559         },
560 };
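/*
 * Note: quirk matching is first hit wins (see scsi_find_quirk()), so the
 * catch-all default entry above must remain the last one in the table.
 */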
561
562 static cam_status       proberegister(struct cam_periph *periph,
563                                       void *arg);
564 static void      probeschedule(struct cam_periph *probe_periph);
565 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
566 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
567 static int       proberequestbackoff(struct cam_periph *periph,
568                                      struct cam_ed *device);
569 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
570 static void      probe_purge_old(struct cam_path *path,
571                                  struct scsi_report_luns_data *new,
572                                  probe_flags flags);
573 static void      probecleanup(struct cam_periph *periph);
574 static void      scsi_find_quirk(struct cam_ed *device);
575 static void      scsi_scan_bus(struct cam_periph *periph, union ccb *ccb);
576 static void      scsi_scan_lun(struct cam_periph *periph,
577                                struct cam_path *path, cam_flags flags,
578                                union ccb *ccb);
579 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
580 static struct cam_ed *
581                  scsi_alloc_device(struct cam_eb *bus, struct cam_et *target,
582                                    lun_id_t lun_id);
583 static void      scsi_devise_transport(struct cam_path *path);
584 static void      scsi_set_transfer_settings(struct ccb_trans_settings *cts,
585                                             struct cam_path *path,
586                                             int async_update);
587 static void      scsi_toggle_tags(struct cam_path *path);
588 static void      scsi_dev_async(uint32_t async_code,
589                                 struct cam_eb *bus,
590                                 struct cam_et *target,
591                                 struct cam_ed *device,
592                                 void *async_arg);
593 static void      scsi_action(union ccb *start_ccb);
594 static void      scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb);
595 static void      scsi_proto_announce_sbuf(struct cam_ed *device,
596                                           struct sbuf *sb);
597 static void      scsi_proto_denounce_sbuf(struct cam_ed *device,
598                                           struct sbuf *sb);
599 static void      scsi_proto_debug_out(union ccb *ccb);
600 static void      _scsi_announce_periph(struct cam_periph *, u_int *, u_int *, struct ccb_trans_settings *);
601
602 static struct xpt_xport_ops scsi_xport_ops = {
603         .alloc_device = scsi_alloc_device,
604         .action = scsi_action,
605         .async = scsi_dev_async,
606         .announce_sbuf = scsi_announce_periph_sbuf,
607 };
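/*
 * Instantiate and register a struct xpt_xport, sharing scsi_xport_ops,
 * for each of the SCSI-carrying transports listed below.
 */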
608 #define SCSI_XPT_XPORT(x, X)                    \
609 static struct xpt_xport scsi_xport_ ## x = {    \
610         .xport = XPORT_ ## X,                   \
611         .name = #x,                             \
612         .ops = &scsi_xport_ops,                 \
613 };                                              \
614 CAM_XPT_XPORT(scsi_xport_ ## x);
615
616 SCSI_XPT_XPORT(spi, SPI);
617 SCSI_XPT_XPORT(sas, SAS);
618 SCSI_XPT_XPORT(fc, FC);
619 SCSI_XPT_XPORT(usb, USB);
620 SCSI_XPT_XPORT(iscsi, ISCSI);
621 SCSI_XPT_XPORT(srp, SRP);
622 SCSI_XPT_XPORT(ppb, PPB);
623
624 #undef SCSI_XPT_XPORT
625
626 static struct xpt_proto_ops scsi_proto_ops = {
627         .announce_sbuf = scsi_proto_announce_sbuf,
628         .denounce_sbuf = scsi_proto_denounce_sbuf,
629         .debug_out = scsi_proto_debug_out,
630 };
631 static struct xpt_proto scsi_proto = {
632         .proto = PROTO_SCSI,
633         .name = "scsi",
634         .ops = &scsi_proto_ops,
635 };
636 CAM_XPT_PROTO(scsi_proto);
637
638 static void
639 probe_periph_init(void)
640 {
641 }
642
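/*
 * Register a probe peripheral instance for a device: allocate the softc,
 * queue the CCB that requested the probe, work out the transport for the
 * path, and schedule the first probe action once any bus settle delay
 * has expired.
 */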
643 static cam_status
644 proberegister(struct cam_periph *periph, void *arg)
645 {
646         union ccb *request_ccb; /* CCB representing the probe request */
647         probe_softc *softc;
648
649         request_ccb = (union ccb *)arg;
650         if (request_ccb == NULL) {
651                 printf("proberegister: no probe CCB, "
652                        "can't register device\n");
653                 return(CAM_REQ_CMP_ERR);
654         }
655
656         softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
657
658         if (softc == NULL) {
659                 printf("proberegister: Unable to probe new device. "
660                        "Unable to allocate softc\n");
661                 return(CAM_REQ_CMP_ERR);
662         }
663         TAILQ_INIT(&softc->request_ccbs);
664         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
665                           periph_links.tqe);
666         softc->flags = 0;
667         periph->softc = softc;
668         softc->periph = periph;
669         softc->action = PROBE_INVALID;
670         if (cam_periph_acquire(periph) != 0)
671                 return (CAM_REQ_CMP_ERR);
672
673         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
674         scsi_devise_transport(periph->path);
675
676         /*
677          * Ensure we've waited at least a bus settle
678          * delay before attempting to probe the device.
679          * For HBAs that don't do bus resets, this won't make a difference.
680          */
681         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
682                                       scsi_delay);
683         probeschedule(periph);
684         return(CAM_REQ_CMP);
685 }
686
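/*
 * Pick the initial probe action for the queued request (TUR for a
 * previously configured LUN 0, otherwise INQUIRY, optionally preceded by
 * a reset of the transfer negotiations) and schedule the periph to
 * issue it.
 */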
687 static void
688 probeschedule(struct cam_periph *periph)
689 {
690         struct ccb_pathinq cpi;
691         union ccb *ccb;
692         probe_softc *softc;
693
694         softc = (probe_softc *)periph->softc;
695         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
696
697         xpt_path_inq(&cpi, periph->path);
698
699         /*
700          * If a device has gone away and another device, or the same one,
701          * is back in the same place, it should have a unit attention
702          * condition pending.  It will not report the unit attention in
703          * response to an inquiry, which may leave invalid transfer
704          * negotiations in effect.  The TUR will reveal the unit attention
705          * condition.  Only send the TUR for lun 0, since some devices
706          * will get confused by commands other than inquiry to non-existent
707          * luns.  If you think a device has gone away, start your scan from
708          * lun 0.  This will ensure that any bogus transfer settings are
709          * invalidated.
710          *
711          * If we haven't seen the device before and the controller supports
712          * some kind of transfer negotiation, negotiate with the first
713          * sent command if no bus reset was performed at startup.  This
714          * ensures that the device is not confused by transfer negotiation
715          * settings left over by loader or BIOS action.
716          */
717         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
718          && (ccb->ccb_h.target_lun == 0)) {
719                 PROBE_SET_ACTION(softc, PROBE_TUR);
720         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
721               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
722                 proberequestdefaultnegotiation(periph);
723                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
724         } else {
725                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
726         }
727
728         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
729                 softc->flags |= PROBE_NO_ANNOUNCE;
730         else
731                 softc->flags &= ~PROBE_NO_ANNOUNCE;
732
733         if (cpi.hba_misc & PIM_EXTLUNS)
734                 softc->flags |= PROBE_EXTLUN;
735         else
736                 softc->flags &= ~PROBE_EXTLUN;
737
738         xpt_schedule(periph, CAM_PRIORITY_XPT);
739 }
740
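/*
 * Periph start routine: build the CCB for the current probe state and
 * dispatch it via xpt_action().  probedone() decides what, if anything,
 * runs next.
 */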
741 static void
742 probestart(struct cam_periph *periph, union ccb *start_ccb)
743 {
744         /* Probe the device that our peripheral driver points to */
745         struct ccb_scsiio *csio;
746         probe_softc *softc;
747
748         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
749
750         softc = (probe_softc *)periph->softc;
751         csio = &start_ccb->csio;
752 again:
753
754         switch (softc->action) {
755         case PROBE_TUR:
756         case PROBE_TUR_FOR_NEGOTIATION:
757         case PROBE_DV_EXIT:
758         {
759                 scsi_test_unit_ready(csio,
760                                      /*retries*/4,
761                                      probedone,
762                                      MSG_SIMPLE_Q_TAG,
763                                      SSD_FULL_SIZE,
764                                      /*timeout*/60000);
765                 break;
766         }
767         case PROBE_INQUIRY:
768         case PROBE_FULL_INQUIRY:
769         {
770                 u_int inquiry_len;
771                 struct scsi_inquiry_data *inq_buf;
772
773                 inq_buf = &periph->path->device->inq_data;
774
775                 /*
776                  * If the device is currently configured, we calculate an
777                  * MD5 checksum of the inquiry data, and if the serial number
778                  * length is greater than 0, add the serial number data
779                  * into the checksum as well.  Once the inquiry and the
780                  * serial number check finish, we attempt to figure out
781                  * whether we still have the same device.
782                  */
783                 if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
784                         softc->flags &= ~PROBE_INQUIRY_CKSUM;
785                 } else if ((softc->flags & PROBE_INQUIRY_CKSUM) == 0) {
786                         MD5Init(&softc->context);
787                         MD5Update(&softc->context, (unsigned char *)inq_buf,
788                                   sizeof(struct scsi_inquiry_data));
789                         if (periph->path->device->serial_num_len > 0) {
790                                 MD5Update(&softc->context,
791                                           periph->path->device->serial_num,
792                                           periph->path->device->serial_num_len);
793                         }
794                         MD5Final(softc->digest, &softc->context);
795                         softc->flags |= PROBE_INQUIRY_CKSUM;
796                 }
797
798                 if (softc->action == PROBE_INQUIRY)
799                         inquiry_len = SHORT_INQUIRY_LENGTH;
800                 else
801                         inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
802
803                 /*
804                  * Some parallel SCSI devices fail to send an
805                  * ignore wide residue message when dealing with
806                  * odd length inquiry requests.  Round up to be
807                  * safe.
808                  */
809                 inquiry_len = roundup2(inquiry_len, 2);
810
811                 scsi_inquiry(csio,
812                              /*retries*/4,
813                              probedone,
814                              MSG_SIMPLE_Q_TAG,
815                              (uint8_t *)inq_buf,
816                              inquiry_len,
817                              /*evpd*/FALSE,
818                              /*page_code*/0,
819                              SSD_MIN_SIZE,
820                              /*timeout*/60 * 1000);
821                 break;
822         }
823         case PROBE_REPORT_LUNS:
824         {
825                 void *rp;
826
827                 rp = malloc(periph->path->target->rpl_size,
828                     M_CAMXPT, M_NOWAIT | M_ZERO);
829                 if (rp == NULL) {
830                         struct scsi_inquiry_data *inq_buf;
831                         inq_buf = &periph->path->device->inq_data;
832                         xpt_print(periph->path,
833                             "Unable to alloc report luns storage\n");
834                         if (INQ_DATA_TQ_ENABLED(inq_buf))
835                                 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
836                         else
837                                 PROBE_SET_ACTION(softc,
838                                     PROBE_SUPPORTED_VPD_LIST);
839                         goto again;
840                 }
841                 scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG,
842                     RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size,
843                     SSD_FULL_SIZE, 60000);
844                 break;
845         }
846         case PROBE_MODE_SENSE:
847         {
848                 void  *mode_buf;
849                 int    mode_buf_len;
850
851                 mode_buf_len = sizeof(struct scsi_mode_header_6)
852                              + sizeof(struct scsi_mode_blk_desc)
853                              + sizeof(struct scsi_control_page);
854                 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT);
855                 if (mode_buf != NULL) {
856                         scsi_mode_sense(csio,
857                                         /*retries*/4,
858                                         probedone,
859                                         MSG_SIMPLE_Q_TAG,
860                                         /*dbd*/FALSE,
861                                         SMS_PAGE_CTRL_CURRENT,
862                                         SMS_CONTROL_MODE_PAGE,
863                                         mode_buf,
864                                         mode_buf_len,
865                                         SSD_FULL_SIZE,
866                                         /*timeout*/60000);
867                         break;
868                 }
869                 xpt_print(periph->path, "Unable to mode sense control page - "
870                     "malloc failure\n");
871                 PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
872         }
873         /* FALLTHROUGH */
874         case PROBE_SUPPORTED_VPD_LIST:
875         {
876                 struct scsi_vpd_supported_page_list *vpd_list;
877                 struct cam_ed *device;
878
879                 vpd_list = NULL;
880                 device = periph->path->device;
881
882                 if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0)
883                         vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
884                             M_NOWAIT | M_ZERO);
885
886                 if (vpd_list != NULL) {
887                         scsi_inquiry(csio,
888                                      /*retries*/4,
889                                      probedone,
890                                      MSG_SIMPLE_Q_TAG,
891                                      (uint8_t *)vpd_list,
892                                      sizeof(*vpd_list),
893                                      /*evpd*/TRUE,
894                                      SVPD_SUPPORTED_PAGE_LIST,
895                                      SSD_MIN_SIZE,
896                                      /*timeout*/60 * 1000);
897                         break;
898                 }
899 done:
900                 /*
901                  * We'll have to do without; let our probedone
902                  * routine finish up for us.
903                  */
904                 start_ccb->csio.data_ptr = NULL;
905                 cam_freeze_devq(periph->path);
906                 cam_periph_doacquire(periph);
907                 probedone(periph, start_ccb);
908                 return;
909         }
910         case PROBE_DEVICE_ID:
911         {
912                 struct scsi_vpd_device_id *devid;
913
914                 devid = NULL;
915                 if (scsi_vpd_supported_page(periph, SVPD_DEVICE_ID))
916                         devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT,
917                             M_NOWAIT | M_ZERO);
918
919                 if (devid != NULL) {
920                         scsi_inquiry(csio,
921                                      /*retries*/4,
922                                      probedone,
923                                      MSG_SIMPLE_Q_TAG,
924                                      (uint8_t *)devid,
925                                      SVPD_DEVICE_ID_MAX_SIZE,
926                                      /*evpd*/TRUE,
927                                      SVPD_DEVICE_ID,
928                                      SSD_MIN_SIZE,
929                                      /*timeout*/60 * 1000);
930                         break;
931                 }
932                 goto done;
933         }
934         case PROBE_EXTENDED_INQUIRY:
935         {
936                 struct scsi_vpd_extended_inquiry_data *ext_inq;
937
938                 ext_inq = NULL;
939                 if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA))
940                         ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT,
941                             M_NOWAIT | M_ZERO);
942
943                 if (ext_inq != NULL) {
944                         scsi_inquiry(csio,
945                                      /*retries*/4,
946                                      probedone,
947                                      MSG_SIMPLE_Q_TAG,
948                                      (uint8_t *)ext_inq,
949                                      sizeof(*ext_inq),
950                                      /*evpd*/TRUE,
951                                      SVPD_EXTENDED_INQUIRY_DATA,
952                                      SSD_MIN_SIZE,
953                                      /*timeout*/60 * 1000);
954                         break;
955                 }
956                 /*
957                  * We'll have to do without; let our probedone
958                  * routine finish up for us.
959                  */
960                 goto done;
961         }
962         case PROBE_SERIAL_NUM:
963         {
964                 struct scsi_vpd_unit_serial_number *serial_buf;
965                 struct cam_ed* device;
966
967                 serial_buf = NULL;
968                 device = periph->path->device;
969                 if (device->serial_num != NULL) {
970                         free(device->serial_num, M_CAMXPT);
971                         device->serial_num = NULL;
972                         device->serial_num_len = 0;
973                 }
974
975                 if (scsi_vpd_supported_page(periph, SVPD_UNIT_SERIAL_NUMBER))
976                         serial_buf = (struct scsi_vpd_unit_serial_number *)
977                                 malloc(sizeof(*serial_buf), M_CAMXPT,
978                                     M_NOWAIT|M_ZERO);
979
980                 if (serial_buf != NULL) {
981                         scsi_inquiry(csio,
982                                      /*retries*/4,
983                                      probedone,
984                                      MSG_SIMPLE_Q_TAG,
985                                      (uint8_t *)serial_buf,
986                                      sizeof(*serial_buf),
987                                      /*evpd*/TRUE,
988                                      SVPD_UNIT_SERIAL_NUMBER,
989                                      SSD_MIN_SIZE,
990                                      /*timeout*/60 * 1000);
991                         break;
992                 }
993                 goto done;
994         }
995         case PROBE_INQUIRY_BASIC_DV1:
996         case PROBE_INQUIRY_BASIC_DV2:
997         {
998                 u_int inquiry_len;
999                 struct scsi_inquiry_data *inq_buf;
1000
1001                 inq_buf = &periph->path->device->inq_data;
1002                 inquiry_len = roundup2(SID_ADDITIONAL_LENGTH(inq_buf), 2);
1003                 inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT);
1004                 if (inq_buf == NULL) {
1005                         xpt_print(periph->path, "malloc failure- skipping Basic "
1006                             "Domain Validation\n");
1007                         PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
1008                         scsi_test_unit_ready(csio,
1009                                              /*retries*/4,
1010                                              probedone,
1011                                              MSG_SIMPLE_Q_TAG,
1012                                              SSD_FULL_SIZE,
1013                                              /*timeout*/60000);
1014                         break;
1015                 }
1016
1017                 scsi_inquiry(csio,
1018                              /*retries*/4,
1019                              probedone,
1020                              MSG_SIMPLE_Q_TAG,
1021                              (uint8_t *)inq_buf,
1022                              inquiry_len,
1023                              /*evpd*/FALSE,
1024                              /*page_code*/0,
1025                              SSD_MIN_SIZE,
1026                              /*timeout*/60 * 1000);
1027                 break;
1028         }
1029         default:
1030                 panic("probestart: invalid action state 0x%x\n", softc->action);
1031         }
1032         start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1033         cam_periph_doacquire(periph);
1034         xpt_action(start_ccb);
1035 }
1036
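/*
 * Fetch the user (default) transfer settings for the path and install
 * them as the current settings, so that negotiation starts from a clean
 * baseline rather than from whatever the loader or BIOS left behind.
 */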
1037 static void
1038 proberequestdefaultnegotiation(struct cam_periph *periph)
1039 {
1040         struct ccb_trans_settings cts;
1041
1042         memset(&cts, 0, sizeof(cts));
1043         xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
1044         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1045         cts.type = CTS_TYPE_USER_SETTINGS;
1046         xpt_action((union ccb *)&cts);
1047         if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
1048                 return;
1049         }
1050         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
1051         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1052         xpt_action((union ccb *)&cts);
1053 }
1054
1055 /*
1056  * Backoff Negotiation Code- only pertinent for SPI devices.
1057  */
1058 static int
1059 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
1060 {
1061         struct ccb_trans_settings cts;
1062         struct ccb_trans_settings_spi *spi;
1063
1064         memset(&cts, 0, sizeof (cts));
1065         xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
1066         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1067         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1068         xpt_action((union ccb *)&cts);
1069         if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
1070                 if (bootverbose) {
1071                         xpt_print(periph->path,
1072                             "failed to get current device settings\n");
1073                 }
1074                 return (0);
1075         }
1076         if (cts.transport != XPORT_SPI) {
1077                 if (bootverbose) {
1078                         xpt_print(periph->path, "not SPI transport\n");
1079                 }
1080                 return (0);
1081         }
1082         spi = &cts.xport_specific.spi;
1083
1084         /*
1085          * We cannot renegotiate sync rate if we don't have one.
1086          */
1087         if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
1088                 if (bootverbose) {
1089                         xpt_print(periph->path, "no sync rate known\n");
1090                 }
1091                 return (0);
1092         }
1093
1094         /*
1095          * We'll assert that we don't have to touch PPR options- the
1096          * SIM will see what we do with period and offset and adjust
1097          * the PPR options as appropriate.
1098          */
1099
1100         /*
1101          * A sync rate with unknown or zero offset is nonsensical.
1102          * A sync period of zero means Async.
1103          */
1104         if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
1105          || spi->sync_offset == 0 || spi->sync_period == 0) {
1106                 if (bootverbose) {
1107                         xpt_print(periph->path, "no sync rate available\n");
1108                 }
1109                 return (0);
1110         }
1111
1112         if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
1113                 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1114                     ("hit async: giving up on DV\n"));
1115                 return (0);
1116         }
1117
1118         /*
1119          * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
1120          * We don't try to remember 'last' settings to see if the SIM actually
1121          * gets into the speed we want to set. We check on the SIM telling
1122          * us that a requested speed is bad, but otherwise don't try and
1123          * check the speed due to the asynchronous and handshake nature
1124          * of speed setting.
1125          */
1126         spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
1127         for (;;) {
1128                 spi->sync_period++;
1129                 if (spi->sync_period >= 0xf) {
1130                         spi->sync_period = 0;
1131                         spi->sync_offset = 0;
1132                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1133                             ("setting to async for DV\n"));
1134                         /*
1135                          * Once we hit async, we don't want to try
1136                          * any more settings.
1137                          */
1138                         device->flags |= CAM_DEV_DV_HIT_BOTTOM;
1139                 } else if (bootverbose) {
1140                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1141                             ("DV: period 0x%x\n", spi->sync_period));
1142                         printf("setting period to 0x%x\n", spi->sync_period);
1143                 }
1144                 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
1145                 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1146                 xpt_action((union ccb *)&cts);
1147                 if (cam_ccb_status((union ccb *)&cts) == CAM_REQ_CMP) {
1148                         break;
1149                 }
1150                 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1151                     ("DV: failed to set period 0x%x\n", spi->sync_period));
1152                 if (spi->sync_period == 0) {
1153                         return (0);
1154                 }
1155         }
1156         return (1);
1157 }
1158
1159 #define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1160
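/*
 * Completion handler for all probe CCBs.  Interpret the result of the
 * step that just finished, record anything learned about the device, and
 * advance (or abort) the probe state machine accordingly.
 */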
1161 static void
1162 probedone(struct cam_periph *periph, union ccb *done_ccb)
1163 {
1164         probe_softc *softc;
1165         struct cam_path *path;
1166         struct scsi_inquiry_data *inq_buf;
1167         uint32_t  priority;
1168
1169         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
1170
1171         softc = (probe_softc *)periph->softc;
1172         path = done_ccb->ccb_h.path;
1173         priority = done_ccb->ccb_h.pinfo.priority;
1174         cam_periph_assert(periph, MA_OWNED);
1175
1176         switch (softc->action) {
1177         case PROBE_TUR:
1178         {
1179                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1180                         if (cam_periph_error(done_ccb, 0, SF_NO_PRINT) ==
1181                             ERESTART) {
1182 outr:
1183                                 /* Drop freeze taken due to CAM_DEV_QFREEZE */
1184                                 cam_release_devq(path, 0, 0, 0, FALSE);
1185                                 return;
1186                         }
1187                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1188                                 /* Don't wedge the queue */
1189                                 xpt_release_devq(done_ccb->ccb_h.path,
1190                                                  /*count*/1,
1191                                                  /*run_queue*/TRUE);
1192                 }
1193                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
1194                 xpt_release_ccb(done_ccb);
1195                 xpt_schedule(periph, priority);
1196 out:
1197                 /* Drop freeze taken due to CAM_DEV_QFREEZE and release. */
1198                 cam_release_devq(path, 0, 0, 0, FALSE);
1199                 cam_periph_release_locked(periph);
1200                 return;
1201         }
1202         case PROBE_INQUIRY:
1203         case PROBE_FULL_INQUIRY:
1204         {
1205                 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
1206                         uint8_t periph_qual;
1207
1208                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
1209                         scsi_find_quirk(path->device);
1210                         inq_buf = &path->device->inq_data;
1211
1212                         periph_qual = SID_QUAL(inq_buf);
1213
1214                         if (periph_qual == SID_QUAL_LU_CONNECTED ||
1215                             periph_qual == SID_QUAL_LU_OFFLINE) {
1216                                 /*
1217                                  * We conservatively request only
1218                                  * SHORT_INQUIRY_LEN bytes of inquiry
1219                                  * information during our first try
1220                                  * at sending an INQUIRY. If the device
1221                                  * has more information to give,
1222                                  * perform a second request specifying
1223                                  * the amount of information the device
1224                                  * is willing to give.
1225                                  */
1226                                 if (softc->action == PROBE_INQUIRY
1227                                     && SID_ADDITIONAL_LENGTH(inq_buf)
1228                                     > SHORT_INQUIRY_LENGTH) {
1229                                         PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
1230                                         xpt_release_ccb(done_ccb);
1231                                         xpt_schedule(periph, priority);
1232                                         goto out;
1233                                 }
1234
1235                                 scsi_devise_transport(path);
1236
1237                                 if (path->device->lun_id == 0 &&
1238                                     SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 &&
1239                                     (SCSI_QUIRK(path->device)->quirks &
1240                                      CAM_QUIRK_NORPTLUNS) == 0) {
1241                                         PROBE_SET_ACTION(softc,
1242                                             PROBE_REPORT_LUNS);
1243                                         /*
1244                                          * Start with room for *one* lun.
1245                                          */
1246                                         periph->path->target->rpl_size = 16;
1247                                 } else if (INQ_DATA_TQ_ENABLED(inq_buf))
1248                                         PROBE_SET_ACTION(softc,
1249                                             PROBE_MODE_SENSE);
1250                                 else
1251                                         PROBE_SET_ACTION(softc,
1252                                             PROBE_SUPPORTED_VPD_LIST);
1253
1254                                 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1255                                         path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1256                                         xpt_acquire_device(path->device);
1257                                 }
1258                                 xpt_release_ccb(done_ccb);
1259                                 xpt_schedule(periph, priority);
1260                                 goto out;
1261                         } else if (path->device->lun_id == 0 &&
1262                             SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 &&
1263                             (SCSI_QUIRK(path->device)->quirks &
1264                              CAM_QUIRK_NORPTLUNS) == 0) {
1265                                 PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS);
1266                                 periph->path->target->rpl_size = 16;
1267                                 xpt_release_ccb(done_ccb);
1268                                 xpt_schedule(periph, priority);
1269                                 goto out;
1270                         }
1271                 } else if (cam_periph_error(done_ccb, 0,
1272                                             done_ccb->ccb_h.target_lun > 0
1273                                             ? SF_RETRY_UA|SF_QUIET_IR
1274                                             : SF_RETRY_UA) == ERESTART) {
1275                         goto outr;
1276                 } else {
1277                         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1278                                 /* Don't wedge the queue */
1279                                 xpt_release_devq(done_ccb->ccb_h.path,
1280                                     /*count*/1, /*run_queue*/TRUE);
1281                         }
1282                         path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID;
1283                 }
1284                 /*
1285                  * If we get to this point, we got an error status back
1286                  * from the inquiry and the error status doesn't require
1287                  * automatically retrying the command.  Therefore, the
1288                  * inquiry failed.  If we had inquiry information before
1289                  * for this device, but this latest inquiry command failed,
1290                  * the device has probably gone away.  If this device isn't
1291                  * already marked unconfigured, notify the peripheral
1292                  * drivers that this device is no more.
1293                  */
1294                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
1295                         /* Send the async notification. */
1296                         xpt_async(AC_LOST_DEVICE, path, NULL);
1297                 PROBE_SET_ACTION(softc, PROBE_INVALID);
1298
1299                 xpt_release_ccb(done_ccb);
1300                 break;
1301         }
1302         case PROBE_REPORT_LUNS:
1303         {
1304                 struct ccb_scsiio *csio;
1305                 struct scsi_report_luns_data *lp;
1306                 u_int nlun, maxlun;
1307
1308                 csio = &done_ccb->csio;
1309
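                     /*
                      * The returned parameter data is an 8-byte header
                      * followed by 8-byte LUN entries, so the reported length
                      * divided by 8 gives the LUN count, and dxfer_len / 8,
                      * less the header slot, gives how many entries fit in
                      * our buffer.
                      */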
1310                 lp = (struct scsi_report_luns_data *)csio->data_ptr;
1311                 nlun = scsi_4btoul(lp->length) / 8;
1312                 maxlun = (csio->dxfer_len / 8) - 1;
1313
1314                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1315                         if (cam_periph_error(done_ccb, 0,
1316                                 done_ccb->ccb_h.target_lun > 0 ?
1317                                 SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA) ==
1318                             ERESTART) {
1319                                 goto outr;
1320                         }
1321                         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1322                                 xpt_release_devq(done_ccb->ccb_h.path, 1,
1323                                     TRUE);
1324                         }
1325                         free(lp, M_CAMXPT);
1326                         lp = NULL;
1327                 } else if (nlun > maxlun) {
1328                         /*
1329                          * Reallocate and retry to cover all luns
1330                          */
1331                         CAM_DEBUG(path, CAM_DEBUG_PROBE,
1332                             ("Probe: reallocating REPORT_LUNS for %u luns\n",
1333                              nlun));
1334                         free(lp, M_CAMXPT);
1335                         path->target->rpl_size = (nlun << 3) + 8;
1336                         xpt_release_ccb(done_ccb);
1337                         xpt_schedule(periph, priority);
1338                         goto out;
1339                 } else if (nlun == 0) {
1340                         /*
1341                          * If there don't appear to be any luns, bail.
1342                          */
1343                         free(lp, M_CAMXPT);
1344                         lp = NULL;
1345                 } else {
1346                         lun_id_t lun;
1347                         int idx;
1348
1349                         CAM_DEBUG(path, CAM_DEBUG_PROBE,
1350                            ("Probe: %u lun(s) reported\n", nlun));
1351
1352                         CAM_GET_LUN(lp, 0, lun);
1353                         /*
1354                          * If the first lun is not lun 0, then either there
1355                          * is no lun 0 in the list, or the list is unsorted.
1356                          */
1357                         if (lun != 0) {
1358                                 for (idx = 0; idx < nlun; idx++) {
1359                                         CAM_GET_LUN(lp, idx, lun);
1360                                         if (lun == 0) {
1361                                                 break;
1362                                         }
1363                                 }
1364                                 if (idx != nlun) {
1365                                         uint8_t tlun[8];
1366                                         memcpy(tlun,
1367                                             lp->luns[0].lundata, 8);
1368                                         memcpy(lp->luns[0].lundata,
1369                                             lp->luns[idx].lundata, 8);
1370                                         memcpy(lp->luns[idx].lundata,
1371                                             tlun, 8);
1372                                         CAM_DEBUG(path, CAM_DEBUG_PROBE,
1373                                             ("lun 0 in position %u\n", idx));
1374                                 }
1375                         }
1376                         /*
1377                          * If we have an old lun list, we can either
1378                          * retest luns that appear to have been dropped,
1379                          * or just nuke them.  We'll opt for the latter.
1380                          * This function will also install the new list
1381                          * in the target structure.
1382                          */
1383                         probe_purge_old(path, lp, softc->flags);
1384                         lp = NULL;
1385                 }
1386                 /* The processing above should either exit via a `goto
1387                  * out` or leave the `lp` variable `NULL` and (if
1388                  * applicable) `free()` the storage to which it had
1389                  * pointed. Assert here that this is the case.
1390                  */
1391                 KASSERT(lp == NULL, ("%s: lp is not NULL", __func__));
1392                 inq_buf = &path->device->inq_data;
1393                 if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID &&
1394                     (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED ||
1395                     SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) {
1396                         if (INQ_DATA_TQ_ENABLED(inq_buf))
1397                                 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
1398                         else
1399                                 PROBE_SET_ACTION(softc,
1400                                     PROBE_SUPPORTED_VPD_LIST);
1401                         xpt_release_ccb(done_ccb);
1402                         xpt_schedule(periph, priority);
1403                         goto out;
1404                 }
1405                 PROBE_SET_ACTION(softc, PROBE_INVALID);
1406                 xpt_release_ccb(done_ccb);
1407                 break;
1408         }
1409         case PROBE_MODE_SENSE:
1410         {
1411                 struct ccb_scsiio *csio;
1412                 struct scsi_mode_header_6 *mode_hdr;
1413
1414                 csio = &done_ccb->csio;
1415                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
1416                 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
1417                         struct scsi_control_page *page;
1418                         uint8_t *offset;
1419
1420                         offset = ((uint8_t *)&mode_hdr[1])
1421                             + mode_hdr->blk_desc_len;
1422                         page = (struct scsi_control_page *)offset;
1423                         path->device->queue_flags = page->queue_flags;
1424                 } else if (cam_periph_error(done_ccb, 0,
1425                         SF_RETRY_UA|SF_NO_PRINT) == ERESTART) {
1426                         goto outr;
1427                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1428                         /* Don't wedge the queue */
1429                         xpt_release_devq(done_ccb->ccb_h.path,
1430                                          /*count*/1, /*run_queue*/TRUE);
1431                 }
1432                 xpt_release_ccb(done_ccb);
1433                 free(mode_hdr, M_CAMXPT);
1434                 PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
1435                 xpt_schedule(periph, priority);
1436                 goto out;
1437         }
1438         case PROBE_SUPPORTED_VPD_LIST:
1439         {
1440                 struct ccb_scsiio *csio;
1441                 struct scsi_vpd_supported_page_list *page_list;
1442
1443                 csio = &done_ccb->csio;
1444                 page_list =
1445                     (struct scsi_vpd_supported_page_list *)csio->data_ptr;
1446
1447                 if (path->device->supported_vpds != NULL) {
1448                         free(path->device->supported_vpds, M_CAMXPT);
1449                         path->device->supported_vpds = NULL;
1450                         path->device->supported_vpds_len = 0;
1451                 }
1452
1453                 if (page_list == NULL) {
1454                         /*
1455                          * Don't process the command as it was never sent
1456                          */
1457                 } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
1458                         /* Got vpd list */
1459                         path->device->supported_vpds_len = page_list->length +
1460                             SVPD_SUPPORTED_PAGES_HDR_LEN;
1461                         path->device->supported_vpds = (uint8_t *)page_list;
1462                         xpt_release_ccb(done_ccb);
1463                         PROBE_SET_ACTION(softc, PROBE_DEVICE_ID);
1464                         xpt_schedule(periph, priority);
1465                         goto out;
1466                 } else if (cam_periph_error(done_ccb, 0,
1467                         SF_RETRY_UA|SF_NO_PRINT) == ERESTART) {
1468                         goto outr;
1469                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1470                         /* Don't wedge the queue */
1471                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1472                                          /*run_queue*/TRUE);
1473                 }
1474
1475                 if (page_list)
1476                         free(page_list, M_CAMXPT);
1477                 /* No VPDs available, skip to device check. */
1478                 csio->data_ptr = NULL;
1479                 goto probe_device_check;
1480         }
1481         case PROBE_DEVICE_ID:
1482         {
1483                 struct scsi_vpd_device_id *devid;
1484                 struct ccb_scsiio *csio;
1485                 uint32_t length = 0;
1486
1487                 csio = &done_ccb->csio;
1488                 devid = (struct scsi_vpd_device_id *)csio->data_ptr;
1489
1490                 /* Clean up from previous instance of this device */
1491                 if (path->device->device_id != NULL) {
1492                         path->device->device_id_len = 0;
1493                         free(path->device->device_id, M_CAMXPT);
1494                         path->device->device_id = NULL;
1495                 }
1496
1497                 if (devid == NULL) {
1498                         /* Don't process the command as it was never sent */
1499                 } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
1500                         length = scsi_2btoul(devid->length);
1501                         if (length != 0) {
1502                                 /*
1503                                  * NB: device_id_len is actual response
1504                                  * size, not buffer size.
1505                                  */
1506                                 path->device->device_id_len = length +
1507                                     SVPD_DEVICE_ID_HDR_LEN;
1508                                 path->device->device_id = (uint8_t *)devid;
1509                         }
1510                 } else if (cam_periph_error(done_ccb, 0,
1511                         SF_RETRY_UA) == ERESTART) {
1512                         goto outr;
1513                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1514                         /* Don't wedge the queue */
1515                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1516                                          /*run_queue*/TRUE);
1517                 }
1518
1519                 /* Free the device id space if we don't use it */
1520                 if (devid && length == 0)
1521                         free(devid, M_CAMXPT);
1522                 xpt_release_ccb(done_ccb);
1523                 PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY);
1524                 xpt_schedule(periph, priority);
1525                 goto out;
1526         }
1527         case PROBE_EXTENDED_INQUIRY: {
1528                 struct scsi_vpd_extended_inquiry_data *ext_inq;
1529                 struct ccb_scsiio *csio;
1530                 int32_t length = 0;
1531
1532                 csio = &done_ccb->csio;
1533                 ext_inq = (struct scsi_vpd_extended_inquiry_data *)
1534                     csio->data_ptr;
1535                 if (path->device->ext_inq != NULL) {
1536                         path->device->ext_inq_len = 0;
1537                         free(path->device->ext_inq, M_CAMXPT);
1538                         path->device->ext_inq = NULL;
1539                 }
1540
1541                 if (ext_inq == NULL) {
1542                         /* Don't process the command as it was never sent */
1543                 } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
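                             /*
                              * page_length counts the bytes following the
                              * 4-byte VPD header, so add the header size back,
                              * clamp to our buffer size, and subtract whatever
                              * the device did not actually transfer.
                              */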
1544                         length = scsi_2btoul(ext_inq->page_length) +
1545                             __offsetof(struct scsi_vpd_extended_inquiry_data,
1546                             flags1);
1547                         length = min(length, sizeof(*ext_inq));
1548                         length -= csio->resid;
1549                         if (length > 0) {
1550                                 path->device->ext_inq_len = length;
1551                                 path->device->ext_inq = (uint8_t *)ext_inq;
1552                         }
1553                 } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA) ==
1554                     ERESTART) {
1555                         goto outr;
1556                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1557                         /* Don't wedge the queue */
1558                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1559                                          /*run_queue*/TRUE);
1560                 }
1561
1562                 /* Free the extended inquiry data if we don't use it */
1563                 if (ext_inq && length <= 0)
1564                         free(ext_inq, M_CAMXPT);
1565                 xpt_release_ccb(done_ccb);
1566                 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM);
1567                 xpt_schedule(periph, priority);
1568                 goto out;
1569         }
1570
1571 probe_device_check:
1572         case PROBE_SERIAL_NUM:
1573         {
1574                 struct ccb_scsiio *csio;
1575                 struct scsi_vpd_unit_serial_number *serial_buf;
1576                 uint32_t  priority;
1577                 int changed;
1578                 int have_serialnum;
1579
1580                 changed = 1;
1581                 have_serialnum = 0;
1582                 csio = &done_ccb->csio;
1583                 priority = done_ccb->ccb_h.pinfo.priority;
1584                 serial_buf =
1585                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
1586
1587                 if (serial_buf == NULL) {
1588                         /*
1589                          * Don't process the command as it was never sent
1590                          */
1591                 } else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP
1592                         && (serial_buf->length > 0)) {
1593                         have_serialnum = 1;
1594                         path->device->serial_num =
1595                                 (uint8_t *)malloc((serial_buf->length + 1),
1596                                                    M_CAMXPT, M_NOWAIT);
1597                         if (path->device->serial_num != NULL) {
1598                                 int start, slen;
1599
1600                                 start = strspn(serial_buf->serial_num, " ");
1601                                 slen = serial_buf->length - start;
1602                                 if (slen <= 0) {
1603                                         /*
1604                                          * SPC5r05 says that an all-space serial
1605                                          * number means no product serial number
1606                                          * is available
1607                                          */
1608                                         slen = 0;
1609                                 }
1610                                 /*
1611                                  * In apparent violation of the spec, some
1612                                  * devices pad their serial numbers with
1613                                  * trailing spaces. Remove them.
1614                                  */
1615                                 while (slen > 0 &&
1616                                     serial_buf->serial_num[start + slen - 1] == ' ')
1617                                         slen--;
1618                                 memcpy(path->device->serial_num,
1619                                        &serial_buf->serial_num[start], slen);
1620                                 path->device->serial_num_len = slen;
1621                                 path->device->serial_num[slen] = '\0';
1622                         }
1623                 } else if (cam_periph_error(done_ccb, 0,
1624                         SF_RETRY_UA|SF_NO_PRINT) == ERESTART) {
1625                         goto outr;
1626                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1627                         /* Don't wedge the queue */
1628                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1629                                          /*run_queue*/TRUE);
1630                 }
1631
1632                 /*
1633                  * Let's see if we have seen this device before.
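                      * probestart() computed an MD5 digest of the previous
                      * inquiry data (and serial number, if any) and set
                      * PROBE_INQUIRY_CKSUM; recompute it here and compare to
                      * decide whether the same device still answers at this
                      * lun.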
1634                  */
1635                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
1636                         MD5_CTX context;
1637                         uint8_t digest[16];
1638
1639                         MD5Init(&context);
1640
1641                         MD5Update(&context,
1642                                   (unsigned char *)&path->device->inq_data,
1643                                   sizeof(struct scsi_inquiry_data));
1644
1645                         if (have_serialnum)
1646                                 MD5Update(&context, path->device->serial_num,
1647                                           path->device->serial_num_len);
1648
1649                         MD5Final(digest, &context);
1650                         if (bcmp(softc->digest, digest, 16) == 0)
1651                                 changed = 0;
1652
1653                         /*
1654                          * XXX Do we need to do a TUR in order to ensure
1655                          *     that the device really hasn't changed???
1656                          */
1657                         if ((changed != 0)
1658                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
1659                                 xpt_async(AC_LOST_DEVICE, path, NULL);
1660                 }
1661                 if (serial_buf != NULL)
1662                         free(serial_buf, M_CAMXPT);
1663
1664                 if (changed != 0) {
1665                         /*
1666                          * We now have all the information needed to
1667                          * safely perform transfer negotiations.
1668                          * Controllers don't perform any negotiation or
1669                          * tagged queuing until after the first
1670                          * XPT_SET_TRAN_SETTINGS ccb is received.  So, on
1671                          * a new device, just retrieve the user settings
1672                          * and install them as the current settings to
1673                          * set the device up.
1674                          */
1675                         proberequestdefaultnegotiation(periph);
1676                         xpt_release_ccb(done_ccb);
1677
1678                         /*
1679                          * Perform a TUR to allow the controller to
1680                          * perform any necessary transfer negotiation.
1681                          */
1682                         PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
1683                         xpt_schedule(periph, priority);
1684                         goto out;
1685                 }
1686                 xpt_release_ccb(done_ccb);
1687                 break;
1688         }
1689         case PROBE_TUR_FOR_NEGOTIATION:
1690         case PROBE_DV_EXIT:
1691                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1692                         if (cam_periph_error(done_ccb, 0, SF_NO_PRINT |
1693                             SF_NO_RECOVERY | SF_NO_RETRY) == ERESTART)
1694                                 goto outr;
1695                 }
1696                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1697                         /* Don't wedge the queue */
1698                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1699                                          /*run_queue*/TRUE);
1700                 }
1701                 /*
1702                  * Do Domain Validation for lun 0 on devices that claim
1703                  * to support Synchronous Transfer modes.
1704                  */
1705                 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
1706                  && done_ccb->ccb_h.target_lun == 0
1707                  && (path->device->inq_data.flags & SID_Sync) != 0
1708                  && (path->device->flags & CAM_DEV_IN_DV) == 0) {
1709                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1710                             ("Begin Domain Validation\n"));
1711                         path->device->flags |= CAM_DEV_IN_DV;
1712                         xpt_release_ccb(done_ccb);
1713                         PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1);
1714                         xpt_schedule(periph, priority);
1715                         goto out;
1716                 }
1717                 if (softc->action == PROBE_DV_EXIT) {
1718                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1719                             ("Leave Domain Validation\n"));
1720                 }
1721                 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1722                         path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1723                         xpt_acquire_device(path->device);
1724                 }
1725                 path->device->flags &=
1726                     ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
1727                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
1728                         /* Inform the XPT that a new device has been found */
1729                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
1730                         xpt_action(done_ccb);
1731                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
1732                                   done_ccb);
1733                 }
1734                 PROBE_SET_ACTION(softc, PROBE_DONE);
1735                 xpt_release_ccb(done_ccb);
1736                 break;
1737         case PROBE_INQUIRY_BASIC_DV1:
1738         case PROBE_INQUIRY_BASIC_DV2:
1739         {
1740                 struct scsi_inquiry_data *nbuf;
1741                 struct ccb_scsiio *csio;
1742
1743                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1744                         if (cam_periph_error(done_ccb, 0, SF_NO_PRINT |
1745                             SF_NO_RECOVERY | SF_NO_RETRY) == ERESTART)
1746                                 goto outr;
1747                 }
1748                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1749                         /* Don't wedge the queue */
1750                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1751                                          /*run_queue*/TRUE);
1752                 }
1753                 csio = &done_ccb->csio;
1754                 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
1755                 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
1756                         xpt_print(path,
1757                             "inquiry data fails comparison at DV%d step\n",
1758                             softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
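                             /*
                              * Try to back off to a less aggressive transfer
                              * setting.  If one was available, redo negotiation
                              * and the DV inquiries at the reduced rate;
                              * otherwise give up on domain validation for this
                              * device.
                              */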
1759                         if (proberequestbackoff(periph, path->device)) {
1760                                 path->device->flags &= ~CAM_DEV_IN_DV;
1761                                 PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
1762                         } else {
1763                                 /* give up */
1764                                 PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
1765                         }
1766                         free(nbuf, M_CAMXPT);
1767                         xpt_release_ccb(done_ccb);
1768                         xpt_schedule(periph, priority);
1769                         goto out;
1770                 }
1771                 free(nbuf, M_CAMXPT);
1772                 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
1773                         PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2);
1774                         xpt_release_ccb(done_ccb);
1775                         xpt_schedule(periph, priority);
1776                         goto out;
1777                 }
1778                 if (softc->action == PROBE_INQUIRY_BASIC_DV2) {
1779                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1780                             ("Leave Domain Validation Successfully\n"));
1781                 }
1782                 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1783                         path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1784                         xpt_acquire_device(path->device);
1785                 }
1786                 path->device->flags &=
1787                     ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
1788                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
1789                         /* Inform the XPT that a new device has been found */
1790                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
1791                         xpt_action(done_ccb);
1792                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
1793                                   done_ccb);
1794                 }
1795                 PROBE_SET_ACTION(softc, PROBE_DONE);
1796                 xpt_release_ccb(done_ccb);
1797                 break;
1798         }
1799         default:
1800                 panic("probedone: invalid action state 0x%x\n", softc->action);
1801         }
1802         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
1803         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
1804         done_ccb->ccb_h.status = CAM_REQ_CMP;
1805         xpt_done(done_ccb);
1806         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
1807                 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
1808                 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1809                 cam_release_devq(path, 0, 0, 0, FALSE);
1811                 cam_periph_invalidate(periph);
1812                 cam_periph_release_locked(periph);
1813         } else {
1814                 probeschedule(periph);
1815                 goto out;
1816         }
1817 }
1818
1819 static void
1820 probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new,
1821     probe_flags flags)
1822 {
1823         struct cam_path *tp;
1824         struct scsi_report_luns_data *old;
1825         u_int idx1, idx2, nlun_old, nlun_new;
1826         lun_id_t this_lun;
1827         uint8_t *ol, *nl;
1828
1829         if (path->target == NULL) {
1830                 return;
1831         }
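             /*
              * Install the new lun list under the luns mutex and take the old
              * one private, so it can be compared against and freed without
              * holding the lock.
              */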
1832         mtx_lock(&path->target->luns_mtx);
1833         old = path->target->luns;
1834         path->target->luns = new;
1835         mtx_unlock(&path->target->luns_mtx);
1836         if (old == NULL)
1837                 return;
1838         nlun_old = scsi_4btoul(old->length) / 8;
1839         nlun_new = scsi_4btoul(new->length) / 8;
1840
1841         /*
1842          * We are not going to assume sorted lists. Deal.
1843          */
1844         for (idx1 = 0; idx1 < nlun_old; idx1++) {
1845                 ol = old->luns[idx1].lundata;
1846                 for (idx2 = 0; idx2 < nlun_new; idx2++) {
1847                         nl = new->luns[idx2].lundata;
1848                         if (memcmp(nl, ol, 8) == 0) {
1849                                 break;
1850                         }
1851                 }
1852                 if (idx2 < nlun_new) {
1853                         continue;
1854                 }
1855                 /*
1856                  * An 'old' item not in the 'new' list.
1857                  * Nuke it. Except that if it is lun 0,
1858                  * that would be what the probe state
1859                  * machine is currently working on,
1860                  * so we won't do that.
1861                  */
1862                 CAM_GET_LUN(old, idx1, this_lun);
1863                 if (this_lun == 0) {
1864                         continue;
1865                 }
1866
1867                 /*
1868                  * We also cannot nuke a LUN that is not in a format we
1869                  * understand.  If the HBA only supports "simple" LUNs,
1870                  * skip anything we cannot express that way and reduce
1871                  * the rest to the simple form before building the path.
1872                  */
1873                 if (!(flags & PROBE_EXTLUN)) {
1874                         if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1))
1875                                 continue;
1876                         CAM_GET_SIMPLE_LUN(old, idx1, this_lun);
1877                 }
1878
1879                 if (xpt_create_path(&tp, NULL, xpt_path_path_id(path),
1880                     xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) {
1881                         xpt_async(AC_LOST_DEVICE, tp, NULL);
1882                         xpt_free_path(tp);
1883                 }
1884         }
1885         free(old, M_CAMXPT);
1886 }
1887
1888 static void
1889 probecleanup(struct cam_periph *periph)
1890 {
1891         free(periph->softc, M_CAMXPT);
1892 }
1893
1894 static void
1895 scsi_find_quirk(struct cam_ed *device)
1896 {
1897         struct scsi_quirk_entry *quirk;
1898         caddr_t match;
1899
1900         match = cam_quirkmatch((caddr_t)&device->inq_data,
1901                                (caddr_t)scsi_quirk_table,
1902                                nitems(scsi_quirk_table),
1903                                sizeof(*scsi_quirk_table), scsi_inquiry_match);
1904
1905         if (match == NULL)
1906                 panic("scsi_find_quirk: device didn't match wildcard entry!!");
1907
1908         quirk = (struct scsi_quirk_entry *)match;
1909         device->quirk = quirk;
1910         device->mintags = quirk->mintags;
1911         device->maxtags = quirk->maxtags;
1912 }
1913
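     /*
      * Book-keeping carried across the asynchronous callbacks of a bus or
      * target scan.  lunindex[] is a variable-length array, sized from the
      * controller's max_target when the structure is allocated in
      * scsi_scan_bus(), recording the next REPORT LUNS entry to probe for
      * each target.
      */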
1914 typedef struct {
1915         union   ccb *request_ccb;
1916         struct  ccb_pathinq *cpi;
1917         int     counter;
1918         int     lunindex[0];
1919 } scsi_scan_bus_info;
1920
1921 /*
1922  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
1923  * As the scan progresses, scsi_scan_bus is used as the
1924  * completion callback function.
1925  */
1926 static void
1927 scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
1928 {
1929         struct mtx *mtx;
1930
1931         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
1932                   ("scsi_scan_bus\n"));
1933         switch (request_ccb->ccb_h.func_code) {
1934         case XPT_SCAN_BUS:
1935         case XPT_SCAN_TGT:
1936         {
1937                 scsi_scan_bus_info *scan_info;
1938                 union   ccb *work_ccb, *reset_ccb;
1939                 struct  cam_path *path;
1940                 u_int   i;
1941                 u_int   low_target, max_target;
1942                 u_int   initiator_id;
1943
1944                 /* Find out the characteristics of the bus */
1945                 work_ccb = xpt_alloc_ccb_nowait();
1946                 if (work_ccb == NULL) {
1947                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1948                         xpt_done(request_ccb);
1949                         return;
1950                 }
1951                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
1952                               request_ccb->ccb_h.pinfo.priority);
1953                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
1954                 xpt_action(work_ccb);
1955                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
1956                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
1957                         xpt_free_ccb(work_ccb);
1958                         xpt_done(request_ccb);
1959                         return;
1960                 }
1961
1962                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
1963                         /*
1964                          * Can't scan the bus on an adapter that
1965                          * cannot perform the initiator role.
1966                          */
1967                         request_ccb->ccb_h.status = CAM_REQ_CMP;
1968                         xpt_free_ccb(work_ccb);
1969                         xpt_done(request_ccb);
1970                         return;
1971                 }
1972
1973                 /* We may need to reset the bus first, if we haven't done it yet. */
1974                 if ((work_ccb->cpi.hba_inquiry &
1975                     (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) &&
1976                     !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) &&
1977                     !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) &&
1978                     (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) {
1979                         xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path,
1980                               CAM_PRIORITY_NONE);
1981                         reset_ccb->ccb_h.func_code = XPT_RESET_BUS;
1982                         xpt_action(reset_ccb);
1983                         if (reset_ccb->ccb_h.status != CAM_REQ_CMP) {
1984                                 request_ccb->ccb_h.status = reset_ccb->ccb_h.status;
1985                                 xpt_free_ccb(reset_ccb);
1986                                 xpt_free_ccb(work_ccb);
1987                                 xpt_done(request_ccb);
1988                                 return;
1989                         }
1990                         xpt_free_ccb(reset_ccb);
1991                 }
1992
1993                 /* Save some state for use while we probe for devices */
1994                 scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) +
1995                     (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT);
1996                 if (scan_info == NULL) {
1997                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1998                         xpt_free_ccb(work_ccb);
1999                         xpt_done(request_ccb);
2000                         return;
2001                 }
2002                 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2003                    ("SCAN start for %p\n", scan_info));
2004                 scan_info->request_ccb = request_ccb;
2005                 scan_info->cpi = &work_ccb->cpi;
2006
2007                 /* Cache on our stack so we can work asynchronously */
2008                 max_target = scan_info->cpi->max_target;
2009                 low_target = 0;
2010                 initiator_id = scan_info->cpi->initiator_id;
2011
2012                 /*
2013                  * We can scan all targets in parallel, or do it sequentially.
2014                  */
2015
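                     /*
                      * For a single-target scan the counter stays at zero; for
                      * a sequential scan it tracks the target id currently
                      * being probed; for a parallel scan it counts the target
                      * probes still outstanding (our own initiator id is
                      * skipped).
                      */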
2016                 if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) {
2017                         max_target = low_target = request_ccb->ccb_h.target_id;
2018                         scan_info->counter = 0;
2019                 } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
2020                         max_target = 0;
2021                         scan_info->counter = 0;
2022                 } else {
2023                         scan_info->counter = scan_info->cpi->max_target + 1;
2024                         if (scan_info->cpi->initiator_id < scan_info->counter) {
2025                                 scan_info->counter--;
2026                         }
2027                 }
2028                 mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
2029                 mtx_unlock(mtx);
2030
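                     /*
                      * Queue an XPT_SCAN_LUN for lun 0 of each target to be
                      * scanned (every target for a parallel scan, only the
                      * first or the requested one otherwise).  Each completion
                      * re-enters scsi_scan_bus() through the XPT_SCAN_LUN case
                      * below to walk the remaining luns and targets.
                      */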
2031                 for (i = low_target; i <= max_target; i++) {
2032                         cam_status status;
2033                         if (i == initiator_id)
2034                                 continue;
2035
2036                         status = xpt_create_path(&path, NULL,
2037                                                  request_ccb->ccb_h.path_id,
2038                                                  i, 0);
2039                         if (status != CAM_REQ_CMP) {
2040                                 printf("scsi_scan_bus: xpt_create_path failed"
2041                                        " with status %#x, bus scan halted\n",
2042                                        status);
2043                                 free(scan_info, M_CAMXPT);
2044                                 request_ccb->ccb_h.status = status;
2045                                 xpt_free_ccb(work_ccb);
2046                                 xpt_done(request_ccb);
2047                                 break;
2048                         }
2049                         work_ccb = xpt_alloc_ccb_nowait();
2050                         if (work_ccb == NULL) {
2051                                 xpt_free_ccb((union ccb *)scan_info->cpi);
2052                                 free(scan_info, M_CAMXPT);
2053                                 xpt_free_path(path);
2054                                 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2055                                 xpt_done(request_ccb);
2056                                 break;
2057                         }
2058                         xpt_setup_ccb(&work_ccb->ccb_h, path,
2059                                       request_ccb->ccb_h.pinfo.priority);
2060                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2061                         work_ccb->ccb_h.cbfcnp = scsi_scan_bus;
2062                         work_ccb->ccb_h.flags |= CAM_UNLOCKED;
2063                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
2064                         work_ccb->crcn.flags = request_ccb->crcn.flags;
2065                         xpt_action(work_ccb);
2066                 }
2067
2068                 mtx_lock(mtx);
2069                 break;
2070         }
2071         case XPT_SCAN_LUN:
2072         {
2073                 cam_status status;
2074                 struct cam_path *path, *oldpath;
2075                 scsi_scan_bus_info *scan_info;
2076                 struct cam_et *target;
2077                 struct cam_ed *device, *nextdev;
2078                 int next_target;
2079                 path_id_t path_id;
2080                 target_id_t target_id;
2081                 lun_id_t lun_id;
2082
2083                 oldpath = request_ccb->ccb_h.path;
2084
2085                 status = cam_ccb_status(request_ccb);
2086                 scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
2087                 path_id = request_ccb->ccb_h.path_id;
2088                 target_id = request_ccb->ccb_h.target_id;
2089                 lun_id = request_ccb->ccb_h.target_lun;
2090                 target = request_ccb->ccb_h.path->target;
2091                 next_target = 1;
2092
2093                 mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
2094                 mtx_lock(mtx);
2095                 mtx_lock(&target->luns_mtx);
2096                 if (target->luns) {
2097                         lun_id_t first;
2098                         u_int nluns = scsi_4btoul(target->luns->length) / 8;
2099
2100                         /*
2101                          * Make sure we skip over lun 0 if it's the first member
2102                          * of the list as we've actually just finished probing
2103                          * it.
2104                          */
2105                         CAM_GET_LUN(target->luns, 0, first);
2106                         if (first == 0 && scan_info->lunindex[target_id] == 0) {
2107                                 scan_info->lunindex[target_id]++;
2108                         }
2109
2110                         /*
2111                          * Skip any LUNs that the HBA can't deal with.
2112                          */
2113                         while (scan_info->lunindex[target_id] < nluns) {
2114                                 if (scan_info->cpi->hba_misc & PIM_EXTLUNS) {
2115                                         CAM_GET_LUN(target->luns,
2116                                             scan_info->lunindex[target_id],
2117                                             lun_id);
2118                                         break;
2119                                 }
2120
2121                                 if (CAM_CAN_GET_SIMPLE_LUN(target->luns,
2122                                     scan_info->lunindex[target_id])) {
2123                                         CAM_GET_SIMPLE_LUN(target->luns,
2124                                             scan_info->lunindex[target_id],
2125                                             lun_id);
2126                                         break;
2127                                 }
2128
2129                                 scan_info->lunindex[target_id]++;
2130                         }
2131
2132                         if (scan_info->lunindex[target_id] < nluns) {
2133                                 mtx_unlock(&target->luns_mtx);
2134                                 next_target = 0;
2135                                 CAM_DEBUG(request_ccb->ccb_h.path,
2136                                     CAM_DEBUG_PROBE,
2137                                    ("next lun to try at index %u is %jx\n",
2138                                    scan_info->lunindex[target_id],
2139                                    (uintmax_t)lun_id));
2140                                 scan_info->lunindex[target_id]++;
2141                         } else {
2142                                 mtx_unlock(&target->luns_mtx);
2143                                 /* We're done with scanning all luns. */
2144                         }
2145                 } else {
2146                         mtx_unlock(&target->luns_mtx);
2147                         device = request_ccb->ccb_h.path->device;
2148                         /* Continue sequential LUN scan if: */
2149                         /*  -- we have more LUNs that need recheck */
2150                         mtx_lock(&target->bus->eb_mtx);
2151                         nextdev = device;
2152                         while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL)
2153                                 if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0)
2154                                         break;
2155                         mtx_unlock(&target->bus->eb_mtx);
2156                         if (nextdev != NULL) {
2157                                 next_target = 0;
2158                         /*  -- stop if CAM_QUIRK_NOLUNS is set. */
2159                         } else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) {
2160                                 next_target = 1;
2161                         /*  -- this LUN is connected and its SCSI version
2162                          *     allows more LUNs. */
2163                         } else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
2164                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
2165                                     CAN_SRCH_HI_DENSE(device))
2166                                         next_target = 0;
2167                         /*  -- this LUN is disconnected, its SCSI version
2168                          *     allows more LUNs, and we guess more may be present. */
2169                         } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
2170                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
2171                                     CAN_SRCH_HI_SPARSE(device))
2172                                         next_target = 0;
2173                         }
2174                         if (next_target == 0) {
2175                                 lun_id++;
2176                                 if (lun_id > scan_info->cpi->max_lun)
2177                                         next_target = 1;
2178                         }
2179                 }
2180
2181                 /*
2182                  * Check to see if we should scan any further luns.
2183                  */
2184                 if (next_target) {
2185                         int done;
2186
2187                         /*
2188                          * Free the current request path; we're done with it.
2189                          */
2190                         xpt_free_path(oldpath);
2191  hop_again:
2192                         done = 0;
2193                         if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) {
2194                                 done = 1;
2195                         } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
2196                                 scan_info->counter++;
2197                                 if (scan_info->counter ==
2198                                     scan_info->cpi->initiator_id) {
2199                                         scan_info->counter++;
2200                                 }
2201                                 if (scan_info->counter >=
2202                                     scan_info->cpi->max_target+1) {
2203                                         done = 1;
2204                                 }
2205                         } else {
2206                                 scan_info->counter--;
2207                                 if (scan_info->counter == 0) {
2208                                         done = 1;
2209                                 }
2210                         }
2211                         if (done) {
2212                                 mtx_unlock(mtx);
2213                                 xpt_free_ccb(request_ccb);
2214                                 xpt_free_ccb((union ccb *)scan_info->cpi);
2215                                 request_ccb = scan_info->request_ccb;
2216                                 CAM_DEBUG(request_ccb->ccb_h.path,
2217                                     CAM_DEBUG_TRACE,
2218                                    ("SCAN done for %p\n", scan_info));
2219                                 free(scan_info, M_CAMXPT);
2220                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
2221                                 xpt_done(request_ccb);
2222                                 break;
2223                         }
2224
2225                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
2226                                 mtx_unlock(mtx);
2227                                 xpt_free_ccb(request_ccb);
2228                                 break;
2229                         }
2230                         status = xpt_create_path(&path, NULL,
2231                             scan_info->request_ccb->ccb_h.path_id,
2232                             scan_info->counter, 0);
2233                         if (status != CAM_REQ_CMP) {
2234                                 mtx_unlock(mtx);
2235                                 printf("scsi_scan_bus: xpt_create_path failed"
2236                                     " with status %#x, bus scan halted\n",
2237                                     status);
2238                                 xpt_free_ccb(request_ccb);
2239                                 xpt_free_ccb((union ccb *)scan_info->cpi);
2240                                 request_ccb = scan_info->request_ccb;
2241                                 free(scan_info, M_CAMXPT);
2242                                 request_ccb->ccb_h.status = status;
2243                                 xpt_done(request_ccb);
2244                                 break;
2245                         }
2246                         xpt_setup_ccb(&request_ccb->ccb_h, path,
2247                             request_ccb->ccb_h.pinfo.priority);
2248                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2249                         request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
2250                         request_ccb->ccb_h.flags |= CAM_UNLOCKED;
2251                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
2252                         request_ccb->crcn.flags =
2253                             scan_info->request_ccb->crcn.flags;
2254                 } else {
2255                         status = xpt_create_path(&path, NULL,
2256                                                  path_id, target_id, lun_id);
2257                         /*
2258                          * Free the old request path; we're done with it. We
2259                          * do this *after* creating the new path so that
2260                          * we don't remove a target that has our lun list
2261                          * in the case that lun 0 is not present.
2262                          */
2263                         xpt_free_path(oldpath);
2264                         if (status != CAM_REQ_CMP) {
2265                                 printf("scsi_scan_bus: xpt_create_path failed "
2266                                        "with status %#x, halting LUN scan\n",
2267                                        status);
2268                                 goto hop_again;
2269                         }
2270                         xpt_setup_ccb(&request_ccb->ccb_h, path,
2271                                       request_ccb->ccb_h.pinfo.priority);
2272                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2273                         request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
2274                         request_ccb->ccb_h.flags |= CAM_UNLOCKED;
2275                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
2276                         request_ccb->crcn.flags =
2277                                 scan_info->request_ccb->crcn.flags;
2278                 }
2279                 mtx_unlock(mtx);
2280                 xpt_action(request_ccb);
2281                 break;
2282         }
2283         default:
2284                 break;
2285         }
2286 }
2287
2288 static void
2289 scsi_scan_lun(struct cam_periph *periph, struct cam_path *path,
2290              cam_flags flags, union ccb *request_ccb)
2291 {
2292         struct ccb_pathinq cpi;
2293         cam_status status;
2294         struct cam_path *new_path;
2295         struct cam_periph *old_periph;
2296         int lock;
2297
2298         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n"));
2299
2300         memset(&cpi, 0, sizeof(cpi));
2301         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
2302         cpi.ccb_h.func_code = XPT_PATH_INQ;
2303         xpt_action((union ccb *)&cpi);
2304
2305         if (cpi.ccb_h.status != CAM_REQ_CMP) {
2306                 if (request_ccb != NULL) {
2307                         request_ccb->ccb_h.status = cpi.ccb_h.status;
2308                         xpt_done(request_ccb);
2309                 }
2310                 return;
2311         }
2312
2313         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
2314                 /*
2315                  * Can't scan the bus on an adapter that
2316                  * cannot perform the initiator role.
2317                  */
2318                 if (request_ccb != NULL) {
2319                         request_ccb->ccb_h.status = CAM_REQ_CMP;
2320                         xpt_done(request_ccb);
2321                 }
2322                 return;
2323         }
2324
2325         if (request_ccb == NULL) {
2326                 request_ccb = xpt_alloc_ccb_nowait();
2327                 if (request_ccb == NULL) {
2328                         xpt_print(path, "scsi_scan_lun: can't allocate CCB, "
2329                             "can't continue\n");
2330                         return;
2331                 }
2332                 status = xpt_create_path(&new_path, NULL,
2333                                           path->bus->path_id,
2334                                           path->target->target_id,
2335                                           path->device->lun_id);
2336                 if (status != CAM_REQ_CMP) {
2337                         xpt_print(path, "scsi_scan_lun: can't create path, "
2338                             "can't continue\n");
2339                         xpt_free_ccb(request_ccb);
2340                         return;
2341                 }
2342                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
2343                 request_ccb->ccb_h.cbfcnp = xptscandone;
2344                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2345                 request_ccb->ccb_h.flags |= CAM_UNLOCKED;
2346                 request_ccb->crcn.flags = flags;
2347         }
2348
2349         lock = (xpt_path_owned(path) == 0);
2350         if (lock)
2351                 xpt_path_lock(path);
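             /*
              * If a probe periph is already attached to this path, queue the
              * request ccb on it so the running probe will pick it up;
              * otherwise allocate a new "probe" periph to service request_ccb.
              */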
2352         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
2353                 if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
2354                         probe_softc *softc;
2355
2356                         softc = (probe_softc *)old_periph->softc;
2357                         TAILQ_INSERT_TAIL(&softc->request_ccbs,
2358                             &request_ccb->ccb_h, periph_links.tqe);
2359                 } else {
2360                         request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2361                         xpt_done(request_ccb);
2362                 }
2363         } else {
2364                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
2365                                           probestart, "probe",
2366                                           CAM_PERIPH_BIO,
2367                                           request_ccb->ccb_h.path, NULL, 0,
2368                                           request_ccb);
2369
2370                 if (status != CAM_REQ_CMP) {
2371                         xpt_print(path, "scsi_scan_lun: cam_periph_alloc "
2372                             "returned an error, can't continue probe\n");
2373                         request_ccb->ccb_h.status = status;
2374                         xpt_done(request_ccb);
2375                 }
2376         }
2377         if (lock)
2378                 xpt_path_unlock(path);
2379 }
2380
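/*
 * Completion handler for scan request CCBs allocated internally by
 * scsi_scan_lun(): release the path and CCB that were created for the
 * request.
 */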
2381 static void
2382 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
2383 {
2384
2385         xpt_free_path(done_ccb->ccb_h.path);
2386         xpt_free_ccb(done_ccb);
2387 }
2388
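/*
 * Allocate a new device node for this bus/target/LUN and seed it with the
 * default (catch-all) entry of scsi_quirk_table until INQUIRY data is
 * available to select a better match.
 */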
2389 static struct cam_ed *
2390 scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
2391 {
2392         struct scsi_quirk_entry *quirk;
2393         struct cam_ed *device;
2394
2395         device = xpt_alloc_device(bus, target, lun_id);
2396         if (device == NULL)
2397                 return (NULL);
2398
2399         /*
2400          * Take the default quirk entry until we have inquiry
2401          * data and can determine a better quirk to use.
2402          */
2403         quirk = &scsi_quirk_table[nitems(scsi_quirk_table) - 1];
2404         device->quirk = (void *)quirk;
2405         device->mintags = quirk->mintags;
2406         device->maxtags = quirk->maxtags;
2407         bzero(&device->inq_data, sizeof(device->inq_data));
2408         device->inq_flags = 0;
2409         device->queue_flags = 0;
2410         device->serial_num = NULL;
2411         device->serial_num_len = 0;
2412         device->device_id = NULL;
2413         device->device_id_len = 0;
2414         device->supported_vpds = NULL;
2415         device->supported_vpds_len = 0;
2416         return (device);
2417 }
2418
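/*
 * Derive a device's protocol and transport versions from the SIM's path
 * inquiry data and, when available, the device's INQUIRY data or the
 * versions already established for other LUNs on the same target, then
 * push the result to the controller via XPT_SET_TRAN_SETTINGS.
 */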
2419 static void
2420 scsi_devise_transport(struct cam_path *path)
2421 {
2422         struct ccb_pathinq cpi;
2423         struct ccb_trans_settings cts;
2424         struct scsi_inquiry_data *inq_buf;
2425
2426         /* Get transport information from the SIM */
2427         memset(&cpi, 0, sizeof(cpi));
2428         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
2429         cpi.ccb_h.func_code = XPT_PATH_INQ;
2430         xpt_action((union ccb *)&cpi);
2431
2432         inq_buf = NULL;
2433         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
2434                 inq_buf = &path->device->inq_data;
2435         path->device->protocol = PROTO_SCSI;
2436         path->device->protocol_version =
2437             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
2438         path->device->transport = cpi.transport;
2439         path->device->transport_version = cpi.transport_version;
2440
2441         /*
2442          * Any device not using SPI3 features should
2443          * be considered SPI2 or lower.
2444          */
2445         if (inq_buf != NULL) {
2446                 if (path->device->transport == XPORT_SPI
2447                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
2448                  && path->device->transport_version > 2)
2449                         path->device->transport_version = 2;
2450         } else {
2451                 struct cam_ed* otherdev;
2452
2453                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
2454                      otherdev != NULL;
2455                      otherdev = TAILQ_NEXT(otherdev, links)) {
2456                         if (otherdev != path->device)
2457                                 break;
2458                 }
2459
2460                 if (otherdev != NULL) {
2461                         /*
2462                          * Initially assume the same versioning as
2463                          * prior luns for this target.
2464                          */
2465                         path->device->protocol_version =
2466                             otherdev->protocol_version;
2467                         path->device->transport_version =
2468                             otherdev->transport_version;
2469                 } else {
2470                         /* Until we know better, opt for safety */
2471                         path->device->protocol_version = 2;
2472                         if (path->device->transport == XPORT_SPI)
2473                                 path->device->transport_version = 2;
2474                         else
2475                                 path->device->transport_version = 0;
2476                 }
2477         }
2478
2479         /*
2480          * XXX
2481          * For a device compliant with SPC-2 we should be able
2482          * to determine the transport version supported by
2483          * scrutinizing the version descriptors in the
2484          * inquiry buffer.
2485          */
2486
2487         /* Tell the controller what we think */
2488         memset(&cts, 0, sizeof(cts));
2489         xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
2490         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
2491         cts.type = CTS_TYPE_CURRENT_SETTINGS;
2492         cts.transport = path->device->transport;
2493         cts.transport_version = path->device->transport_version;
2494         cts.protocol = path->device->protocol;
2495         cts.protocol_version = path->device->protocol_version;
2496         cts.proto_specific.valid = 0;
2497         cts.xport_specific.valid = 0;
2498         xpt_action((union ccb *)&cts);
2499 }
2500
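/*
 * Service XPT_DEV_ADVINFO requests: copy out (or, for the physical path
 * and long read-capacity buffers, store) the advanced device information
 * cached on the device node, and post an AC_ADVINFO_CHANGED async event
 * after a successful store.
 *
 * A rough sketch of how a consumer might retrieve the cached serial
 * number (path and serial_buf are placeholders; the exact priority and
 * flags depend on the caller):
 *
 *	struct ccb_dev_advinfo cdai;
 *
 *	memset(&cdai, 0, sizeof(cdai));
 *	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
 *	cdai.buftype = CDAI_TYPE_SERIAL_NUM;
 *	cdai.bufsiz = sizeof(serial_buf);
 *	cdai.buf = serial_buf;
 *	xpt_action((union ccb *)&cdai);
 */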
2501 static void
2502 scsi_dev_advinfo(union ccb *start_ccb)
2503 {
2504         struct cam_ed *device;
2505         struct ccb_dev_advinfo *cdai;
2506         off_t amt;
2507
2508         xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
2509         start_ccb->ccb_h.status = CAM_REQ_INVALID;
2510         device = start_ccb->ccb_h.path->device;
2511         cdai = &start_ccb->cdai;
2512         switch(cdai->buftype) {
2513         case CDAI_TYPE_SCSI_DEVID:
2514                 if (cdai->flags & CDAI_FLAG_STORE)
2515                         return;
2516                 cdai->provsiz = device->device_id_len;
2517                 if (device->device_id_len == 0)
2518                         break;
2519                 amt = device->device_id_len;
2520                 if (cdai->provsiz > cdai->bufsiz)
2521                         amt = cdai->bufsiz;
2522                 memcpy(cdai->buf, device->device_id, amt);
2523                 break;
2524         case CDAI_TYPE_SERIAL_NUM:
2525                 if (cdai->flags & CDAI_FLAG_STORE)
2526                         return;
2527                 cdai->provsiz = device->serial_num_len;
2528                 if (device->serial_num_len == 0)
2529                         break;
2530                 amt = device->serial_num_len;
2531                 if (cdai->provsiz > cdai->bufsiz)
2532                         amt = cdai->bufsiz;
2533                 memcpy(cdai->buf, device->serial_num, amt);
2534                 break;
2535         case CDAI_TYPE_PHYS_PATH:
2536                 if (cdai->flags & CDAI_FLAG_STORE) {
2537                         if (device->physpath != NULL) {
2538                                 free(device->physpath, M_CAMXPT);
2539                                 device->physpath = NULL;
2540                                 device->physpath_len = 0;
2541                         }
2542                         /* Clear existing buffer if zero length */
2543                         if (cdai->bufsiz == 0)
2544                                 break;
2545                         device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
2546                         if (device->physpath == NULL) {
2547                                 start_ccb->ccb_h.status = CAM_REQ_ABORTED;
2548                                 return;
2549                         }
2550                         device->physpath_len = cdai->bufsiz;
2551                         memcpy(device->physpath, cdai->buf, cdai->bufsiz);
2552                 } else {
2553                         cdai->provsiz = device->physpath_len;
2554                         if (device->physpath_len == 0)
2555                                 break;
2556                         amt = device->physpath_len;
2557                         if (cdai->provsiz > cdai->bufsiz)
2558                                 amt = cdai->bufsiz;
2559                         memcpy(cdai->buf, device->physpath, amt);
2560                 }
2561                 break;
2562         case CDAI_TYPE_RCAPLONG:
2563                 if (cdai->flags & CDAI_FLAG_STORE) {
2564                         if (device->rcap_buf != NULL) {
2565                                 free(device->rcap_buf, M_CAMXPT);
2566                                 device->rcap_buf = NULL;
2567                         }
2568
2569                         device->rcap_len = cdai->bufsiz;
2570                         /* Clear existing buffer if zero length */
2571                         if (cdai->bufsiz == 0)
2572                                 break;
2573
2574                         device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT,
2575                                                   M_NOWAIT);
2576                         if (device->rcap_buf == NULL) {
2577                                 start_ccb->ccb_h.status = CAM_REQ_ABORTED;
2578                                 return;
2579                         }
2580
2581                         memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz);
2582                 } else {
2583                         cdai->provsiz = device->rcap_len;
2584                         if (device->rcap_len == 0)
2585                                 break;
2586                         amt = device->rcap_len;
2587                         if (cdai->provsiz > cdai->bufsiz)
2588                                 amt = cdai->bufsiz;
2589                         memcpy(cdai->buf, device->rcap_buf, amt);
2590                 }
2591                 break;
2592         case CDAI_TYPE_EXT_INQ:
2593                 /*
2594                  * We fetch extended inquiry data during probe, if
2595                  * available.  We don't allow changing it.
2596                  */
2597                 if (cdai->flags & CDAI_FLAG_STORE)
2598                         return;
2599                 cdai->provsiz = device->ext_inq_len;
2600                 if (device->ext_inq_len == 0)
2601                         break;
2602                 amt = device->ext_inq_len;
2603                 if (cdai->provsiz > cdai->bufsiz)
2604                         amt = cdai->bufsiz;
2605                 memcpy(cdai->buf, device->ext_inq, amt);
2606                 break;
2607         default:
2608                 return;
2609         }
2610         start_ccb->ccb_h.status = CAM_REQ_CMP;
2611
2612         if (cdai->flags & CDAI_FLAG_STORE) {
2613                 xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
2614                           (void *)(uintptr_t)cdai->buftype);
2615         }
2616 }
2617
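/*
 * SCSI protocol action entry point: dispatch the transport-specific CCB
 * function codes handled here and pass everything else through to
 * xpt_action_default().
 */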
2618 static void
2619 scsi_action(union ccb *start_ccb)
2620 {
2621
2622         if (start_ccb->ccb_h.func_code != XPT_SCSI_IO) {
2623                 KASSERT((start_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) == 0,
2624                     ("%s: ccb %p, func_code %#x should not be allocated "
2625                     "from UMA zone\n",
2626                     __func__, start_ccb, start_ccb->ccb_h.func_code));
2627         }
2628
2629         switch (start_ccb->ccb_h.func_code) {
2630         case XPT_SET_TRAN_SETTINGS:
2631         {
2632                 scsi_set_transfer_settings(&start_ccb->cts,
2633                                            start_ccb->ccb_h.path,
2634                                            /*async_update*/FALSE);
2635                 break;
2636         }
2637         case XPT_SCAN_BUS:
2638         case XPT_SCAN_TGT:
2639                 scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
2640                 break;
2641         case XPT_SCAN_LUN:
2642                 scsi_scan_lun(start_ccb->ccb_h.path->periph,
2643                               start_ccb->ccb_h.path, start_ccb->crcn.flags,
2644                               start_ccb);
2645                 break;
2646         case XPT_DEV_ADVINFO:
2647         {
2648                 scsi_dev_advinfo(start_ccb);
2649                 break;
2650         }
2651         default:
2652                 xpt_action_default(start_ccb);
2653                 break;
2654         }
2655 }
2656
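/*
 * Sanity check and apply the transfer settings requested for a device,
 * reconciling the caller's request with what the controller and the
 * device's INQUIRY data report as supported (tagged queuing, SPI sync
 * rate/offset, bus width, PPR options), then hand the result to the SIM
 * unless this is an asynchronous update.
 */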
2657 static void
2658 scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path,
2659                            int async_update)
2660 {
2661         struct  ccb_pathinq cpi;
2662         struct  ccb_trans_settings cur_cts;
2663         struct  ccb_trans_settings_scsi *scsi;
2664         struct  ccb_trans_settings_scsi *cur_scsi;
2665         struct  scsi_inquiry_data *inq_data;
2666         struct  cam_ed *device;
2667
2668         if (path == NULL || (device = path->device) == NULL) {
2669                 cts->ccb_h.status = CAM_PATH_INVALID;
2670                 xpt_done((union ccb *)cts);
2671                 return;
2672         }
2673
2674         if (cts->protocol == PROTO_UNKNOWN
2675          || cts->protocol == PROTO_UNSPECIFIED) {
2676                 cts->protocol = device->protocol;
2677                 cts->protocol_version = device->protocol_version;
2678         }
2679
2680         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
2681          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
2682                 cts->protocol_version = device->protocol_version;
2683
2684         if (cts->protocol != device->protocol) {
2685                 xpt_print(path, "Uninitialized Protocol %x:%x?\n",
2686                        cts->protocol, device->protocol);
2687                 cts->protocol = device->protocol;
2688         }
2689
2690         if (cts->protocol_version > device->protocol_version) {
2691                 if (bootverbose) {
2692                         xpt_print(path, "Down revving Protocol "
2693                             "Version from %d to %d?\n", cts->protocol_version,
2694                             device->protocol_version);
2695                 }
2696                 cts->protocol_version = device->protocol_version;
2697         }
2698
2699         if (cts->transport == XPORT_UNKNOWN
2700          || cts->transport == XPORT_UNSPECIFIED) {
2701                 cts->transport = device->transport;
2702                 cts->transport_version = device->transport_version;
2703         }
2704
2705         if (cts->transport_version == XPORT_VERSION_UNKNOWN
2706          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
2707                 cts->transport_version = device->transport_version;
2708
2709         if (cts->transport != device->transport) {
2710                 xpt_print(path, "Uninitialized Transport %x:%x?\n",
2711                     cts->transport, device->transport);
2712                 cts->transport = device->transport;
2713         }
2714
2715         if (cts->transport_version > device->transport_version) {
2716                 if (bootverbose) {
2717                         xpt_print(path, "Down revving Transport "
2718                             "Version from %d to %d?\n", cts->transport_version,
2719                             device->transport_version);
2720                 }
2721                 cts->transport_version = device->transport_version;
2722         }
2723
2724         /*
2725          * Nothing more of interest to do unless
2726          * this is a device connected via the
2727          * SCSI protocol.
2728          */
2729         if (cts->protocol != PROTO_SCSI) {
2730                 if (async_update == FALSE)
2731                         xpt_action_default((union ccb *)cts);
2732                 return;
2733         }
2734
2735         inq_data = &device->inq_data;
2736         scsi = &cts->proto_specific.scsi;
2737         memset(&cpi, 0, sizeof(cpi));
2738         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
2739         cpi.ccb_h.func_code = XPT_PATH_INQ;
2740         xpt_action((union ccb *)&cpi);
2741
2742         /* SCSI specific sanity checking */
2743         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
2744          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
2745          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
2746          || (device->mintags == 0)) {
2747                 /*
2748                  * Can't tag on hardware that doesn't support tags,
2749                  * doesn't have them enabled, or has broken tag support.
2750                  */
2751                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2752         }
2753
2754         if (async_update == FALSE) {
2755                 /*
2756                  * Perform sanity checking against what the
2757                  * controller and device can do.
2758                  */
2759                 memset(&cur_cts, 0, sizeof(cur_cts));
2760                 xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE);
2761                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2762                 cur_cts.type = cts->type;
2763                 xpt_action((union ccb *)&cur_cts);
2764                 if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) {
2765                         return;
2766                 }
2767                 cur_scsi = &cur_cts.proto_specific.scsi;
2768                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
2769                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2770                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
2771                 }
2772                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
2773                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2774         }
2775
2776         /* SPI specific sanity checking */
2777         if (cts->transport == XPORT_SPI && async_update == FALSE) {
2778                 u_int spi3caps;
2779                 struct ccb_trans_settings_spi *spi;
2780                 struct ccb_trans_settings_spi *cur_spi;
2781
2782                 spi = &cts->xport_specific.spi;
2783
2784                 cur_spi = &cur_cts.xport_specific.spi;
2785
2786                 /* Fill in any gaps in what the user gave us */
2787                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
2788                         spi->sync_period = cur_spi->sync_period;
2789                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
2790                         spi->sync_period = 0;
2791                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
2792                         spi->sync_offset = cur_spi->sync_offset;
2793                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
2794                         spi->sync_offset = 0;
2795                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
2796                         spi->ppr_options = cur_spi->ppr_options;
2797                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
2798                         spi->ppr_options = 0;
2799                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
2800                         spi->bus_width = cur_spi->bus_width;
2801                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
2802                         spi->bus_width = 0;
2803                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
2804                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2805                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
2806                 }
2807                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
2808                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2809                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
2810                   && (inq_data->flags & SID_Sync) == 0
2811                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
2812                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
2813                         /* Force async */
2814                         spi->sync_period = 0;
2815                         spi->sync_offset = 0;
2816                 }
2817
2818                 switch (spi->bus_width) {
2819                 case MSG_EXT_WDTR_BUS_32_BIT:
2820                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
2821                           || (inq_data->flags & SID_WBus32) != 0
2822                           || cts->type == CTS_TYPE_USER_SETTINGS)
2823                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
2824                                 break;
2825                         /* Fall Through to 16-bit */
2826                 case MSG_EXT_WDTR_BUS_16_BIT:
2827                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
2828                           || (inq_data->flags & SID_WBus16) != 0
2829                           || cts->type == CTS_TYPE_USER_SETTINGS)
2830                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
2831                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2832                                 break;
2833                         }
2834                         /* Fall Through to 8-bit */
2835                 default: /* New bus width?? */
2836                 case MSG_EXT_WDTR_BUS_8_BIT:
2837                         /* All targets can do this */
2838                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2839                         break;
2840                 }
2841
2842                 spi3caps = cpi.xport_specific.spi.ppr_options;
2843                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
2844                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
2845                         spi3caps &= inq_data->spi3data;
2846
2847                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
2848                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2849
2850                 if ((spi3caps & SID_SPI_IUS) == 0)
2851                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
2852
2853                 if ((spi3caps & SID_SPI_QAS) == 0)
2854                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
2855
2856                 /* No PPR options are allowed unless the bus is wide */
2857                 if (spi->bus_width == 0)
2858                         spi->ppr_options = 0;
2859
2860                 if ((spi->valid & CTS_SPI_VALID_DISC)
2861                  && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
2862                         /*
2863                          * Can't tag queue without disconnection.
2864                          */
2865                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2866                         scsi->valid |= CTS_SCSI_VALID_TQ;
2867                 }
2868
2869                 /*
2870                  * If we are currently performing tagged transactions to
2871                  * this device and want to change its negotiation parameters,
2872                  * go non-tagged for a bit to give the controller a chance to
2873                  * negotiate unhampered by tag messages.
2874                  */
2875                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
2876                  && (device->inq_flags & SID_CmdQue) != 0
2877                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
2878                  && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
2879                                    CTS_SPI_VALID_SYNC_OFFSET|
2880                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
2881                         scsi_toggle_tags(path);
2882         }
2883
2884         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
2885          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2886                 int device_tagenb;
2887
2888                 /*
2889                  * If we are transitioning from tags to no-tags or
2890                  * vice-versa, we need to carefully freeze and restart
2891                  * the queue so that we don't overlap tagged and non-tagged
2892                  * commands.  We also temporarily stop tags if there is
2893                  * a change in transfer negotiation settings to allow
2894                  * "tag-less" negotiation.
2895                  */
2896                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
2897                  || (device->inq_flags & SID_CmdQue) != 0)
2898                         device_tagenb = TRUE;
2899                 else
2900                         device_tagenb = FALSE;
2901
2902                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
2903                   && device_tagenb == FALSE)
2904                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
2905                   && device_tagenb == TRUE)) {
2906                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
2907                                 /*
2908                                  * Delay change to use tags until after a
2909                                  * few commands have gone to this device so
2910                                  * the controller has time to perform transfer
2911                                  * negotiations without tagged messages getting
2912                                  * in the way.
2913                                  */
2914                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
2915                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
2916                         } else {
2917                                 xpt_stop_tags(path);
2918                         }
2919                 }
2920         }
2921         if (async_update == FALSE)
2922                 xpt_action_default((union ccb *)cts);
2923 }
2924
2925 static void
2926 scsi_toggle_tags(struct cam_path *path)
2927 {
2928         struct cam_ed *dev;
2929
2930         /*
2931          * Give controllers a chance to renegotiate
2932          * before starting tag operations.  We
2933          * "toggle" tagged queuing off and then back on,
2934          * which causes the tag enable command delay
2935          * counter to come into effect.
2936          */
2937         dev = path->device;
2938         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
2939          || ((dev->inq_flags & SID_CmdQue) != 0
2940           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
2941                 struct ccb_trans_settings cts;
2942
2943                 memset(&cts, 0, sizeof(cts));
2944                 xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
2945                 cts.protocol = PROTO_SCSI;
2946                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
2947                 cts.transport = XPORT_UNSPECIFIED;
2948                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
2949                 cts.proto_specific.scsi.flags = 0;
2950                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
2951                 scsi_set_transfer_settings(&cts, path,
2952                                           /*async_update*/TRUE);
2953                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
2954                 scsi_set_transfer_settings(&cts, path,
2955                                           /*async_update*/TRUE);
2956         }
2957 }
2958
2959 /*
2960  * Handle any per-device event notifications that require action by the XPT.
2961  */
2962 static void
2963 scsi_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
2964               struct cam_ed *device, void *async_arg)
2965 {
2966         cam_status status;
2967         struct cam_path newpath;
2968
2969         /*
2970          * We only need to handle events for real devices.
2971          */
2972         if (target->target_id == CAM_TARGET_WILDCARD
2973          || device->lun_id == CAM_LUN_WILDCARD)
2974                 return;
2975
2976         /*
2977          * We need our own path with wildcards expanded to
2978          * handle certain types of events.
2979          */
2980         if ((async_code == AC_SENT_BDR)
2981          || (async_code == AC_BUS_RESET)
2982          || (async_code == AC_INQ_CHANGED))
2983                 status = xpt_compile_path(&newpath, NULL,
2984                                           bus->path_id,
2985                                           target->target_id,
2986                                           device->lun_id);
2987         else
2988                 status = CAM_REQ_CMP_ERR;
2989
2990         if (status == CAM_REQ_CMP) {
2991                 /*
2992                  * Allow transfer negotiation to occur in a
2993                  * tag-free environment and after the settle delay.
2994                  */
2995                 if (async_code == AC_SENT_BDR
2996                  || async_code == AC_BUS_RESET) {
2997                         cam_freeze_devq(&newpath);
2998                         cam_release_devq(&newpath,
2999                                 RELSIM_RELEASE_AFTER_TIMEOUT,
3000                                 /*reduction*/0,
3001                                 /*timeout*/scsi_delay,
3002                                 /*getcount_only*/0);
3003                         scsi_toggle_tags(&newpath);
3004                 }
3005
3006                 if (async_code == AC_INQ_CHANGED) {
3007                         /*
3008                          * We've sent a start unit command, or
3009                          * something similar, to a device whose
3010                          * inquiry data may have changed as a
3011                          * result.  Re-scan the device to refresh
3012                          * its inquiry data.
3013                          */
3014                         scsi_scan_lun(newpath.periph, &newpath,
3015                                      CAM_EXPECT_INQ_CHANGE, NULL);
3016                 }
3017                 xpt_release_path(&newpath);
3018         } else if (async_code == AC_LOST_DEVICE &&
3019             (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
3020                 device->flags |= CAM_DEV_UNCONFIGURED;
3021                 xpt_release_device(device);
3022         } else if (async_code == AC_TRANSFER_NEG) {
3023                 struct ccb_trans_settings *settings;
3024                 struct cam_path path;
3025
3026                 settings = (struct ccb_trans_settings *)async_arg;
3027                 xpt_compile_path(&path, NULL, bus->path_id, target->target_id,
3028                                  device->lun_id);
3029                 scsi_set_transfer_settings(settings, &path,
3030                                           /*async_update*/TRUE);
3031                 xpt_release_path(&path);
3032         }
3033 }
3034
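/*
 * Collect the current transfer settings and the SIM's base transfer speed
 * for a peripheral, adjusting the reported speed for SPI sync rate and bus
 * width, FC bitrate, or SAS bitrate as appropriate.
 */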
3035 static void
3036 _scsi_announce_periph(struct cam_periph *periph, u_int *speed, u_int *freq, struct ccb_trans_settings *cts)
3037 {
3038         struct  ccb_pathinq cpi;
3039         struct  cam_path *path = periph->path;
3040
3041         cam_periph_assert(periph, MA_OWNED);
3042
3043         xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL);
3044         cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
3045         cts->type = CTS_TYPE_CURRENT_SETTINGS;
3046         xpt_action((union ccb*)cts);
3047         if (cam_ccb_status((union ccb *)cts) != CAM_REQ_CMP)
3048                 return;
3049
3050         /* Ask the SIM for its base transfer speed */
3051         memset(&cpi, 0, sizeof(cpi));
3052         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3053         cpi.ccb_h.func_code = XPT_PATH_INQ;
3054         xpt_action((union ccb *)&cpi);
3055
3056         /* Report connection speed */
3057         *speed = cpi.base_transfer_speed;
3058         *freq = 0;
3059
3060         if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) {
3061                 struct  ccb_trans_settings_spi *spi =
3062                     &cts->xport_specific.spi;
3063
3064                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
3065                   && spi->sync_offset != 0) {
3066                         *freq = scsi_calc_syncsrate(spi->sync_period);
3067                         *speed = *freq;
3068                 }
3069                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
3070                         *speed *= (0x01 << spi->bus_width);
3071         }
3072         if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) {
3073                 struct  ccb_trans_settings_fc *fc =
3074                     &cts->xport_specific.fc;
3075
3076                 if (fc->valid & CTS_FC_VALID_SPEED)
3077                         *speed = fc->bitrate;
3078         }
3079         if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) {
3080                 struct  ccb_trans_settings_sas *sas =
3081                     &cts->xport_specific.sas;
3082
3083                 if (sas->valid & CTS_SAS_VALID_SPEED)
3084                         *speed = sas->bitrate;
3085         }
3086 }
3087
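/*
 * Format a one-line transfer speed announcement for a peripheral into the
 * supplied sbuf, with additional detail for SPI and Fibre Channel links.
 */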
3088 static void
3089 scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
3090 {
3091         struct  ccb_trans_settings cts;
3092         u_int speed, freq, mb;
3093
3094         memset(&cts, 0, sizeof(cts));
3095         _scsi_announce_periph(periph, &speed, &freq, &cts);
3096         if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP)
3097                 return;
3098
3099         mb = speed / 1000;
3100         if (mb > 0)
3101                 sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers",
3102                        periph->periph_name, periph->unit_number,
3103                        mb, speed % 1000);
3104         else
3105                 sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name,
3106                        periph->unit_number, speed);
3107         /* Report additional information about SPI connections */
3108         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
3109                 struct  ccb_trans_settings_spi *spi;
3110
3111                 spi = &cts.xport_specific.spi;
3112                 if (freq != 0) {
3113                         sbuf_printf(sb, " (%d.%03dMHz%s, offset %d", freq / 1000,
3114                                freq % 1000,
3115                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
3116                              ? " DT" : "",
3117                                spi->sync_offset);
3118                 }
3119                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
3120                  && spi->bus_width > 0) {
3121                         if (freq != 0) {
3122                                 sbuf_printf(sb, ", ");
3123                         } else {
3124                                 sbuf_printf(sb, " (");
3125                         }
3126                         sbuf_printf(sb, "%dbit)", 8 * (0x01 << spi->bus_width));
3127                 } else if (freq != 0) {
3128                         sbuf_printf(sb, ")");
3129                 }
3130         }
3131         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
3132                 struct  ccb_trans_settings_fc *fc;
3133
3134                 fc = &cts.xport_specific.fc;
3135                 if (fc->valid & CTS_FC_VALID_WWNN)
3136                         sbuf_printf(sb, " WWNN 0x%llx", (long long) fc->wwnn);
3137                 if (fc->valid & CTS_FC_VALID_WWPN)
3138                         sbuf_printf(sb, " WWPN 0x%llx", (long long) fc->wwpn);
3139                 if (fc->valid & CTS_FC_VALID_PORT)
3140                         sbuf_printf(sb, " PortID 0x%x", fc->port);
3141         }
3142         sbuf_printf(sb, "\n");
3143 }
3144
3145 static void
3146 scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
3147 {
3148         scsi_print_inquiry_sbuf(sb, &device->inq_data);
3149 }
3150
3151 static void
3152 scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
3153 {
3154         scsi_print_inquiry_short_sbuf(sb, &device->inq_data);
3155 }
3156
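/*
 * Emit a CAM_DEBUG_CDB trace line describing the CDB of an outgoing
 * XPT_SCSI_IO request.
 */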
3157 static void
3158 scsi_proto_debug_out(union ccb *ccb)
3159 {
3160         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3161         struct cam_ed *device;
3162
3163         if (ccb->ccb_h.func_code != XPT_SCSI_IO)
3164                 return;
3165
3166         device = ccb->ccb_h.path->device;
3167         CAM_DEBUG(ccb->ccb_h.path,
3168             CAM_DEBUG_CDB,("%s. CDB: %s\n",
3169                 scsi_op_desc(scsiio_cdb_ptr(&ccb->csio)[0], &device->inq_data),
3170                 scsi_cdb_string(scsiio_cdb_ptr(&ccb->csio), cdb_str, sizeof(cdb_str))));
3171 }