]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/cam/scsi/scsi_xpt.c
Merge commit '850ef5ae11d69ea3381bd310f564f025fc8caea3'
[FreeBSD/FreeBSD.git] / sys / cam / scsi / scsi_xpt.c
1 /*-
2  * Implementation of the SCSI Transport
3  *
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/time.h>
39 #include <sys/conf.h>
40 #include <sys/fcntl.h>
41 #include <sys/md5.h>
42 #include <sys/sbuf.h>
43
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
47
48 #include <cam/cam.h>
49 #include <cam/cam_ccb.h>
50 #include <cam/cam_queue.h>
51 #include <cam/cam_periph.h>
52 #include <cam/cam_sim.h>
53 #include <cam/cam_xpt.h>
54 #include <cam/cam_xpt_sim.h>
55 #include <cam/cam_xpt_periph.h>
56 #include <cam/cam_xpt_internal.h>
57 #include <cam/cam_debug.h>
58
59 #include <cam/scsi/scsi_all.h>
60 #include <cam/scsi/scsi_message.h>
61 #include <cam/scsi/scsi_pass.h>
62 #include <machine/stdarg.h>     /* for xpt_print below */
63
64 struct scsi_quirk_entry {
65         struct scsi_inquiry_pattern inq_pat;
66         uint8_t quirks;
67 #define CAM_QUIRK_NOLUNS        0x01
68 #define CAM_QUIRK_NOVPDS        0x02
69 #define CAM_QUIRK_HILUNS        0x04
70 #define CAM_QUIRK_NOHILUNS      0x08
71 #define CAM_QUIRK_NORPTLUNS     0x10
72         u_int mintags;
73         u_int maxtags;
74 };
75 #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
76
77 static int cam_srch_hi = 0;
78 SYSCTL_INT(_kern_cam, OID_AUTO, cam_srch_hi, CTLFLAG_RWTUN,
79     &cam_srch_hi, 0, "Search above LUN 7 for SCSI3 and greater devices");
80
81 #define CAM_SCSI2_MAXLUN        8
82 #define CAM_CAN_GET_SIMPLE_LUN(x, i)                            \
83         ((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==  \
84         RPL_LUNDATA_ATYP_PERIPH) ||                             \
85         (((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==   \
86         RPL_LUNDATA_ATYP_FLAT))
87 #define CAM_GET_SIMPLE_LUN(lp, i, lval)                                 \
88         if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==     \
89             RPL_LUNDATA_ATYP_PERIPH) {                                  \
90                 (lval) = (lp)->luns[(i)].lundata[1];                    \
91         } else {                                                        \
92                 (lval) = (lp)->luns[(i)].lundata[0];                    \
93                 (lval) &= RPL_LUNDATA_FLAT_LUN_MASK;                    \
94                 (lval) <<= 8;                                           \
95                 (lval) |=  (lp)->luns[(i)].lundata[1];                  \
96         }
97 #define CAM_GET_LUN(lp, i, lval)                                        \
98         (lval) = scsi_8btou64((lp)->luns[(i)].lundata);                 \
99         (lval) = CAM_EXTLUN_BYTE_SWIZZLE(lval);
100
101 /*
102  * If we're not quirked to search <= the first 8 luns
103  * and we are either quirked to search above lun 8,
104  * or we're > SCSI-2 and we've enabled hilun searching,
105  * or we're > SCSI-2 and the last lun was a success,
106  * we can look for luns above lun 8.
107  */
108 #define CAN_SRCH_HI_SPARSE(dv)                                  \
109   (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0)         \
110   && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)               \
111   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
112
113 #define CAN_SRCH_HI_DENSE(dv)                                   \
114   (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0)         \
115   && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)               \
116   || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
117
118 static periph_init_t probe_periph_init;
119
120 static struct periph_driver probe_driver =
121 {
122         probe_periph_init, "probe",
123         TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0,
124         CAM_PERIPH_DRV_EARLY
125 };
126
127 PERIPHDRIVER_DECLARE(probe, probe_driver);
128
129 typedef enum {
130         PROBE_TUR,
131         PROBE_INQUIRY,  /* this counts as DV0 for Basic Domain Validation */
132         PROBE_FULL_INQUIRY,
133         PROBE_REPORT_LUNS,
134         PROBE_MODE_SENSE,
135         PROBE_SUPPORTED_VPD_LIST,
136         PROBE_DEVICE_ID,
137         PROBE_EXTENDED_INQUIRY,
138         PROBE_SERIAL_NUM,
139         PROBE_TUR_FOR_NEGOTIATION,
140         PROBE_INQUIRY_BASIC_DV1,
141         PROBE_INQUIRY_BASIC_DV2,
142         PROBE_DV_EXIT,
143         PROBE_DONE,
144         PROBE_INVALID
145 } probe_action;
146
147 static char *probe_action_text[] = {
148         "PROBE_TUR",
149         "PROBE_INQUIRY",
150         "PROBE_FULL_INQUIRY",
151         "PROBE_REPORT_LUNS",
152         "PROBE_MODE_SENSE",
153         "PROBE_SUPPORTED_VPD_LIST",
154         "PROBE_DEVICE_ID",
155         "PROBE_EXTENDED_INQUIRY",
156         "PROBE_SERIAL_NUM",
157         "PROBE_TUR_FOR_NEGOTIATION",
158         "PROBE_INQUIRY_BASIC_DV1",
159         "PROBE_INQUIRY_BASIC_DV2",
160         "PROBE_DV_EXIT",
161         "PROBE_DONE",
162         "PROBE_INVALID"
163 };
164
165 #define PROBE_SET_ACTION(softc, newaction)      \
166 do {                                                                    \
167         char **text;                                                    \
168         text = probe_action_text;                                       \
169         CAM_DEBUG((softc)->periph->path, CAM_DEBUG_PROBE,               \
170             ("Probe %s to %s\n", text[(softc)->action],                 \
171             text[(newaction)]));                                        \
172         (softc)->action = (newaction);                                  \
173 } while(0)
174
175 typedef enum {
176         PROBE_INQUIRY_CKSUM     = 0x01,
177         PROBE_NO_ANNOUNCE       = 0x04,
178         PROBE_EXTLUN            = 0x08
179 } probe_flags;
180
181 typedef struct {
182         TAILQ_HEAD(, ccb_hdr) request_ccbs;
183         probe_action    action;
184         probe_flags     flags;
185         MD5_CTX         context;
186         uint8_t digest[16];
187         struct cam_periph *periph;
188 } probe_softc;
189
190 static const char quantum[] = "QUANTUM";
191 static const char sony[] = "SONY";
192 static const char west_digital[] = "WDIGTL";
193 static const char samsung[] = "SAMSUNG";
194 static const char seagate[] = "SEAGATE";
195 static const char microp[] = "MICROP";
196
197 static struct scsi_quirk_entry scsi_quirk_table[] =
198 {
199         {
200                 /* Reports QUEUE FULL for temporary resource shortages */
201                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
202                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
203         },
204         {
205                 /* Reports QUEUE FULL for temporary resource shortages */
206                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
207                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
208         },
209         {
210                 /* Reports QUEUE FULL for temporary resource shortages */
211                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
212                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
213         },
214         {
215                 /* Broken tagged queuing drive */
216                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
217                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
218         },
219         {
220                 /* Broken tagged queuing drive */
221                 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
222                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
223         },
224         {
225                 /* Broken tagged queuing drive */
226                 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
227                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
228         },
229         {
230                 /*
231                  * Unfortunately, the Quantum Atlas III has the same
232                  * problem as the Atlas II drives above.
233                  * Reported by: "Johan Granlund" <johan@granlund.nu>
234                  *
235                  * For future reference, the drive with the problem was:
236                  * QUANTUM QM39100TD-SW N1B0
237                  *
238                  * It's possible that Quantum will fix the problem in later
239                  * firmware revisions.  If that happens, the quirk entry
240                  * will need to be made specific to the firmware revisions
241                  * with the problem.
242                  *
243                  */
244                 /* Reports QUEUE FULL for temporary resource shortages */
245                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
246                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
247         },
248         {
249                 /*
250                  * 18 Gig Atlas III, same problem as the 9G version.
251                  * Reported by: Andre Albsmeier
252                  *              <andre.albsmeier@mchp.siemens.de>
253                  *
254                  * For future reference, the drive with the problem was:
255                  * QUANTUM QM318000TD-S N491
256                  */
257                 /* Reports QUEUE FULL for temporary resource shortages */
258                 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
259                 /*quirks*/0, /*mintags*/24, /*maxtags*/32
260         },
261         {
262                 /*
263                  * Broken tagged queuing drive
264                  * Reported by: Bret Ford <bford@uop.cs.uop.edu>
265                  *         and: Martin Renters <martin@tdc.on.ca>
266                  */
267                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
268                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
269         },
270                 /*
271                  * The Seagate Medalist Pro drives have very poor write
272                  * performance with anything more than 2 tags.
273                  *
274                  * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
275                  * Drive:  <SEAGATE ST36530N 1444>
276                  *
277                  * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
278                  * Drive:  <SEAGATE ST34520W 1281>
279                  *
280                  * No one has actually reported that the 9G version
281                  * (ST39140*) of the Medalist Pro has the same problem, but
282                  * we're assuming that it does because the 4G and 6.5G
283                  * versions of the drive are broken.
284                  */
285         {
286                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
287                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
288         },
289         {
290                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
291                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
292         },
293         {
294                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
295                 /*quirks*/0, /*mintags*/2, /*maxtags*/2
296         },
297         {
298                 /*
299                  * Experiences command timeouts under load with a
300                  * tag count higher than 55.
301                  */
302                 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST3146855LW", "*"},
303                 /*quirks*/0, /*mintags*/2, /*maxtags*/55
304         },
305         {
306                 /*
307                  * Slow when tagged queueing is enabled.  Write performance
308                  * steadily drops off with more and more concurrent
309                  * transactions.  Best sequential write performance with
310                  * tagged queueing turned off and write caching turned on.
311                  *
312                  * PR:  kern/10398
313                  * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
314                  * Drive:  DCAS-34330 w/ "S65A" firmware.
315                  *
316                  * The drive with the problem had the "S65A" firmware
317                  * revision, and has also been reported (by Stephen J.
318                  * Roznowski <sjr@home.net>) for a drive with the "S61A"
319                  * firmware revision.
320                  *
321                  * Although no one has reported problems with the 2 gig
322                  * version of the DCAS drive, the assumption is that it
323                  * has the same problems as the 4 gig version.  Therefore
324                  * this quirk entries disables tagged queueing for all
325                  * DCAS drives.
326                  */
327                 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
328                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
329         },
330         {
331                 /* Broken tagged queuing drive */
332                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
333                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
334         },
335         {
336                 /* Broken tagged queuing drive */
337                 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
338                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
339         },
340         {
341                 /* This does not support other than LUN 0 */
342                 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
343                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
344         },
345         {
346                 /*
347                  * Broken tagged queuing drive.
348                  * Submitted by:
349                  * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
350                  * in PR kern/9535
351                  */
352                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
353                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
354         },
355         {
356                 /*
357                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
358                  * 8MB/sec.)
359                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
360                  * Best performance with these drives is achieved with
361                  * tagged queueing turned off, and write caching turned on.
362                  */
363                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
364                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
365         },
366         {
367                 /*
368                  * Slow when tagged queueing is enabled. (1.5MB/sec versus
369                  * 8MB/sec.)
370                  * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
371                  * Best performance with these drives is achieved with
372                  * tagged queueing turned off, and write caching turned on.
373                  */
374                 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
375                 /*quirks*/0, /*mintags*/0, /*maxtags*/0
376         },
377         {
378                 /*
379                  * Doesn't handle queue full condition correctly,
380                  * so we need to limit maxtags to what the device
381                  * can handle instead of determining this automatically.
382                  */
383                 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
384                 /*quirks*/0, /*mintags*/2, /*maxtags*/32
385         },
386         {
387                 /* Really only one LUN */
388                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
389                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
390         },
391         {
392                 /* I can't believe we need a quirk for DPT volumes. */
393                 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
394                 CAM_QUIRK_NOLUNS,
395                 /*mintags*/0, /*maxtags*/255
396         },
397         {
398                 /*
399                  * Many Sony CDROM drives don't like multi-LUN probing.
400                  */
401                 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
402                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
403         },
404         {
405                 /*
406                  * This drive doesn't like multiple LUN probing.
407                  * Submitted by:  Parag Patel <parag@cgt.com>
408                  */
409                 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
410                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
411         },
412         {
413                 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
414                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
415         },
416         {
417                 /*
418                  * The 8200 doesn't like multi-lun probing, and probably
419                  * don't like serial number requests either.
420                  */
421                 {
422                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
423                         "EXB-8200*", "*"
424                 },
425                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
426         },
427         {
428                 /*
429                  * Let's try the same as above, but for a drive that says
430                  * it's an IPL-6860 but is actually an EXB 8200.
431                  */
432                 {
433                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
434                         "IPL-6860*", "*"
435                 },
436                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
437         },
438         {
439                 /*
440                  * These Hitachi drives don't like multi-lun probing.
441                  * The PR submitter has a DK319H, but says that the Linux
442                  * kernel has a similar work-around for the DK312 and DK314,
443                  * so all DK31* drives are quirked here.
444                  * PR:            misc/18793
445                  * Submitted by:  Paul Haddad <paul@pth.com>
446                  */
447                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
448                 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
449         },
450         {
451                 /*
452                  * The Hitachi CJ series with J8A8 firmware apparently has
453                  * problems with tagged commands.
454                  * PR: 23536
455                  * Reported by: amagai@nue.org
456                  */
457                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
458                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
459         },
460         {
461                 /*
462                  * These are the large storage arrays.
463                  * Submitted by:  William Carrel <william.carrel@infospace.com>
464                  */
465                 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
466                 CAM_QUIRK_HILUNS, 2, 1024
467         },
468         {
469                 /*
470                  * This old revision of the TDC3600 is also SCSI-1, and
471                  * hangs upon serial number probing.
472                  */
473                 {
474                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
475                         " TDC 3600", "U07:"
476                 },
477                 CAM_QUIRK_NOVPDS, /*mintags*/0, /*maxtags*/0
478         },
479         {
480                 /*
481                  * Would repond to all LUNs if asked for.
482                  */
483                 {
484                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
485                         "CP150", "*"
486                 },
487                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
488         },
489         {
490                 /*
491                  * Would repond to all LUNs if asked for.
492                  */
493                 {
494                         T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
495                         "96X2*", "*"
496                 },
497                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
498         },
499         {
500                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
501                 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
502                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
503         },
504         {
505                 /* Submitted by: Matthew Dodd <winter@jurai.net> */
506                 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
507                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
508         },
509         {
510                 /* TeraSolutions special settings for TRC-22 RAID */
511                 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
512                   /*quirks*/0, /*mintags*/55, /*maxtags*/255
513         },
514         {
515                 /* Veritas Storage Appliance */
516                 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
517                   CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
518         },
519         {
520                 /*
521                  * Would respond to all LUNs.  Device type and removable
522                  * flag are jumper-selectable.
523                  */
524                 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
525                   "Tahiti 1", "*"
526                 },
527                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
528         },
529         {
530                 /* EasyRAID E5A aka. areca ARC-6010 */
531                 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
532                   CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
533         },
534         {
535                 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
536                 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
537         },
538         {
539                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Garmin", "*", "*" },
540                 CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255
541         },
542         {
543                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "STORAGE DEVICE*", "120?" },
544                 CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255
545         },
546         {
547                 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic", "MassStorageClass", "1533" },
548                 CAM_QUIRK_NORPTLUNS, /*mintags*/2, /*maxtags*/255
549         },
550         {
551                 /* Default tagged queuing parameters for all devices */
552                 {
553                   T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
554                   /*vendor*/"*", /*product*/"*", /*revision*/"*"
555                 },
556                 /*quirks*/0, /*mintags*/2, /*maxtags*/255
557         },
558 };
559
560 static cam_status       proberegister(struct cam_periph *periph,
561                                       void *arg);
562 static void      probeschedule(struct cam_periph *probe_periph);
563 static void      probestart(struct cam_periph *periph, union ccb *start_ccb);
564 static void      proberequestdefaultnegotiation(struct cam_periph *periph);
565 static int       proberequestbackoff(struct cam_periph *periph,
566                                      struct cam_ed *device);
567 static void      probedone(struct cam_periph *periph, union ccb *done_ccb);
568 static void      probe_purge_old(struct cam_path *path,
569                                  struct scsi_report_luns_data *new,
570                                  probe_flags flags);
571 static void      probecleanup(struct cam_periph *periph);
572 static void      scsi_find_quirk(struct cam_ed *device);
573 static void      scsi_scan_bus(struct cam_periph *periph, union ccb *ccb);
574 static void      scsi_scan_lun(struct cam_periph *periph,
575                                struct cam_path *path, cam_flags flags,
576                                union ccb *ccb);
577 static void      xptscandone(struct cam_periph *periph, union ccb *done_ccb);
578 static struct cam_ed *
579                  scsi_alloc_device(struct cam_eb *bus, struct cam_et *target,
580                                    lun_id_t lun_id);
581 static void      scsi_devise_transport(struct cam_path *path);
582 static void      scsi_set_transfer_settings(struct ccb_trans_settings *cts,
583                                             struct cam_path *path,
584                                             int async_update);
585 static void      scsi_toggle_tags(struct cam_path *path);
586 static void      scsi_dev_async(uint32_t async_code,
587                                 struct cam_eb *bus,
588                                 struct cam_et *target,
589                                 struct cam_ed *device,
590                                 void *async_arg);
591 static void      scsi_action(union ccb *start_ccb);
592 static void      scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb);
593 static void      scsi_proto_announce_sbuf(struct cam_ed *device,
594                                           struct sbuf *sb);
595 static void      scsi_proto_denounce_sbuf(struct cam_ed *device,
596                                           struct sbuf *sb);
597 static void      scsi_proto_debug_out(union ccb *ccb);
598 static void      _scsi_announce_periph(struct cam_periph *, u_int *, u_int *, struct ccb_trans_settings *);
599
600 static struct xpt_xport_ops scsi_xport_ops = {
601         .alloc_device = scsi_alloc_device,
602         .action = scsi_action,
603         .async = scsi_dev_async,
604         .announce_sbuf = scsi_announce_periph_sbuf,
605 };
606 #define SCSI_XPT_XPORT(x, X)                    \
607 static struct xpt_xport scsi_xport_ ## x = {    \
608         .xport = XPORT_ ## X,                   \
609         .name = #x,                             \
610         .ops = &scsi_xport_ops,                 \
611 };                                              \
612 CAM_XPT_XPORT(scsi_xport_ ## x);
613
614 SCSI_XPT_XPORT(spi, SPI);
615 SCSI_XPT_XPORT(sas, SAS);
616 SCSI_XPT_XPORT(fc, FC);
617 SCSI_XPT_XPORT(usb, USB);
618 SCSI_XPT_XPORT(iscsi, ISCSI);
619 SCSI_XPT_XPORT(srp, SRP);
620 SCSI_XPT_XPORT(ppb, PPB);
621
622 #undef SCSI_XPORT_XPORT
623
624 static struct xpt_proto_ops scsi_proto_ops = {
625         .announce_sbuf = scsi_proto_announce_sbuf,
626         .denounce_sbuf = scsi_proto_denounce_sbuf,
627         .debug_out = scsi_proto_debug_out,
628 };
629 static struct xpt_proto scsi_proto = {
630         .proto = PROTO_SCSI,
631         .name = "scsi",
632         .ops = &scsi_proto_ops,
633 };
634 CAM_XPT_PROTO(scsi_proto);
635
636 static void
637 probe_periph_init(void)
638 {
639 }
640
641 static cam_status
642 proberegister(struct cam_periph *periph, void *arg)
643 {
644         union ccb *request_ccb; /* CCB representing the probe request */
645         probe_softc *softc;
646
647         request_ccb = (union ccb *)arg;
648         if (request_ccb == NULL) {
649                 printf("proberegister: no probe CCB, "
650                        "can't register device\n");
651                 return(CAM_REQ_CMP_ERR);
652         }
653
654         softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
655
656         if (softc == NULL) {
657                 printf("proberegister: Unable to probe new device. "
658                        "Unable to allocate softc\n");
659                 return(CAM_REQ_CMP_ERR);
660         }
661         TAILQ_INIT(&softc->request_ccbs);
662         TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
663                           periph_links.tqe);
664         softc->flags = 0;
665         periph->softc = softc;
666         softc->periph = periph;
667         softc->action = PROBE_INVALID;
668         if (cam_periph_acquire(periph) != 0)
669                 return (CAM_REQ_CMP_ERR);
670
671         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe started\n"));
672         scsi_devise_transport(periph->path);
673
674         /*
675          * Ensure we've waited at least a bus settle
676          * delay before attempting to probe the device.
677          * For HBAs that don't do bus resets, this won't make a difference.
678          */
679         cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
680                                       scsi_delay);
681         probeschedule(periph);
682         return(CAM_REQ_CMP);
683 }
684
685 static void
686 probeschedule(struct cam_periph *periph)
687 {
688         struct ccb_pathinq cpi;
689         union ccb *ccb;
690         probe_softc *softc;
691
692         softc = (probe_softc *)periph->softc;
693         ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
694
695         xpt_path_inq(&cpi, periph->path);
696
697         /*
698          * If a device has gone away and another device, or the same one,
699          * is back in the same place, it should have a unit attention
700          * condition pending.  It will not report the unit attention in
701          * response to an inquiry, which may leave invalid transfer
702          * negotiations in effect.  The TUR will reveal the unit attention
703          * condition.  Only send the TUR for lun 0, since some devices
704          * will get confused by commands other than inquiry to non-existent
705          * luns.  If you think a device has gone away start your scan from
706          * lun 0.  This will insure that any bogus transfer settings are
707          * invalidated.
708          *
709          * If we haven't seen the device before and the controller supports
710          * some kind of transfer negotiation, negotiate with the first
711          * sent command if no bus reset was performed at startup.  This
712          * ensures that the device is not confused by transfer negotiation
713          * settings left over by loader or BIOS action.
714          */
715         if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
716          && (ccb->ccb_h.target_lun == 0)) {
717                 PROBE_SET_ACTION(softc, PROBE_TUR);
718         } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
719               && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
720                 proberequestdefaultnegotiation(periph);
721                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
722         } else {
723                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
724         }
725
726         if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
727                 softc->flags |= PROBE_NO_ANNOUNCE;
728         else
729                 softc->flags &= ~PROBE_NO_ANNOUNCE;
730
731         if (cpi.hba_misc & PIM_EXTLUNS)
732                 softc->flags |= PROBE_EXTLUN;
733         else
734                 softc->flags &= ~PROBE_EXTLUN;
735
736         xpt_schedule(periph, CAM_PRIORITY_XPT);
737 }
738
739 static void
740 probestart(struct cam_periph *periph, union ccb *start_ccb)
741 {
742         /* Probe the device that our peripheral driver points to */
743         struct ccb_scsiio *csio;
744         probe_softc *softc;
745
746         CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
747
748         softc = (probe_softc *)periph->softc;
749         csio = &start_ccb->csio;
750 again:
751
752         switch (softc->action) {
753         case PROBE_TUR:
754         case PROBE_TUR_FOR_NEGOTIATION:
755         case PROBE_DV_EXIT:
756         {
757                 scsi_test_unit_ready(csio,
758                                      /*retries*/4,
759                                      probedone,
760                                      MSG_SIMPLE_Q_TAG,
761                                      SSD_FULL_SIZE,
762                                      /*timeout*/60000);
763                 break;
764         }
765         case PROBE_INQUIRY:
766         case PROBE_FULL_INQUIRY:
767         {
768                 u_int inquiry_len;
769                 struct scsi_inquiry_data *inq_buf;
770
771                 inq_buf = &periph->path->device->inq_data;
772
773                 /*
774                  * If the device is currently configured, we calculate an
775                  * MD5 checksum of the inquiry data, and if the serial number
776                  * length is greater than 0, add the serial number data
777                  * into the checksum as well.  Once the inquiry and the
778                  * serial number check finish, we attempt to figure out
779                  * whether we still have the same device.
780                  */
781                 if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
782                         softc->flags &= ~PROBE_INQUIRY_CKSUM;
783                 } else if ((softc->flags & PROBE_INQUIRY_CKSUM) == 0) {
784                         MD5Init(&softc->context);
785                         MD5Update(&softc->context, (unsigned char *)inq_buf,
786                                   sizeof(struct scsi_inquiry_data));
787                         if (periph->path->device->serial_num_len > 0) {
788                                 MD5Update(&softc->context,
789                                           periph->path->device->serial_num,
790                                           periph->path->device->serial_num_len);
791                         }
792                         MD5Final(softc->digest, &softc->context);
793                         softc->flags |= PROBE_INQUIRY_CKSUM;
794                 }
795
796                 if (softc->action == PROBE_INQUIRY)
797                         inquiry_len = SHORT_INQUIRY_LENGTH;
798                 else
799                         inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
800
801                 /*
802                  * Some parallel SCSI devices fail to send an
803                  * ignore wide residue message when dealing with
804                  * odd length inquiry requests.  Round up to be
805                  * safe.
806                  */
807                 inquiry_len = roundup2(inquiry_len, 2);
808
809                 scsi_inquiry(csio,
810                              /*retries*/4,
811                              probedone,
812                              MSG_SIMPLE_Q_TAG,
813                              (uint8_t *)inq_buf,
814                              inquiry_len,
815                              /*evpd*/FALSE,
816                              /*page_code*/0,
817                              SSD_MIN_SIZE,
818                              /*timeout*/60 * 1000);
819                 break;
820         }
821         case PROBE_REPORT_LUNS:
822         {
823                 void *rp;
824
825                 rp = malloc(periph->path->target->rpl_size,
826                     M_CAMXPT, M_NOWAIT | M_ZERO);
827                 if (rp == NULL) {
828                         struct scsi_inquiry_data *inq_buf;
829                         inq_buf = &periph->path->device->inq_data;
830                         xpt_print(periph->path,
831                             "Unable to alloc report luns storage\n");
832                         if (INQ_DATA_TQ_ENABLED(inq_buf))
833                                 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
834                         else
835                                 PROBE_SET_ACTION(softc,
836                                     PROBE_SUPPORTED_VPD_LIST);
837                         goto again;
838                 }
839                 scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG,
840                     RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size,
841                     SSD_FULL_SIZE, 60000);
842                 break;
843         }
844         case PROBE_MODE_SENSE:
845         {
846                 void  *mode_buf;
847                 int    mode_buf_len;
848
849                 mode_buf_len = sizeof(struct scsi_mode_header_6)
850                              + sizeof(struct scsi_mode_blk_desc)
851                              + sizeof(struct scsi_control_page);
852                 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT);
853                 if (mode_buf != NULL) {
854                         scsi_mode_sense(csio,
855                                         /*retries*/4,
856                                         probedone,
857                                         MSG_SIMPLE_Q_TAG,
858                                         /*dbd*/FALSE,
859                                         SMS_PAGE_CTRL_CURRENT,
860                                         SMS_CONTROL_MODE_PAGE,
861                                         mode_buf,
862                                         mode_buf_len,
863                                         SSD_FULL_SIZE,
864                                         /*timeout*/60000);
865                         break;
866                 }
867                 xpt_print(periph->path, "Unable to mode sense control page - "
868                     "malloc failure\n");
869                 PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
870         }
871         /* FALLTHROUGH */
872         case PROBE_SUPPORTED_VPD_LIST:
873         {
874                 struct scsi_vpd_supported_page_list *vpd_list;
875                 struct cam_ed *device;
876
877                 vpd_list = NULL;
878                 device = periph->path->device;
879
880                 if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOVPDS) == 0)
881                         vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT,
882                             M_NOWAIT | M_ZERO);
883
884                 if (vpd_list != NULL) {
885                         scsi_inquiry(csio,
886                                      /*retries*/4,
887                                      probedone,
888                                      MSG_SIMPLE_Q_TAG,
889                                      (uint8_t *)vpd_list,
890                                      sizeof(*vpd_list),
891                                      /*evpd*/TRUE,
892                                      SVPD_SUPPORTED_PAGE_LIST,
893                                      SSD_MIN_SIZE,
894                                      /*timeout*/60 * 1000);
895                         break;
896                 }
897 done:
898                 /*
899                  * We'll have to do without, let our probedone
900                  * routine finish up for us.
901                  */
902                 start_ccb->csio.data_ptr = NULL;
903                 cam_freeze_devq(periph->path);
904                 cam_periph_doacquire(periph);
905                 probedone(periph, start_ccb);
906                 return;
907         }
908         case PROBE_DEVICE_ID:
909         {
910                 struct scsi_vpd_device_id *devid;
911
912                 devid = NULL;
913                 if (scsi_vpd_supported_page(periph, SVPD_DEVICE_ID))
914                         devid = malloc(SVPD_DEVICE_ID_MAX_SIZE, M_CAMXPT,
915                             M_NOWAIT | M_ZERO);
916
917                 if (devid != NULL) {
918                         scsi_inquiry(csio,
919                                      /*retries*/4,
920                                      probedone,
921                                      MSG_SIMPLE_Q_TAG,
922                                      (uint8_t *)devid,
923                                      SVPD_DEVICE_ID_MAX_SIZE,
924                                      /*evpd*/TRUE,
925                                      SVPD_DEVICE_ID,
926                                      SSD_MIN_SIZE,
927                                      /*timeout*/60 * 1000);
928                         break;
929                 }
930                 goto done;
931         }
932         case PROBE_EXTENDED_INQUIRY:
933         {
934                 struct scsi_vpd_extended_inquiry_data *ext_inq;
935
936                 ext_inq = NULL;
937                 if (scsi_vpd_supported_page(periph, SVPD_EXTENDED_INQUIRY_DATA))
938                         ext_inq = malloc(sizeof(*ext_inq), M_CAMXPT,
939                             M_NOWAIT | M_ZERO);
940
941                 if (ext_inq != NULL) {
942                         scsi_inquiry(csio,
943                                      /*retries*/4,
944                                      probedone,
945                                      MSG_SIMPLE_Q_TAG,
946                                      (uint8_t *)ext_inq,
947                                      sizeof(*ext_inq),
948                                      /*evpd*/TRUE,
949                                      SVPD_EXTENDED_INQUIRY_DATA,
950                                      SSD_MIN_SIZE,
951                                      /*timeout*/60 * 1000);
952                         break;
953                 }
954                 /*
955                  * We'll have to do without, let our probedone
956                  * routine finish up for us.
957                  */
958                 goto done;
959         }
960         case PROBE_SERIAL_NUM:
961         {
962                 struct scsi_vpd_unit_serial_number *serial_buf;
963                 struct cam_ed* device;
964
965                 serial_buf = NULL;
966                 device = periph->path->device;
967                 if (device->serial_num != NULL) {
968                         free(device->serial_num, M_CAMXPT);
969                         device->serial_num = NULL;
970                         device->serial_num_len = 0;
971                 }
972
973                 if (scsi_vpd_supported_page(periph, SVPD_UNIT_SERIAL_NUMBER))
974                         serial_buf = (struct scsi_vpd_unit_serial_number *)
975                                 malloc(sizeof(*serial_buf), M_CAMXPT,
976                                     M_NOWAIT|M_ZERO);
977
978                 if (serial_buf != NULL) {
979                         scsi_inquiry(csio,
980                                      /*retries*/4,
981                                      probedone,
982                                      MSG_SIMPLE_Q_TAG,
983                                      (uint8_t *)serial_buf,
984                                      sizeof(*serial_buf),
985                                      /*evpd*/TRUE,
986                                      SVPD_UNIT_SERIAL_NUMBER,
987                                      SSD_MIN_SIZE,
988                                      /*timeout*/60 * 1000);
989                         break;
990                 }
991                 goto done;
992         }
993         case PROBE_INQUIRY_BASIC_DV1:
994         case PROBE_INQUIRY_BASIC_DV2:
995         {
996                 u_int inquiry_len;
997                 struct scsi_inquiry_data *inq_buf;
998
999                 inq_buf = &periph->path->device->inq_data;
1000                 inquiry_len = roundup2(SID_ADDITIONAL_LENGTH(inq_buf), 2);
1001                 inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT);
1002                 if (inq_buf == NULL) {
1003                         xpt_print(periph->path, "malloc failure- skipping Basic"
1004                             "Domain Validation\n");
1005                         PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
1006                         scsi_test_unit_ready(csio,
1007                                              /*retries*/4,
1008                                              probedone,
1009                                              MSG_SIMPLE_Q_TAG,
1010                                              SSD_FULL_SIZE,
1011                                              /*timeout*/60000);
1012                         break;
1013                 }
1014
1015                 scsi_inquiry(csio,
1016                              /*retries*/4,
1017                              probedone,
1018                              MSG_SIMPLE_Q_TAG,
1019                              (uint8_t *)inq_buf,
1020                              inquiry_len,
1021                              /*evpd*/FALSE,
1022                              /*page_code*/0,
1023                              SSD_MIN_SIZE,
1024                              /*timeout*/60 * 1000);
1025                 break;
1026         }
1027         default:
1028                 panic("probestart: invalid action state 0x%x\n", softc->action);
1029         }
1030         start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1031         cam_periph_doacquire(periph);
1032         xpt_action(start_ccb);
1033 }
1034
1035 static void
1036 proberequestdefaultnegotiation(struct cam_periph *periph)
1037 {
1038         struct ccb_trans_settings cts;
1039
1040         memset(&cts, 0, sizeof(cts));
1041         xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
1042         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1043         cts.type = CTS_TYPE_USER_SETTINGS;
1044         xpt_action((union ccb *)&cts);
1045         if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
1046                 return;
1047         }
1048         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
1049         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1050         xpt_action((union ccb *)&cts);
1051 }
1052
1053 /*
1054  * Backoff Negotiation Code- only pertinent for SPI devices.
1055  */
1056 static int
1057 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
1058 {
1059         struct ccb_trans_settings cts;
1060         struct ccb_trans_settings_spi *spi;
1061
1062         memset(&cts, 0, sizeof (cts));
1063         xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
1064         cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1065         cts.type = CTS_TYPE_CURRENT_SETTINGS;
1066         xpt_action((union ccb *)&cts);
1067         if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
1068                 if (bootverbose) {
1069                         xpt_print(periph->path,
1070                             "failed to get current device settings\n");
1071                 }
1072                 return (0);
1073         }
1074         if (cts.transport != XPORT_SPI) {
1075                 if (bootverbose) {
1076                         xpt_print(periph->path, "not SPI transport\n");
1077                 }
1078                 return (0);
1079         }
1080         spi = &cts.xport_specific.spi;
1081
1082         /*
1083          * We cannot renegotiate sync rate if we don't have one.
1084          */
1085         if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
1086                 if (bootverbose) {
1087                         xpt_print(periph->path, "no sync rate known\n");
1088                 }
1089                 return (0);
1090         }
1091
1092         /*
1093          * We'll assert that we don't have to touch PPR options- the
1094          * SIM will see what we do with period and offset and adjust
1095          * the PPR options as appropriate.
1096          */
1097
1098         /*
1099          * A sync rate with unknown or zero offset is nonsensical.
1100          * A sync period of zero means Async.
1101          */
1102         if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
1103          || spi->sync_offset == 0 || spi->sync_period == 0) {
1104                 if (bootverbose) {
1105                         xpt_print(periph->path, "no sync rate available\n");
1106                 }
1107                 return (0);
1108         }
1109
1110         if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
1111                 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1112                     ("hit async: giving up on DV\n"));
1113                 return (0);
1114         }
1115
1116         /*
1117          * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
1118          * We don't try to remember 'last' settings to see if the SIM actually
1119          * gets into the speed we want to set. We check on the SIM telling
1120          * us that a requested speed is bad, but otherwise don't try and
1121          * check the speed due to the asynchronous and handshake nature
1122          * of speed setting.
1123          */
1124         spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
1125         for (;;) {
1126                 spi->sync_period++;
1127                 if (spi->sync_period >= 0xf) {
1128                         spi->sync_period = 0;
1129                         spi->sync_offset = 0;
1130                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1131                             ("setting to async for DV\n"));
1132                         /*
1133                          * Once we hit async, we don't want to try
1134                          * any more settings.
1135                          */
1136                         device->flags |= CAM_DEV_DV_HIT_BOTTOM;
1137                 } else if (bootverbose) {
1138                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1139                             ("DV: period 0x%x\n", spi->sync_period));
1140                         printf("setting period to 0x%x\n", spi->sync_period);
1141                 }
1142                 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
1143                 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1144                 xpt_action((union ccb *)&cts);
1145                 if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP) {
1146                         break;
1147                 }
1148                 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1149                     ("DV: failed to set period 0x%x\n", spi->sync_period));
1150                 if (spi->sync_period == 0) {
1151                         return (0);
1152                 }
1153         }
1154         return (1);
1155 }
1156
1157 #define CCB_COMPLETED_OK(ccb) (((ccb).status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1158
1159 static void
1160 probedone(struct cam_periph *periph, union ccb *done_ccb)
1161 {
1162         probe_softc *softc;
1163         struct cam_path *path;
1164         struct scsi_inquiry_data *inq_buf;
1165         uint32_t  priority;
1166
1167         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
1168
1169         softc = (probe_softc *)periph->softc;
1170         path = done_ccb->ccb_h.path;
1171         priority = done_ccb->ccb_h.pinfo.priority;
1172         cam_periph_assert(periph, MA_OWNED);
1173
1174         switch (softc->action) {
1175         case PROBE_TUR:
1176         {
1177                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1178                         if (cam_periph_error(done_ccb, 0, SF_NO_PRINT) ==
1179                             ERESTART) {
1180 outr:
1181                                 /* Drop freeze taken due to CAM_DEV_QFREEZE */
1182                                 cam_release_devq(path, 0, 0, 0, FALSE);
1183                                 return;
1184                         }
1185                         else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1186                                 /* Don't wedge the queue */
1187                                 xpt_release_devq(done_ccb->ccb_h.path,
1188                                                  /*count*/1,
1189                                                  /*run_queue*/TRUE);
1190                 }
1191                 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
1192                 xpt_release_ccb(done_ccb);
1193                 xpt_schedule(periph, priority);
1194 out:
1195                 /* Drop freeze taken due to CAM_DEV_QFREEZE and release. */
1196                 cam_release_devq(path, 0, 0, 0, FALSE);
1197                 cam_periph_release_locked(periph);
1198                 return;
1199         }
1200         case PROBE_INQUIRY:
1201         case PROBE_FULL_INQUIRY:
1202         {
1203                 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
1204                         uint8_t periph_qual;
1205
1206                         path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
1207                         scsi_find_quirk(path->device);
1208                         inq_buf = &path->device->inq_data;
1209
1210                         periph_qual = SID_QUAL(inq_buf);
1211
1212                         if (periph_qual == SID_QUAL_LU_CONNECTED ||
1213                             periph_qual == SID_QUAL_LU_OFFLINE) {
1214                                 /*
1215                                  * We conservatively request only
1216                                  * SHORT_INQUIRY_LEN bytes of inquiry
1217                                  * information during our first try
1218                                  * at sending an INQUIRY. If the device
1219                                  * has more information to give,
1220                                  * perform a second request specifying
1221                                  * the amount of information the device
1222                                  * is willing to give.
1223                                  */
1224                                 if (softc->action == PROBE_INQUIRY
1225                                     && SID_ADDITIONAL_LENGTH(inq_buf)
1226                                     > SHORT_INQUIRY_LENGTH) {
1227                                         PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
1228                                         xpt_release_ccb(done_ccb);
1229                                         xpt_schedule(periph, priority);
1230                                         goto out;
1231                                 }
1232
1233                                 scsi_devise_transport(path);
1234
1235                                 if (path->device->lun_id == 0 &&
1236                                     SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 &&
1237                                     (SCSI_QUIRK(path->device)->quirks &
1238                                      CAM_QUIRK_NORPTLUNS) == 0) {
1239                                         PROBE_SET_ACTION(softc,
1240                                             PROBE_REPORT_LUNS);
1241                                         /*
1242                                          * Start with room for *one* lun.
1243                                          */
1244                                         periph->path->target->rpl_size = 16;
1245                                 } else if (INQ_DATA_TQ_ENABLED(inq_buf))
1246                                         PROBE_SET_ACTION(softc,
1247                                             PROBE_MODE_SENSE);
1248                                 else
1249                                         PROBE_SET_ACTION(softc,
1250                                             PROBE_SUPPORTED_VPD_LIST);
1251
1252                                 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1253                                         path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1254                                         xpt_acquire_device(path->device);
1255                                 }
1256                                 xpt_release_ccb(done_ccb);
1257                                 xpt_schedule(periph, priority);
1258                                 goto out;
1259                         } else if (path->device->lun_id == 0 &&
1260                             SID_ANSI_REV(inq_buf) >= SCSI_REV_SPC2 &&
1261                             (SCSI_QUIRK(path->device)->quirks &
1262                              CAM_QUIRK_NORPTLUNS) == 0) {
1263                                 PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS);
1264                                 periph->path->target->rpl_size = 16;
1265                                 xpt_release_ccb(done_ccb);
1266                                 xpt_schedule(periph, priority);
1267                                 goto out;
1268                         }
1269                 } else if (cam_periph_error(done_ccb, 0,
1270                                             done_ccb->ccb_h.target_lun > 0
1271                                             ? SF_RETRY_UA|SF_QUIET_IR
1272                                             : SF_RETRY_UA) == ERESTART) {
1273                         goto outr;
1274                 } else {
1275                         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1276                                 /* Don't wedge the queue */
1277                                 xpt_release_devq(done_ccb->ccb_h.path,
1278                                     /*count*/1, /*run_queue*/TRUE);
1279                         }
1280                         path->device->flags &= ~CAM_DEV_INQUIRY_DATA_VALID;
1281                 }
1282                 /*
1283                  * If we get to this point, we got an error status back
1284                  * from the inquiry and the error status doesn't require
1285                  * automatically retrying the command.  Therefore, the
1286                  * inquiry failed.  If we had inquiry information before
1287                  * for this device, but this latest inquiry command failed,
1288                  * the device has probably gone away.  If this device isn't
1289                  * already marked unconfigured, notify the peripheral
1290                  * drivers that this device is no more.
1291                  */
1292                 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
1293                         /* Send the async notification. */
1294                         xpt_async(AC_LOST_DEVICE, path, NULL);
1295                 PROBE_SET_ACTION(softc, PROBE_INVALID);
1296
1297                 xpt_release_ccb(done_ccb);
1298                 break;
1299         }
1300         case PROBE_REPORT_LUNS:
1301         {
1302                 struct ccb_scsiio *csio;
1303                 struct scsi_report_luns_data *lp;
1304                 u_int nlun, maxlun;
1305
1306                 csio = &done_ccb->csio;
1307
1308                 lp = (struct scsi_report_luns_data *)csio->data_ptr;
1309                 nlun = scsi_4btoul(lp->length) / 8;
1310                 maxlun = (csio->dxfer_len / 8) - 1;
1311
1312                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1313                         if (cam_periph_error(done_ccb, 0,
1314                                 done_ccb->ccb_h.target_lun > 0 ?
1315                                 SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA) ==
1316                             ERESTART) {
1317                                 goto outr;
1318                         }
1319                         if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1320                                 xpt_release_devq(done_ccb->ccb_h.path, 1,
1321                                     TRUE);
1322                         }
1323                         free(lp, M_CAMXPT);
1324                         lp = NULL;
1325                 } else if (nlun > maxlun) {
1326                         /*
1327                          * Reallocate and retry to cover all luns
1328                          */
1329                         CAM_DEBUG(path, CAM_DEBUG_PROBE,
1330                             ("Probe: reallocating REPORT_LUNS for %u luns\n",
1331                              nlun));
1332                         free(lp, M_CAMXPT);
1333                         path->target->rpl_size = (nlun << 3) + 8;
1334                         xpt_release_ccb(done_ccb);
1335                         xpt_schedule(periph, priority);
1336                         goto out;
1337                 } else if (nlun == 0) {
1338                         /*
1339                          * If there don't appear to be any luns, bail.
1340                          */
1341                         free(lp, M_CAMXPT);
1342                         lp = NULL;
1343                 } else {
1344                         lun_id_t lun;
1345                         int idx;
1346
1347                         CAM_DEBUG(path, CAM_DEBUG_PROBE,
1348                            ("Probe: %u lun(s) reported\n", nlun));
1349
1350                         CAM_GET_LUN(lp, 0, lun);
1351                         /*
1352                          * If the first lun is not lun 0, then either there
1353                          * is no lun 0 in the list, or the list is unsorted.
1354                          */
1355                         if (lun != 0) {
1356                                 for (idx = 0; idx < nlun; idx++) {
1357                                         CAM_GET_LUN(lp, idx, lun);
1358                                         if (lun == 0) {
1359                                                 break;
1360                                         }
1361                                 }
1362                                 if (idx != nlun) {
1363                                         uint8_t tlun[8];
1364                                         memcpy(tlun,
1365                                             lp->luns[0].lundata, 8);
1366                                         memcpy(lp->luns[0].lundata,
1367                                             lp->luns[idx].lundata, 8);
1368                                         memcpy(lp->luns[idx].lundata,
1369                                             tlun, 8);
1370                                         CAM_DEBUG(path, CAM_DEBUG_PROBE,
1371                                             ("lun 0 in position %u\n", idx));
1372                                 }
1373                         }
1374                         /*
1375                          * If we have an old lun list, We can either
1376                          * retest luns that appear to have been dropped,
1377                          * or just nuke them.  We'll opt for the latter.
1378                          * This function will also install the new list
1379                          * in the target structure.
1380                          */
1381                         probe_purge_old(path, lp, softc->flags);
1382                         lp = NULL;
1383                 }
1384                 /* The processing above should either exit via a `goto
1385                  * out` or leave the `lp` variable `NULL` and (if
1386                  * applicable) `free()` the storage to which it had
1387                  * pointed. Assert here that is the case.
1388                  */
1389                 KASSERT(lp == NULL, ("%s: lp is not NULL", __func__));
1390                 inq_buf = &path->device->inq_data;
1391                 if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID &&
1392                     (SID_QUAL(inq_buf) == SID_QUAL_LU_CONNECTED ||
1393                     SID_QUAL(inq_buf) == SID_QUAL_LU_OFFLINE)) {
1394                         if (INQ_DATA_TQ_ENABLED(inq_buf))
1395                                 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE);
1396                         else
1397                                 PROBE_SET_ACTION(softc,
1398                                     PROBE_SUPPORTED_VPD_LIST);
1399                         xpt_release_ccb(done_ccb);
1400                         xpt_schedule(periph, priority);
1401                         goto out;
1402                 }
1403                 PROBE_SET_ACTION(softc, PROBE_INVALID);
1404                 xpt_release_ccb(done_ccb);
1405                 break;
1406         }
1407         case PROBE_MODE_SENSE:
1408         {
1409                 struct ccb_scsiio *csio;
1410                 struct scsi_mode_header_6 *mode_hdr;
1411
1412                 csio = &done_ccb->csio;
1413                 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
1414                 if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
1415                         struct scsi_control_page *page;
1416                         uint8_t *offset;
1417
1418                         offset = ((uint8_t *)&mode_hdr[1])
1419                             + mode_hdr->blk_desc_len;
1420                         page = (struct scsi_control_page *)offset;
1421                         path->device->queue_flags = page->queue_flags;
1422                 } else if (cam_periph_error(done_ccb, 0,
1423                         SF_RETRY_UA|SF_NO_PRINT) == ERESTART) {
1424                         goto outr;
1425                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1426                         /* Don't wedge the queue */
1427                         xpt_release_devq(done_ccb->ccb_h.path,
1428                                          /*count*/1, /*run_queue*/TRUE);
1429                 }
1430                 xpt_release_ccb(done_ccb);
1431                 free(mode_hdr, M_CAMXPT);
1432                 PROBE_SET_ACTION(softc, PROBE_SUPPORTED_VPD_LIST);
1433                 xpt_schedule(periph, priority);
1434                 goto out;
1435         }
1436         case PROBE_SUPPORTED_VPD_LIST:
1437         {
1438                 struct ccb_scsiio *csio;
1439                 struct scsi_vpd_supported_page_list *page_list;
1440
1441                 csio = &done_ccb->csio;
1442                 page_list =
1443                     (struct scsi_vpd_supported_page_list *)csio->data_ptr;
1444
1445                 if (path->device->supported_vpds != NULL) {
1446                         free(path->device->supported_vpds, M_CAMXPT);
1447                         path->device->supported_vpds = NULL;
1448                         path->device->supported_vpds_len = 0;
1449                 }
1450
1451                 if (page_list == NULL) {
1452                         /*
1453                          * Don't process the command as it was never sent
1454                          */
1455                 } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
1456                         /* Got vpd list */
1457                         path->device->supported_vpds_len = page_list->length +
1458                             SVPD_SUPPORTED_PAGES_HDR_LEN;
1459                         path->device->supported_vpds = (uint8_t *)page_list;
1460                         xpt_release_ccb(done_ccb);
1461                         PROBE_SET_ACTION(softc, PROBE_DEVICE_ID);
1462                         xpt_schedule(periph, priority);
1463                         goto out;
1464                 } else if (cam_periph_error(done_ccb, 0,
1465                         SF_RETRY_UA|SF_NO_PRINT) == ERESTART) {
1466                         goto outr;
1467                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1468                         /* Don't wedge the queue */
1469                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1470                                          /*run_queue*/TRUE);
1471                 }
1472
1473                 if (page_list)
1474                         free(page_list, M_CAMXPT);
1475                 /* No VPDs available, skip to device check. */
1476                 csio->data_ptr = NULL;
1477                 goto probe_device_check;
1478         }
1479         case PROBE_DEVICE_ID:
1480         {
1481                 struct scsi_vpd_device_id *devid;
1482                 struct ccb_scsiio *csio;
1483                 uint32_t length = 0;
1484
1485                 csio = &done_ccb->csio;
1486                 devid = (struct scsi_vpd_device_id *)csio->data_ptr;
1487
1488                 /* Clean up from previous instance of this device */
1489                 if (path->device->device_id != NULL) {
1490                         path->device->device_id_len = 0;
1491                         free(path->device->device_id, M_CAMXPT);
1492                         path->device->device_id = NULL;
1493                 }
1494
1495                 if (devid == NULL) {
1496                         /* Don't process the command as it was never sent */
1497                 } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
1498                         length = scsi_2btoul(devid->length);
1499                         if (length != 0) {
1500                                 /*
1501                                  * NB: device_id_len is actual response
1502                                  * size, not buffer size.
1503                                  */
1504                                 path->device->device_id_len = length +
1505                                     SVPD_DEVICE_ID_HDR_LEN;
1506                                 path->device->device_id = (uint8_t *)devid;
1507                         }
1508                 } else if (cam_periph_error(done_ccb, 0,
1509                         SF_RETRY_UA) == ERESTART) {
1510                         goto outr;
1511                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1512                         /* Don't wedge the queue */
1513                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1514                                          /*run_queue*/TRUE);
1515                 }
1516
1517                 /* Free the device id space if we don't use it */
1518                 if (devid && length == 0)
1519                         free(devid, M_CAMXPT);
1520                 xpt_release_ccb(done_ccb);
1521                 PROBE_SET_ACTION(softc, PROBE_EXTENDED_INQUIRY);
1522                 xpt_schedule(periph, priority);
1523                 goto out;
1524         }
1525         case PROBE_EXTENDED_INQUIRY: {
1526                 struct scsi_vpd_extended_inquiry_data *ext_inq;
1527                 struct ccb_scsiio *csio;
1528                 int32_t length = 0;
1529
1530                 csio = &done_ccb->csio;
1531                 ext_inq = (struct scsi_vpd_extended_inquiry_data *)
1532                     csio->data_ptr;
1533                 if (path->device->ext_inq != NULL) {
1534                         path->device->ext_inq_len = 0;
1535                         free(path->device->ext_inq, M_CAMXPT);
1536                         path->device->ext_inq = NULL;
1537                 }
1538
1539                 if (ext_inq == NULL) {
1540                         /* Don't process the command as it was never sent */
1541                 } else if (CCB_COMPLETED_OK(csio->ccb_h)) {
1542                         length = scsi_2btoul(ext_inq->page_length) +
1543                             __offsetof(struct scsi_vpd_extended_inquiry_data,
1544                             flags1);
1545                         length = min(length, sizeof(*ext_inq));
1546                         length -= csio->resid;
1547                         if (length > 0) {
1548                                 path->device->ext_inq_len = length;
1549                                 path->device->ext_inq = (uint8_t *)ext_inq;
1550                         }
1551                 } else if (cam_periph_error(done_ccb, 0, SF_RETRY_UA) ==
1552                     ERESTART) {
1553                         goto outr;
1554                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1555                         /* Don't wedge the queue */
1556                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1557                                          /*run_queue*/TRUE);
1558                 }
1559
1560                 /* Free the device id space if we don't use it */
1561                 if (ext_inq && length <= 0)
1562                         free(ext_inq, M_CAMXPT);
1563                 xpt_release_ccb(done_ccb);
1564                 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM);
1565                 xpt_schedule(periph, priority);
1566                 goto out;
1567         }
1568
1569 probe_device_check:
1570         case PROBE_SERIAL_NUM:
1571         {
1572                 struct ccb_scsiio *csio;
1573                 struct scsi_vpd_unit_serial_number *serial_buf;
1574                 uint32_t  priority;
1575                 int changed;
1576                 int have_serialnum;
1577
1578                 changed = 1;
1579                 have_serialnum = 0;
1580                 csio = &done_ccb->csio;
1581                 priority = done_ccb->ccb_h.pinfo.priority;
1582                 serial_buf =
1583                     (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
1584
1585                 if (serial_buf == NULL) {
1586                         /*
1587                          * Don't process the command as it was never sent
1588                          */
1589                 } else if (cam_ccb_status(done_ccb) == CAM_REQ_CMP
1590                         && (serial_buf->length > 0)) {
1591                         have_serialnum = 1;
1592                         path->device->serial_num =
1593                                 (uint8_t *)malloc((serial_buf->length + 1),
1594                                                    M_CAMXPT, M_NOWAIT);
1595                         if (path->device->serial_num != NULL) {
1596                                 int start, slen;
1597
1598                                 start = strspn(serial_buf->serial_num, " ");
1599                                 slen = serial_buf->length - start;
1600                                 if (slen <= 0) {
1601                                         /*
1602                                          * SPC5r05 says that an all-space serial
1603                                          * number means no product serial number
1604                                          * is available
1605                                          */
1606                                         slen = 0;
1607                                 }
1608                                 /*
1609                                  * In apparent violation of the spec, some
1610                                  * devices pad their serial numbers with
1611                                  * trailing spaces. Remove them.
1612                                  */
1613                                 while (slen > 0 &&
1614                                     serial_buf->serial_num[start + slen - 1] == ' ')
1615                                         slen--;
1616                                 memcpy(path->device->serial_num,
1617                                        &serial_buf->serial_num[start], slen);
1618                                 path->device->serial_num_len = slen;
1619                                 path->device->serial_num[slen] = '\0';
1620                         }
1621                 } else if (cam_periph_error(done_ccb, 0,
1622                         SF_RETRY_UA|SF_NO_PRINT) == ERESTART) {
1623                         goto outr;
1624                 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1625                         /* Don't wedge the queue */
1626                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1627                                          /*run_queue*/TRUE);
1628                 }
1629
1630                 /*
1631                  * Let's see if we have seen this device before.
1632                  */
1633                 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
1634                         MD5_CTX context;
1635                         uint8_t digest[16];
1636
1637                         MD5Init(&context);
1638
1639                         MD5Update(&context,
1640                                   (unsigned char *)&path->device->inq_data,
1641                                   sizeof(struct scsi_inquiry_data));
1642
1643                         if (have_serialnum)
1644                                 MD5Update(&context, path->device->serial_num,
1645                                           path->device->serial_num_len);
1646
1647                         MD5Final(digest, &context);
1648                         if (bcmp(softc->digest, digest, 16) == 0)
1649                                 changed = 0;
1650
1651                         /*
1652                          * XXX Do we need to do a TUR in order to ensure
1653                          *     that the device really hasn't changed???
1654                          */
1655                         if ((changed != 0)
1656                          && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
1657                                 xpt_async(AC_LOST_DEVICE, path, NULL);
1658                 }
1659                 if (serial_buf != NULL)
1660                         free(serial_buf, M_CAMXPT);
1661
1662                 if (changed != 0) {
1663                         /*
1664                          * Now that we have all the necessary
1665                          * information to safely perform transfer
1666                          * negotiations... Controllers don't perform
1667                          * any negotiation or tagged queuing until
1668                          * after the first XPT_SET_TRAN_SETTINGS ccb is
1669                          * received.  So, on a new device, just retrieve
1670                          * the user settings, and set them as the current
1671                          * settings to set the device up.
1672                          */
1673                         proberequestdefaultnegotiation(periph);
1674                         xpt_release_ccb(done_ccb);
1675
1676                         /*
1677                          * Perform a TUR to allow the controller to
1678                          * perform any necessary transfer negotiation.
1679                          */
1680                         PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
1681                         xpt_schedule(periph, priority);
1682                         goto out;
1683                 }
1684                 xpt_release_ccb(done_ccb);
1685                 break;
1686         }
1687         case PROBE_TUR_FOR_NEGOTIATION:
1688         case PROBE_DV_EXIT:
1689                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1690                         if (cam_periph_error(done_ccb, 0, SF_NO_PRINT |
1691                             SF_NO_RECOVERY | SF_NO_RETRY) == ERESTART)
1692                                 goto outr;
1693                 }
1694                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1695                         /* Don't wedge the queue */
1696                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1697                                          /*run_queue*/TRUE);
1698                 }
1699                 /*
1700                  * Do Domain Validation for lun 0 on devices that claim
1701                  * to support Synchronous Transfer modes.
1702                  */
1703                 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
1704                  && done_ccb->ccb_h.target_lun == 0
1705                  && (path->device->inq_data.flags & SID_Sync) != 0
1706                  && (path->device->flags & CAM_DEV_IN_DV) == 0) {
1707                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1708                             ("Begin Domain Validation\n"));
1709                         path->device->flags |= CAM_DEV_IN_DV;
1710                         xpt_release_ccb(done_ccb);
1711                         PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1);
1712                         xpt_schedule(periph, priority);
1713                         goto out;
1714                 }
1715                 if (softc->action == PROBE_DV_EXIT) {
1716                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1717                             ("Leave Domain Validation\n"));
1718                 }
1719                 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1720                         path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1721                         xpt_acquire_device(path->device);
1722                 }
1723                 path->device->flags &=
1724                     ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
1725                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
1726                         /* Inform the XPT that a new device has been found */
1727                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
1728                         xpt_action(done_ccb);
1729                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
1730                                   done_ccb);
1731                 }
1732                 PROBE_SET_ACTION(softc, PROBE_DONE);
1733                 xpt_release_ccb(done_ccb);
1734                 break;
1735         case PROBE_INQUIRY_BASIC_DV1:
1736         case PROBE_INQUIRY_BASIC_DV2:
1737         {
1738                 struct scsi_inquiry_data *nbuf;
1739                 struct ccb_scsiio *csio;
1740
1741                 if (cam_ccb_status(done_ccb) != CAM_REQ_CMP) {
1742                         if (cam_periph_error(done_ccb, 0, SF_NO_PRINT |
1743                             SF_NO_RECOVERY | SF_NO_RETRY) == ERESTART)
1744                                 goto outr;
1745                 }
1746                 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1747                         /* Don't wedge the queue */
1748                         xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1749                                          /*run_queue*/TRUE);
1750                 }
1751                 csio = &done_ccb->csio;
1752                 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
1753                 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
1754                         xpt_print(path,
1755                             "inquiry data fails comparison at DV%d step\n",
1756                             softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
1757                         if (proberequestbackoff(periph, path->device)) {
1758                                 path->device->flags &= ~CAM_DEV_IN_DV;
1759                                 PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION);
1760                         } else {
1761                                 /* give up */
1762                                 PROBE_SET_ACTION(softc, PROBE_DV_EXIT);
1763                         }
1764                         free(nbuf, M_CAMXPT);
1765                         xpt_release_ccb(done_ccb);
1766                         xpt_schedule(periph, priority);
1767                         goto out;
1768                 }
1769                 free(nbuf, M_CAMXPT);
1770                 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
1771                         PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2);
1772                         xpt_release_ccb(done_ccb);
1773                         xpt_schedule(periph, priority);
1774                         goto out;
1775                 }
1776                 if (softc->action == PROBE_INQUIRY_BASIC_DV2) {
1777                         CAM_DEBUG(periph->path, CAM_DEBUG_PROBE,
1778                             ("Leave Domain Validation Successfully\n"));
1779                 }
1780                 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1781                         path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1782                         xpt_acquire_device(path->device);
1783                 }
1784                 path->device->flags &=
1785                     ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
1786                 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
1787                         /* Inform the XPT that a new device has been found */
1788                         done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
1789                         xpt_action(done_ccb);
1790                         xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
1791                                   done_ccb);
1792                 }
1793                 PROBE_SET_ACTION(softc, PROBE_DONE);
1794                 xpt_release_ccb(done_ccb);
1795                 break;
1796         }
1797         default:
1798                 panic("probedone: invalid action state 0x%x\n", softc->action);
1799         }
1800         done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
1801         TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
1802         done_ccb->ccb_h.status = CAM_REQ_CMP;
1803         xpt_done(done_ccb);
1804         if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
1805                 CAM_DEBUG(periph->path, CAM_DEBUG_PROBE, ("Probe completed\n"));
1806                 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1807                 cam_release_devq(path, 0, 0, 0, FALSE);
1808                 cam_periph_release_locked(periph);
1809                 cam_periph_invalidate(periph);
1810                 cam_periph_release_locked(periph);
1811         } else {
1812                 probeschedule(periph);
1813                 goto out;
1814         }
1815 }
1816
1817 static void
1818 probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new,
1819     probe_flags flags)
1820 {
1821         struct cam_path *tp;
1822         struct scsi_report_luns_data *old;
1823         u_int idx1, idx2, nlun_old, nlun_new;
1824         lun_id_t this_lun;
1825         uint8_t *ol, *nl;
1826
1827         if (path->target == NULL) {
1828                 return;
1829         }
1830         mtx_lock(&path->target->luns_mtx);
1831         old = path->target->luns;
1832         path->target->luns = new;
1833         mtx_unlock(&path->target->luns_mtx);
1834         if (old == NULL)
1835                 return;
1836         nlun_old = scsi_4btoul(old->length) / 8;
1837         nlun_new = scsi_4btoul(new->length) / 8;
1838
1839         /*
1840          * We are not going to assume sorted lists. Deal.
1841          */
1842         for (idx1 = 0; idx1 < nlun_old; idx1++) {
1843                 ol = old->luns[idx1].lundata;
1844                 for (idx2 = 0; idx2 < nlun_new; idx2++) {
1845                         nl = new->luns[idx2].lundata;
1846                         if (memcmp(nl, ol, 8) == 0) {
1847                                 break;
1848                         }
1849                 }
1850                 if (idx2 < nlun_new) {
1851                         continue;
1852                 }
1853                 /*
1854                  * An 'old' item not in the 'new' list.
1855                  * Nuke it. Except that if it is lun 0,
1856                  * that would be what the probe state
1857                  * machine is currently working on,
1858                  * so we won't do that.
1859                  */
1860                 CAM_GET_LUN(old, idx1, this_lun);
1861                 if (this_lun == 0) {
1862                         continue;
1863                 }
1864
1865                 /*
1866                  * We also cannot nuke it if it is
1867                  * not in a lun format we understand
1868                  * and replace the LUN with a "simple" LUN
1869                  * if that is all the HBA supports.
1870                  */
1871                 if (!(flags & PROBE_EXTLUN)) {
1872                         if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1))
1873                                 continue;
1874                         CAM_GET_SIMPLE_LUN(old, idx1, this_lun);
1875                 }
1876
1877                 if (xpt_create_path(&tp, NULL, xpt_path_path_id(path),
1878                     xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) {
1879                         xpt_async(AC_LOST_DEVICE, tp, NULL);
1880                         xpt_free_path(tp);
1881                 }
1882         }
1883         free(old, M_CAMXPT);
1884 }
1885
1886 static void
1887 probecleanup(struct cam_periph *periph)
1888 {
1889         free(periph->softc, M_CAMXPT);
1890 }
1891
1892 static void
1893 scsi_find_quirk(struct cam_ed *device)
1894 {
1895         struct scsi_quirk_entry *quirk;
1896         caddr_t match;
1897
1898         match = cam_quirkmatch((caddr_t)&device->inq_data,
1899                                (caddr_t)scsi_quirk_table,
1900                                nitems(scsi_quirk_table),
1901                                sizeof(*scsi_quirk_table), scsi_inquiry_match);
1902
1903         if (match == NULL)
1904                 panic("xpt_find_quirk: device didn't match wildcard entry!!");
1905
1906         quirk = (struct scsi_quirk_entry *)match;
1907         device->quirk = quirk;
1908         device->mintags = quirk->mintags;
1909         device->maxtags = quirk->maxtags;
1910 }
1911
1912 typedef struct {
1913         union   ccb *request_ccb;
1914         struct  ccb_pathinq *cpi;
1915         int     counter;
1916         int     lunindex[0];
1917 } scsi_scan_bus_info;
1918
1919 /*
1920  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
1921  * As the scan progresses, scsi_scan_bus is used as the
1922  * callback on completion function.
1923  */
1924 static void
1925 scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
1926 {
1927         struct mtx *mtx;
1928
1929         CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
1930                   ("scsi_scan_bus\n"));
1931         switch (request_ccb->ccb_h.func_code) {
1932         case XPT_SCAN_BUS:
1933         case XPT_SCAN_TGT:
1934         {
1935                 scsi_scan_bus_info *scan_info;
1936                 union   ccb *work_ccb, *reset_ccb;
1937                 struct  cam_path *path;
1938                 u_int   i;
1939                 u_int   low_target, max_target;
1940                 u_int   initiator_id;
1941
1942                 /* Find out the characteristics of the bus */
1943                 work_ccb = xpt_alloc_ccb_nowait();
1944                 if (work_ccb == NULL) {
1945                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1946                         xpt_done(request_ccb);
1947                         return;
1948                 }
1949                 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
1950                               request_ccb->ccb_h.pinfo.priority);
1951                 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
1952                 xpt_action(work_ccb);
1953                 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
1954                         request_ccb->ccb_h.status = work_ccb->ccb_h.status;
1955                         xpt_free_ccb(work_ccb);
1956                         xpt_done(request_ccb);
1957                         return;
1958                 }
1959
1960                 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
1961                         /*
1962                          * Can't scan the bus on an adapter that
1963                          * cannot perform the initiator role.
1964                          */
1965                         request_ccb->ccb_h.status = CAM_REQ_CMP;
1966                         xpt_free_ccb(work_ccb);
1967                         xpt_done(request_ccb);
1968                         return;
1969                 }
1970
1971                 /* We may need to reset bus first, if we haven't done it yet. */
1972                 if ((work_ccb->cpi.hba_inquiry &
1973                     (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) &&
1974                     !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) &&
1975                     !timevalisset(&request_ccb->ccb_h.path->bus->last_reset) &&
1976                     (reset_ccb = xpt_alloc_ccb_nowait()) != NULL) {
1977                         xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path,
1978                               CAM_PRIORITY_NONE);
1979                         reset_ccb->ccb_h.func_code = XPT_RESET_BUS;
1980                         xpt_action(reset_ccb);
1981                         if (reset_ccb->ccb_h.status != CAM_REQ_CMP) {
1982                                 request_ccb->ccb_h.status = reset_ccb->ccb_h.status;
1983                                 xpt_free_ccb(reset_ccb);
1984                                 xpt_free_ccb(work_ccb);
1985                                 xpt_done(request_ccb);
1986                                 return;
1987                         }
1988                         xpt_free_ccb(reset_ccb);
1989                 }
1990
1991                 /* Save some state for use while we probe for devices */
1992                 scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) +
1993                     (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT);
1994                 if (scan_info == NULL) {
1995                         request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1996                         xpt_free_ccb(work_ccb);
1997                         xpt_done(request_ccb);
1998                         return;
1999                 }
2000                 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2001                    ("SCAN start for %p\n", scan_info));
2002                 scan_info->request_ccb = request_ccb;
2003                 scan_info->cpi = &work_ccb->cpi;
2004
2005                 /* Cache on our stack so we can work asynchronously */
2006                 max_target = scan_info->cpi->max_target;
2007                 low_target = 0;
2008                 initiator_id = scan_info->cpi->initiator_id;
2009
2010                 /*
2011                  * We can scan all targets in parallel, or do it sequentially.
2012                  */
2013
2014                 if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) {
2015                         max_target = low_target = request_ccb->ccb_h.target_id;
2016                         scan_info->counter = 0;
2017                 } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
2018                         max_target = 0;
2019                         scan_info->counter = 0;
2020                 } else {
2021                         scan_info->counter = scan_info->cpi->max_target + 1;
2022                         if (scan_info->cpi->initiator_id < scan_info->counter) {
2023                                 scan_info->counter--;
2024                         }
2025                 }
2026                 mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
2027                 mtx_unlock(mtx);
2028
2029                 for (i = low_target; i <= max_target; i++) {
2030                         cam_status status;
2031                         if (i == initiator_id)
2032                                 continue;
2033
2034                         status = xpt_create_path(&path, NULL,
2035                                                  request_ccb->ccb_h.path_id,
2036                                                  i, 0);
2037                         if (status != CAM_REQ_CMP) {
2038                                 printf("scsi_scan_bus: xpt_create_path failed"
2039                                        " with status %#x, bus scan halted\n",
2040                                        status);
2041                                 free(scan_info, M_CAMXPT);
2042                                 request_ccb->ccb_h.status = status;
2043                                 xpt_free_ccb(work_ccb);
2044                                 xpt_done(request_ccb);
2045                                 break;
2046                         }
2047                         work_ccb = xpt_alloc_ccb_nowait();
2048                         if (work_ccb == NULL) {
2049                                 xpt_free_ccb((union ccb *)scan_info->cpi);
2050                                 free(scan_info, M_CAMXPT);
2051                                 xpt_free_path(path);
2052                                 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2053                                 xpt_done(request_ccb);
2054                                 break;
2055                         }
2056                         xpt_setup_ccb(&work_ccb->ccb_h, path,
2057                                       request_ccb->ccb_h.pinfo.priority);
2058                         work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2059                         work_ccb->ccb_h.cbfcnp = scsi_scan_bus;
2060                         work_ccb->ccb_h.flags |= CAM_UNLOCKED;
2061                         work_ccb->ccb_h.ppriv_ptr0 = scan_info;
2062                         work_ccb->crcn.flags = request_ccb->crcn.flags;
2063                         xpt_action(work_ccb);
2064                 }
2065
2066                 mtx_lock(mtx);
2067                 break;
2068         }
2069         case XPT_SCAN_LUN:
2070         {
2071                 cam_status status;
2072                 struct cam_path *path, *oldpath;
2073                 scsi_scan_bus_info *scan_info;
2074                 struct cam_et *target;
2075                 struct cam_ed *device, *nextdev;
2076                 int next_target;
2077                 path_id_t path_id;
2078                 target_id_t target_id;
2079                 lun_id_t lun_id;
2080
2081                 oldpath = request_ccb->ccb_h.path;
2082
2083                 status = cam_ccb_status(request_ccb);
2084                 scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
2085                 path_id = request_ccb->ccb_h.path_id;
2086                 target_id = request_ccb->ccb_h.target_id;
2087                 lun_id = request_ccb->ccb_h.target_lun;
2088                 target = request_ccb->ccb_h.path->target;
2089                 next_target = 1;
2090
2091                 mtx = xpt_path_mtx(scan_info->request_ccb->ccb_h.path);
2092                 mtx_lock(mtx);
2093                 mtx_lock(&target->luns_mtx);
2094                 if (target->luns) {
2095                         lun_id_t first;
2096                         u_int nluns = scsi_4btoul(target->luns->length) / 8;
2097
2098                         /*
2099                          * Make sure we skip over lun 0 if it's the first member
2100                          * of the list as we've actually just finished probing
2101                          * it.
2102                          */
2103                         CAM_GET_LUN(target->luns, 0, first);
2104                         if (first == 0 && scan_info->lunindex[target_id] == 0) {
2105                                 scan_info->lunindex[target_id]++;
2106                         }
2107
2108                         /*
2109                          * Skip any LUNs that the HBA can't deal with.
2110                          */
2111                         while (scan_info->lunindex[target_id] < nluns) {
2112                                 if (scan_info->cpi->hba_misc & PIM_EXTLUNS) {
2113                                         CAM_GET_LUN(target->luns,
2114                                             scan_info->lunindex[target_id],
2115                                             lun_id);
2116                                         break;
2117                                 }
2118
2119                                 if (CAM_CAN_GET_SIMPLE_LUN(target->luns,
2120                                     scan_info->lunindex[target_id])) {
2121                                         CAM_GET_SIMPLE_LUN(target->luns,
2122                                             scan_info->lunindex[target_id],
2123                                             lun_id);
2124                                         break;
2125                                 }
2126                                         
2127                                 scan_info->lunindex[target_id]++;
2128                         }
2129
2130                         if (scan_info->lunindex[target_id] < nluns) {
2131                                 mtx_unlock(&target->luns_mtx);
2132                                 next_target = 0;
2133                                 CAM_DEBUG(request_ccb->ccb_h.path,
2134                                     CAM_DEBUG_PROBE,
2135                                    ("next lun to try at index %u is %jx\n",
2136                                    scan_info->lunindex[target_id],
2137                                    (uintmax_t)lun_id));
2138                                 scan_info->lunindex[target_id]++;
2139                         } else {
2140                                 mtx_unlock(&target->luns_mtx);
2141                                 /* We're done with scanning all luns. */
2142                         }
2143                 } else {
2144                         mtx_unlock(&target->luns_mtx);
2145                         device = request_ccb->ccb_h.path->device;
2146                         /* Continue sequential LUN scan if: */
2147                         /*  -- we have more LUNs that need recheck */
2148                         mtx_lock(&target->bus->eb_mtx);
2149                         nextdev = device;
2150                         while ((nextdev = TAILQ_NEXT(nextdev, links)) != NULL)
2151                                 if ((nextdev->flags & CAM_DEV_UNCONFIGURED) == 0)
2152                                         break;
2153                         mtx_unlock(&target->bus->eb_mtx);
2154                         if (nextdev != NULL) {
2155                                 next_target = 0;
2156                         /*  -- stop if CAM_QUIRK_NOLUNS is set. */
2157                         } else if (SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOLUNS) {
2158                                 next_target = 1;
2159                         /*  -- this LUN is connected and its SCSI version
2160                          *     allows more LUNs. */
2161                         } else if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
2162                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
2163                                     CAN_SRCH_HI_DENSE(device))
2164                                         next_target = 0;
2165                         /*  -- this LUN is disconnected, its SCSI version
2166                          *     allows more LUNs and we guess they may be. */
2167                         } else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
2168                                 if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
2169                                     CAN_SRCH_HI_SPARSE(device))
2170                                         next_target = 0;
2171                         }
2172                         if (next_target == 0) {
2173                                 lun_id++;
2174                                 if (lun_id > scan_info->cpi->max_lun)
2175                                         next_target = 1;
2176                         }
2177                 }
2178
2179                 /*
2180                  * Check to see if we scan any further luns.
2181                  */
2182                 if (next_target) {
2183                         int done;
2184
2185                         /*
2186                          * Free the current request path- we're done with it.
2187                          */
2188                         xpt_free_path(oldpath);
2189  hop_again:
2190                         done = 0;
2191                         if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) {
2192                                 done = 1;
2193                         } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
2194                                 scan_info->counter++;
2195                                 if (scan_info->counter ==
2196                                     scan_info->cpi->initiator_id) {
2197                                         scan_info->counter++;
2198                                 }
2199                                 if (scan_info->counter >=
2200                                     scan_info->cpi->max_target+1) {
2201                                         done = 1;
2202                                 }
2203                         } else {
2204                                 scan_info->counter--;
2205                                 if (scan_info->counter == 0) {
2206                                         done = 1;
2207                                 }
2208                         }
2209                         if (done) {
2210                                 mtx_unlock(mtx);
2211                                 xpt_free_ccb(request_ccb);
2212                                 xpt_free_ccb((union ccb *)scan_info->cpi);
2213                                 request_ccb = scan_info->request_ccb;
2214                                 CAM_DEBUG(request_ccb->ccb_h.path,
2215                                     CAM_DEBUG_TRACE,
2216                                    ("SCAN done for %p\n", scan_info));
2217                                 free(scan_info, M_CAMXPT);
2218                                 request_ccb->ccb_h.status = CAM_REQ_CMP;
2219                                 xpt_done(request_ccb);
2220                                 break;
2221                         }
2222
2223                         if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
2224                                 mtx_unlock(mtx);
2225                                 xpt_free_ccb(request_ccb);
2226                                 break;
2227                         }
2228                         status = xpt_create_path(&path, NULL,
2229                             scan_info->request_ccb->ccb_h.path_id,
2230                             scan_info->counter, 0);
2231                         if (status != CAM_REQ_CMP) {
2232                                 mtx_unlock(mtx);
2233                                 printf("scsi_scan_bus: xpt_create_path failed"
2234                                     " with status %#x, bus scan halted\n",
2235                                     status);
2236                                 xpt_free_ccb(request_ccb);
2237                                 xpt_free_ccb((union ccb *)scan_info->cpi);
2238                                 request_ccb = scan_info->request_ccb;
2239                                 free(scan_info, M_CAMXPT);
2240                                 request_ccb->ccb_h.status = status;
2241                                 xpt_done(request_ccb);
2242                                 break;
2243                         }
2244                         xpt_setup_ccb(&request_ccb->ccb_h, path,
2245                             request_ccb->ccb_h.pinfo.priority);
2246                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2247                         request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
2248                         request_ccb->ccb_h.flags |= CAM_UNLOCKED;
2249                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
2250                         request_ccb->crcn.flags =
2251                             scan_info->request_ccb->crcn.flags;
2252                 } else {
2253                         status = xpt_create_path(&path, NULL,
2254                                                  path_id, target_id, lun_id);
2255                         /*
2256                          * Free the old request path- we're done with it. We
2257                          * do this *after* creating the new path so that
2258                          * we don't remove a target that has our lun list
2259                          * in the case that lun 0 is not present.
2260                          */
2261                         xpt_free_path(oldpath);
2262                         if (status != CAM_REQ_CMP) {
2263                                 printf("scsi_scan_bus: xpt_create_path failed "
2264                                        "with status %#x, halting LUN scan\n",
2265                                        status);
2266                                 goto hop_again;
2267                         }
2268                         xpt_setup_ccb(&request_ccb->ccb_h, path,
2269                                       request_ccb->ccb_h.pinfo.priority);
2270                         request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2271                         request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
2272                         request_ccb->ccb_h.flags |= CAM_UNLOCKED;
2273                         request_ccb->ccb_h.ppriv_ptr0 = scan_info;
2274                         request_ccb->crcn.flags =
2275                                 scan_info->request_ccb->crcn.flags;
2276                 }
2277                 mtx_unlock(mtx);
2278                 xpt_action(request_ccb);
2279                 break;
2280         }
2281         default:
2282                 break;
2283         }
2284 }
2285
2286 static void
2287 scsi_scan_lun(struct cam_periph *periph, struct cam_path *path,
2288              cam_flags flags, union ccb *request_ccb)
2289 {
2290         struct ccb_pathinq cpi;
2291         cam_status status;
2292         struct cam_path *new_path;
2293         struct cam_periph *old_periph;
2294         int lock;
2295
2296         CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n"));
2297
2298         memset(&cpi, 0, sizeof(cpi));
2299         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
2300         cpi.ccb_h.func_code = XPT_PATH_INQ;
2301         xpt_action((union ccb *)&cpi);
2302
2303         if (cpi.ccb_h.status != CAM_REQ_CMP) {
2304                 if (request_ccb != NULL) {
2305                         request_ccb->ccb_h.status = cpi.ccb_h.status;
2306                         xpt_done(request_ccb);
2307                 }
2308                 return;
2309         }
2310
2311         if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
2312                 /*
2313                  * Can't scan the bus on an adapter that
2314                  * cannot perform the initiator role.
2315                  */
2316                 if (request_ccb != NULL) {
2317                         request_ccb->ccb_h.status = CAM_REQ_CMP;
2318                         xpt_done(request_ccb);
2319                 }
2320                 return;
2321         }
2322
2323         if (request_ccb == NULL) {
2324                 request_ccb = xpt_alloc_ccb_nowait();
2325                 if (request_ccb == NULL) {
2326                         xpt_print(path, "scsi_scan_lun: can't allocate CCB, "
2327                             "can't continue\n");
2328                         return;
2329                 }
2330                 status = xpt_create_path(&new_path, NULL,
2331                                           path->bus->path_id,
2332                                           path->target->target_id,
2333                                           path->device->lun_id);
2334                 if (status != CAM_REQ_CMP) {
2335                         xpt_print(path, "scsi_scan_lun: can't create path, "
2336                             "can't continue\n");
2337                         xpt_free_ccb(request_ccb);
2338                         return;
2339                 }
2340                 xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
2341                 request_ccb->ccb_h.cbfcnp = xptscandone;
2342                 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
2343                 request_ccb->ccb_h.flags |= CAM_UNLOCKED;
2344                 request_ccb->crcn.flags = flags;
2345         }
2346
2347         lock = (xpt_path_owned(path) == 0);
2348         if (lock)
2349                 xpt_path_lock(path);
2350         if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
2351                 if ((old_periph->flags & CAM_PERIPH_INVALID) == 0) {
2352                         probe_softc *softc;
2353
2354                         softc = (probe_softc *)old_periph->softc;
2355                         TAILQ_INSERT_TAIL(&softc->request_ccbs,
2356                             &request_ccb->ccb_h, periph_links.tqe);
2357                 } else {
2358                         request_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2359                         xpt_done(request_ccb);
2360                 }
2361         } else {
2362                 status = cam_periph_alloc(proberegister, NULL, probecleanup,
2363                                           probestart, "probe",
2364                                           CAM_PERIPH_BIO,
2365                                           request_ccb->ccb_h.path, NULL, 0,
2366                                           request_ccb);
2367
2368                 if (status != CAM_REQ_CMP) {
2369                         xpt_print(path, "scsi_scan_lun: cam_alloc_periph "
2370                             "returned an error, can't continue probe\n");
2371                         request_ccb->ccb_h.status = status;
2372                         xpt_done(request_ccb);
2373                 }
2374         }
2375         if (lock)
2376                 xpt_path_unlock(path);
2377 }
2378
2379 static void
2380 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
2381 {
2382
2383         xpt_free_path(done_ccb->ccb_h.path);
2384         xpt_free_ccb(done_ccb);
2385 }
2386
2387 static struct cam_ed *
2388 scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
2389 {
2390         struct scsi_quirk_entry *quirk;
2391         struct cam_ed *device;
2392
2393         device = xpt_alloc_device(bus, target, lun_id);
2394         if (device == NULL)
2395                 return (NULL);
2396
2397         /*
2398          * Take the default quirk entry until we have inquiry
2399          * data and can determine a better quirk to use.
2400          */
2401         quirk = &scsi_quirk_table[nitems(scsi_quirk_table) - 1];
2402         device->quirk = (void *)quirk;
2403         device->mintags = quirk->mintags;
2404         device->maxtags = quirk->maxtags;
2405         bzero(&device->inq_data, sizeof(device->inq_data));
2406         device->inq_flags = 0;
2407         device->queue_flags = 0;
2408         device->serial_num = NULL;
2409         device->serial_num_len = 0;
2410         device->device_id = NULL;
2411         device->device_id_len = 0;
2412         device->supported_vpds = NULL;
2413         device->supported_vpds_len = 0;
2414         return (device);
2415 }
2416
2417 static void
2418 scsi_devise_transport(struct cam_path *path)
2419 {
2420         struct ccb_pathinq cpi;
2421         struct ccb_trans_settings cts;
2422         struct scsi_inquiry_data *inq_buf;
2423
2424         /* Get transport information from the SIM */
2425         memset(&cpi, 0, sizeof(cpi));
2426         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
2427         cpi.ccb_h.func_code = XPT_PATH_INQ;
2428         xpt_action((union ccb *)&cpi);
2429
2430         inq_buf = NULL;
2431         if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
2432                 inq_buf = &path->device->inq_data;
2433         path->device->protocol = PROTO_SCSI;
2434         path->device->protocol_version =
2435             inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
2436         path->device->transport = cpi.transport;
2437         path->device->transport_version = cpi.transport_version;
2438
2439         /*
2440          * Any device not using SPI3 features should
2441          * be considered SPI2 or lower.
2442          */
2443         if (inq_buf != NULL) {
2444                 if (path->device->transport == XPORT_SPI
2445                  && (inq_buf->spi3data & SID_SPI_MASK) == 0
2446                  && path->device->transport_version > 2)
2447                         path->device->transport_version = 2;
2448         } else {
2449                 struct cam_ed* otherdev;
2450
2451                 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
2452                      otherdev != NULL;
2453                      otherdev = TAILQ_NEXT(otherdev, links)) {
2454                         if (otherdev != path->device)
2455                                 break;
2456                 }
2457
2458                 if (otherdev != NULL) {
2459                         /*
2460                          * Initially assume the same versioning as
2461                          * prior luns for this target.
2462                          */
2463                         path->device->protocol_version =
2464                             otherdev->protocol_version;
2465                         path->device->transport_version =
2466                             otherdev->transport_version;
2467                 } else {
2468                         /* Until we know better, opt for safety */
2469                         path->device->protocol_version = 2;
2470                         if (path->device->transport == XPORT_SPI)
2471                                 path->device->transport_version = 2;
2472                         else
2473                                 path->device->transport_version = 0;
2474                 }
2475         }
2476
2477         /*
2478          * XXX
2479          * For a device compliant with SPC-2 we should be able
2480          * to determine the transport version supported by
2481          * scrutinizing the version descriptors in the
2482          * inquiry buffer.
2483          */
2484
2485         /* Tell the controller what we think */
2486         memset(&cts, 0, sizeof(cts));
2487         xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
2488         cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
2489         cts.type = CTS_TYPE_CURRENT_SETTINGS;
2490         cts.transport = path->device->transport;
2491         cts.transport_version = path->device->transport_version;
2492         cts.protocol = path->device->protocol;
2493         cts.protocol_version = path->device->protocol_version;
2494         cts.proto_specific.valid = 0;
2495         cts.xport_specific.valid = 0;
2496         xpt_action((union ccb *)&cts);
2497 }
2498
2499 static void
2500 scsi_dev_advinfo(union ccb *start_ccb)
2501 {
2502         struct cam_ed *device;
2503         struct ccb_dev_advinfo *cdai;
2504         off_t amt;
2505
2506         xpt_path_assert(start_ccb->ccb_h.path, MA_OWNED);
2507         start_ccb->ccb_h.status = CAM_REQ_INVALID;
2508         device = start_ccb->ccb_h.path->device;
2509         cdai = &start_ccb->cdai;
2510         switch(cdai->buftype) {
2511         case CDAI_TYPE_SCSI_DEVID:
2512                 if (cdai->flags & CDAI_FLAG_STORE)
2513                         return;
2514                 cdai->provsiz = device->device_id_len;
2515                 if (device->device_id_len == 0)
2516                         break;
2517                 amt = device->device_id_len;
2518                 if (cdai->provsiz > cdai->bufsiz)
2519                         amt = cdai->bufsiz;
2520                 memcpy(cdai->buf, device->device_id, amt);
2521                 break;
2522         case CDAI_TYPE_SERIAL_NUM:
2523                 if (cdai->flags & CDAI_FLAG_STORE)
2524                         return;
2525                 cdai->provsiz = device->serial_num_len;
2526                 if (device->serial_num_len == 0)
2527                         break;
2528                 amt = device->serial_num_len;
2529                 if (cdai->provsiz > cdai->bufsiz)
2530                         amt = cdai->bufsiz;
2531                 memcpy(cdai->buf, device->serial_num, amt);
2532                 break;
2533         case CDAI_TYPE_PHYS_PATH:
2534                 if (cdai->flags & CDAI_FLAG_STORE) {
2535                         if (device->physpath != NULL) {
2536                                 free(device->physpath, M_CAMXPT);
2537                                 device->physpath = NULL;
2538                                 device->physpath_len = 0;
2539                         }
2540                         /* Clear existing buffer if zero length */
2541                         if (cdai->bufsiz == 0)
2542                                 break;
2543                         device->physpath = malloc(cdai->bufsiz, M_CAMXPT, M_NOWAIT);
2544                         if (device->physpath == NULL) {
2545                                 start_ccb->ccb_h.status = CAM_REQ_ABORTED;
2546                                 return;
2547                         }
2548                         device->physpath_len = cdai->bufsiz;
2549                         memcpy(device->physpath, cdai->buf, cdai->bufsiz);
2550                 } else {
2551                         cdai->provsiz = device->physpath_len;
2552                         if (device->physpath_len == 0)
2553                                 break;
2554                         amt = device->physpath_len;
2555                         if (cdai->provsiz > cdai->bufsiz)
2556                                 amt = cdai->bufsiz;
2557                         memcpy(cdai->buf, device->physpath, amt);
2558                 }
2559                 break;
2560         case CDAI_TYPE_RCAPLONG:
2561                 if (cdai->flags & CDAI_FLAG_STORE) {
2562                         if (device->rcap_buf != NULL) {
2563                                 free(device->rcap_buf, M_CAMXPT);
2564                                 device->rcap_buf = NULL;
2565                         }
2566
2567                         device->rcap_len = cdai->bufsiz;
2568                         /* Clear existing buffer if zero length */
2569                         if (cdai->bufsiz == 0)
2570                                 break;
2571
2572                         device->rcap_buf = malloc(cdai->bufsiz, M_CAMXPT,
2573                                                   M_NOWAIT);
2574                         if (device->rcap_buf == NULL) {
2575                                 start_ccb->ccb_h.status = CAM_REQ_ABORTED;
2576                                 return;
2577                         }
2578
2579                         memcpy(device->rcap_buf, cdai->buf, cdai->bufsiz);
2580                 } else {
2581                         cdai->provsiz = device->rcap_len;
2582                         if (device->rcap_len == 0)
2583                                 break;
2584                         amt = device->rcap_len;
2585                         if (cdai->provsiz > cdai->bufsiz)
2586                                 amt = cdai->bufsiz;
2587                         memcpy(cdai->buf, device->rcap_buf, amt);
2588                 }
2589                 break;
2590         case CDAI_TYPE_EXT_INQ:
2591                 /*
2592                  * We fetch extended inquiry data during probe, if
2593                  * available.  We don't allow changing it.
2594                  */
2595                 if (cdai->flags & CDAI_FLAG_STORE)
2596                         return;
2597                 cdai->provsiz = device->ext_inq_len;
2598                 if (device->ext_inq_len == 0)
2599                         break;
2600                 amt = device->ext_inq_len;
2601                 if (cdai->provsiz > cdai->bufsiz)
2602                         amt = cdai->bufsiz;
2603                 memcpy(cdai->buf, device->ext_inq, amt);
2604                 break;
2605         default:
2606                 return;
2607         }
2608         start_ccb->ccb_h.status = CAM_REQ_CMP;
2609
2610         if (cdai->flags & CDAI_FLAG_STORE) {
2611                 xpt_async(AC_ADVINFO_CHANGED, start_ccb->ccb_h.path,
2612                           (void *)(uintptr_t)cdai->buftype);
2613         }
2614 }
2615
2616 static void
2617 scsi_action(union ccb *start_ccb)
2618 {
2619
2620         if (start_ccb->ccb_h.func_code != XPT_SCSI_IO) {
2621                 KASSERT((start_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) == 0,
2622                     ("%s: ccb %p, func_code %#x should not be allocated "
2623                     "from UMA zone\n",
2624                     __func__, start_ccb, start_ccb->ccb_h.func_code));
2625         }
2626
2627         switch (start_ccb->ccb_h.func_code) {
2628         case XPT_SET_TRAN_SETTINGS:
2629         {
2630                 scsi_set_transfer_settings(&start_ccb->cts,
2631                                            start_ccb->ccb_h.path,
2632                                            /*async_update*/FALSE);
2633                 break;
2634         }
2635         case XPT_SCAN_BUS:
2636         case XPT_SCAN_TGT:
2637                 scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
2638                 break;
2639         case XPT_SCAN_LUN:
2640                 scsi_scan_lun(start_ccb->ccb_h.path->periph,
2641                               start_ccb->ccb_h.path, start_ccb->crcn.flags,
2642                               start_ccb);
2643                 break;
2644         case XPT_DEV_ADVINFO:
2645         {
2646                 scsi_dev_advinfo(start_ccb);
2647                 break;
2648         }
2649         default:
2650                 xpt_action_default(start_ccb);
2651                 break;
2652         }
2653 }
2654
2655 static void
2656 scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_path *path,
2657                            int async_update)
2658 {
2659         struct  ccb_pathinq cpi;
2660         struct  ccb_trans_settings cur_cts;
2661         struct  ccb_trans_settings_scsi *scsi;
2662         struct  ccb_trans_settings_scsi *cur_scsi;
2663         struct  scsi_inquiry_data *inq_data;
2664         struct  cam_ed *device;
2665
2666         if (path == NULL || (device = path->device) == NULL) {
2667                 cts->ccb_h.status = CAM_PATH_INVALID;
2668                 xpt_done((union ccb *)cts);
2669                 return;
2670         }
2671
2672         if (cts->protocol == PROTO_UNKNOWN
2673          || cts->protocol == PROTO_UNSPECIFIED) {
2674                 cts->protocol = device->protocol;
2675                 cts->protocol_version = device->protocol_version;
2676         }
2677
2678         if (cts->protocol_version == PROTO_VERSION_UNKNOWN
2679          || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
2680                 cts->protocol_version = device->protocol_version;
2681
2682         if (cts->protocol != device->protocol) {
2683                 xpt_print(path, "Uninitialized Protocol %x:%x?\n",
2684                        cts->protocol, device->protocol);
2685                 cts->protocol = device->protocol;
2686         }
2687
2688         if (cts->protocol_version > device->protocol_version) {
2689                 if (bootverbose) {
2690                         xpt_print(path, "Down reving Protocol "
2691                             "Version from %d to %d?\n", cts->protocol_version,
2692                             device->protocol_version);
2693                 }
2694                 cts->protocol_version = device->protocol_version;
2695         }
2696
2697         if (cts->transport == XPORT_UNKNOWN
2698          || cts->transport == XPORT_UNSPECIFIED) {
2699                 cts->transport = device->transport;
2700                 cts->transport_version = device->transport_version;
2701         }
2702
2703         if (cts->transport_version == XPORT_VERSION_UNKNOWN
2704          || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
2705                 cts->transport_version = device->transport_version;
2706
2707         if (cts->transport != device->transport) {
2708                 xpt_print(path, "Uninitialized Transport %x:%x?\n",
2709                     cts->transport, device->transport);
2710                 cts->transport = device->transport;
2711         }
2712
2713         if (cts->transport_version > device->transport_version) {
2714                 if (bootverbose) {
2715                         xpt_print(path, "Down reving Transport "
2716                             "Version from %d to %d?\n", cts->transport_version,
2717                             device->transport_version);
2718                 }
2719                 cts->transport_version = device->transport_version;
2720         }
2721
2722         /*
2723          * Nothing more of interest to do unless
2724          * this is a device connected via the
2725          * SCSI protocol.
2726          */
2727         if (cts->protocol != PROTO_SCSI) {
2728                 if (async_update == FALSE)
2729                         xpt_action_default((union ccb *)cts);
2730                 return;
2731         }
2732
2733         inq_data = &device->inq_data;
2734         scsi = &cts->proto_specific.scsi;
2735         memset(&cpi, 0, sizeof(cpi));
2736         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
2737         cpi.ccb_h.func_code = XPT_PATH_INQ;
2738         xpt_action((union ccb *)&cpi);
2739
2740         /* SCSI specific sanity checking */
2741         if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
2742          || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
2743          || (device->queue_flags & SCP_QUEUE_DQUE) != 0
2744          || (device->mintags == 0)) {
2745                 /*
2746                  * Can't tag on hardware that doesn't support tags,
2747                  * doesn't have it enabled, or has broken tag support.
2748                  */
2749                 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2750         }
2751
2752         if (async_update == FALSE) {
2753                 /*
2754                  * Perform sanity checking against what the
2755                  * controller and device can do.
2756                  */
2757                 memset(&cur_cts, 0, sizeof(cur_cts));
2758                 xpt_setup_ccb(&cur_cts.ccb_h, path, CAM_PRIORITY_NONE);
2759                 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2760                 cur_cts.type = cts->type;
2761                 xpt_action((union ccb *)&cur_cts);
2762                 if (cam_ccb_status((union ccb *)&cur_cts) != CAM_REQ_CMP) {
2763                         return;
2764                 }
2765                 cur_scsi = &cur_cts.proto_specific.scsi;
2766                 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
2767                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2768                         scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
2769                 }
2770                 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
2771                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2772         }
2773
2774         /* SPI specific sanity checking */
2775         if (cts->transport == XPORT_SPI && async_update == FALSE) {
2776                 u_int spi3caps;
2777                 struct ccb_trans_settings_spi *spi;
2778                 struct ccb_trans_settings_spi *cur_spi;
2779
2780                 spi = &cts->xport_specific.spi;
2781
2782                 cur_spi = &cur_cts.xport_specific.spi;
2783
2784                 /* Fill in any gaps in what the user gave us */
2785                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
2786                         spi->sync_period = cur_spi->sync_period;
2787                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
2788                         spi->sync_period = 0;
2789                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
2790                         spi->sync_offset = cur_spi->sync_offset;
2791                 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
2792                         spi->sync_offset = 0;
2793                 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
2794                         spi->ppr_options = cur_spi->ppr_options;
2795                 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
2796                         spi->ppr_options = 0;
2797                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
2798                         spi->bus_width = cur_spi->bus_width;
2799                 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
2800                         spi->bus_width = 0;
2801                 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
2802                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2803                         spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
2804                 }
2805                 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
2806                         spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2807                 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
2808                   && (inq_data->flags & SID_Sync) == 0
2809                   && cts->type == CTS_TYPE_CURRENT_SETTINGS)
2810                  || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
2811                         /* Force async */
2812                         spi->sync_period = 0;
2813                         spi->sync_offset = 0;
2814                 }
2815
2816                 switch (spi->bus_width) {
2817                 case MSG_EXT_WDTR_BUS_32_BIT:
2818                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
2819                           || (inq_data->flags & SID_WBus32) != 0
2820                           || cts->type == CTS_TYPE_USER_SETTINGS)
2821                          && (cpi.hba_inquiry & PI_WIDE_32) != 0)
2822                                 break;
2823                         /* Fall Through to 16-bit */
2824                 case MSG_EXT_WDTR_BUS_16_BIT:
2825                         if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
2826                           || (inq_data->flags & SID_WBus16) != 0
2827                           || cts->type == CTS_TYPE_USER_SETTINGS)
2828                          && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
2829                                 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2830                                 break;
2831                         }
2832                         /* Fall Through to 8-bit */
2833                 default: /* New bus width?? */
2834                 case MSG_EXT_WDTR_BUS_8_BIT:
2835                         /* All targets can do this */
2836                         spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2837                         break;
2838                 }
2839
2840                 spi3caps = cpi.xport_specific.spi.ppr_options;
2841                 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
2842                  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
2843                         spi3caps &= inq_data->spi3data;
2844
2845                 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
2846                         spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2847
2848                 if ((spi3caps & SID_SPI_IUS) == 0)
2849                         spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
2850
2851                 if ((spi3caps & SID_SPI_QAS) == 0)
2852                         spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
2853
2854                 /* No SPI Transfer settings are allowed unless we are wide */
2855                 if (spi->bus_width == 0)
2856                         spi->ppr_options = 0;
2857
2858                 if ((spi->valid & CTS_SPI_VALID_DISC)
2859                  && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
2860                         /*
2861                          * Can't tag queue without disconnection.
2862                          */
2863                         scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2864                         scsi->valid |= CTS_SCSI_VALID_TQ;
2865                 }
2866
2867                 /*
2868                  * If we are currently performing tagged transactions to
2869                  * this device and want to change its negotiation parameters,
2870                  * go non-tagged for a bit to give the controller a chance to
2871                  * negotiate unhampered by tag messages.
2872                  */
2873                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
2874                  && (device->inq_flags & SID_CmdQue) != 0
2875                  && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
2876                  && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
2877                                    CTS_SPI_VALID_SYNC_OFFSET|
2878                                    CTS_SPI_VALID_BUS_WIDTH)) != 0)
2879                         scsi_toggle_tags(path);
2880         }
2881
2882         if (cts->type == CTS_TYPE_CURRENT_SETTINGS
2883          && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2884                 int device_tagenb;
2885
2886                 /*
2887                  * If we are transitioning from tags to no-tags or
2888                  * vice-versa, we need to carefully freeze and restart
2889                  * the queue so that we don't overlap tagged and non-tagged
2890                  * commands.  We also temporarily stop tags if there is
2891                  * a change in transfer negotiation settings to allow
2892                  * "tag-less" negotiation.
2893                  */
2894                 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
2895                  || (device->inq_flags & SID_CmdQue) != 0)
2896                         device_tagenb = TRUE;
2897                 else
2898                         device_tagenb = FALSE;
2899
2900                 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
2901                   && device_tagenb == FALSE)
2902                  || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
2903                   && device_tagenb == TRUE)) {
2904                         if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
2905                                 /*
2906                                  * Delay change to use tags until after a
2907                                  * few commands have gone to this device so
2908                                  * the controller has time to perform transfer
2909                                  * negotiations without tagged messages getting
2910                                  * in the way.
2911                                  */
2912                                 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
2913                                 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
2914                         } else {
2915                                 xpt_stop_tags(path);
2916                         }
2917                 }
2918         }
2919         if (async_update == FALSE)
2920                 xpt_action_default((union ccb *)cts);
2921 }
2922
2923 static void
2924 scsi_toggle_tags(struct cam_path *path)
2925 {
2926         struct cam_ed *dev;
2927
2928         /*
2929          * Give controllers a chance to renegotiate
2930          * before starting tag operations.  We
2931          * "toggle" tagged queuing off then on
2932          * which causes the tag enable command delay
2933          * counter to come into effect.
2934          */
2935         dev = path->device;
2936         if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
2937          || ((dev->inq_flags & SID_CmdQue) != 0
2938           && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
2939                 struct ccb_trans_settings cts;
2940
2941                 memset(&cts, 0, sizeof(cts));
2942                 xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
2943                 cts.protocol = PROTO_SCSI;
2944                 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
2945                 cts.transport = XPORT_UNSPECIFIED;
2946                 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
2947                 cts.proto_specific.scsi.flags = 0;
2948                 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
2949                 scsi_set_transfer_settings(&cts, path,
2950                                           /*async_update*/TRUE);
2951                 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
2952                 scsi_set_transfer_settings(&cts, path,
2953                                           /*async_update*/TRUE);
2954         }
2955 }
2956
2957 /*
2958  * Handle any per-device event notifications that require action by the XPT.
2959  */
2960 static void
2961 scsi_dev_async(uint32_t async_code, struct cam_eb *bus, struct cam_et *target,
2962               struct cam_ed *device, void *async_arg)
2963 {
2964         cam_status status;
2965         struct cam_path newpath;
2966
2967         /*
2968          * We only need to handle events for real devices.
2969          */
2970         if (target->target_id == CAM_TARGET_WILDCARD
2971          || device->lun_id == CAM_LUN_WILDCARD)
2972                 return;
2973
2974         /*
2975          * We need our own path with wildcards expanded to
2976          * handle certain types of events.
2977          */
2978         if ((async_code == AC_SENT_BDR)
2979          || (async_code == AC_BUS_RESET)
2980          || (async_code == AC_INQ_CHANGED))
2981                 status = xpt_compile_path(&newpath, NULL,
2982                                           bus->path_id,
2983                                           target->target_id,
2984                                           device->lun_id);
2985         else
2986                 status = CAM_REQ_CMP_ERR;
2987
2988         if (status == CAM_REQ_CMP) {
2989                 /*
2990                  * Allow transfer negotiation to occur in a
2991                  * tag free environment and after settle delay.
2992                  */
2993                 if (async_code == AC_SENT_BDR
2994                  || async_code == AC_BUS_RESET) {
2995                         cam_freeze_devq(&newpath);
2996                         cam_release_devq(&newpath,
2997                                 RELSIM_RELEASE_AFTER_TIMEOUT,
2998                                 /*reduction*/0,
2999                                 /*timeout*/scsi_delay,
3000                                 /*getcount_only*/0);
3001                         scsi_toggle_tags(&newpath);
3002                 }
3003
3004                 if (async_code == AC_INQ_CHANGED) {
3005                         /*
3006                          * We've sent a start unit command, or
3007                          * something similar to a device that
3008                          * may have caused its inquiry data to
3009                          * change. So we re-scan the device to
3010                          * refresh the inquiry data for it.
3011                          */
3012                         scsi_scan_lun(newpath.periph, &newpath,
3013                                      CAM_EXPECT_INQ_CHANGE, NULL);
3014                 }
3015                 xpt_release_path(&newpath);
3016         } else if (async_code == AC_LOST_DEVICE &&
3017             (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
3018                 device->flags |= CAM_DEV_UNCONFIGURED;
3019                 xpt_release_device(device);
3020         } else if (async_code == AC_TRANSFER_NEG) {
3021                 struct ccb_trans_settings *settings;
3022                 struct cam_path path;
3023
3024                 settings = (struct ccb_trans_settings *)async_arg;
3025                 xpt_compile_path(&path, NULL, bus->path_id, target->target_id,
3026                                  device->lun_id);
3027                 scsi_set_transfer_settings(settings, &path,
3028                                           /*async_update*/TRUE);
3029                 xpt_release_path(&path);
3030         }
3031 }
3032
3033 static void
3034 _scsi_announce_periph(struct cam_periph *periph, u_int *speed, u_int *freq, struct ccb_trans_settings *cts)
3035 {
3036         struct  ccb_pathinq cpi;
3037         struct  cam_path *path = periph->path;
3038
3039         cam_periph_assert(periph, MA_OWNED);
3040
3041         xpt_setup_ccb(&cts->ccb_h, path, CAM_PRIORITY_NORMAL);
3042         cts->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
3043         cts->type = CTS_TYPE_CURRENT_SETTINGS;
3044         xpt_action((union ccb*)cts);
3045         if (cam_ccb_status((union ccb *)cts) != CAM_REQ_CMP)
3046                 return;
3047
3048         /* Ask the SIM for its base transfer speed */
3049         memset(&cpi, 0, sizeof(cpi));
3050         xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
3051         cpi.ccb_h.func_code = XPT_PATH_INQ;
3052         xpt_action((union ccb *)&cpi);
3053
3054         /* Report connection speed */
3055         *speed = cpi.base_transfer_speed;
3056         *freq = 0;
3057
3058         if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SPI) {
3059                 struct  ccb_trans_settings_spi *spi =
3060                     &cts->xport_specific.spi;
3061
3062                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
3063                   && spi->sync_offset != 0) {
3064                         *freq = scsi_calc_syncsrate(spi->sync_period);
3065                         *speed = *freq;
3066                 }
3067                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
3068                         *speed *= (0x01 << spi->bus_width);
3069         }
3070         if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_FC) {
3071                 struct  ccb_trans_settings_fc *fc =
3072                     &cts->xport_specific.fc;
3073
3074                 if (fc->valid & CTS_FC_VALID_SPEED)
3075                         *speed = fc->bitrate;
3076         }
3077         if (cts->ccb_h.status == CAM_REQ_CMP && cts->transport == XPORT_SAS) {
3078                 struct  ccb_trans_settings_sas *sas =
3079                     &cts->xport_specific.sas;
3080
3081                 if (sas->valid & CTS_SAS_VALID_SPEED)
3082                         *speed = sas->bitrate;
3083         }
3084 }
3085
3086 static void
3087 scsi_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
3088 {
3089         struct  ccb_trans_settings cts;
3090         u_int speed, freq, mb;
3091
3092         memset(&cts, 0, sizeof(cts));
3093         _scsi_announce_periph(periph, &speed, &freq, &cts);
3094         if (cam_ccb_status((union ccb *)&cts) != CAM_REQ_CMP)
3095                 return;
3096
3097         mb = speed / 1000;
3098         if (mb > 0)
3099                 sbuf_printf(sb, "%s%d: %d.%03dMB/s transfers",
3100                        periph->periph_name, periph->unit_number,
3101                        mb, speed % 1000);
3102         else
3103                 sbuf_printf(sb, "%s%d: %dKB/s transfers", periph->periph_name,
3104                        periph->unit_number, speed);
3105         /* Report additional information about SPI connections */
3106         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
3107                 struct  ccb_trans_settings_spi *spi;
3108
3109                 spi = &cts.xport_specific.spi;
3110                 if (freq != 0) {
3111                         sbuf_printf(sb, " (%d.%03dMHz%s, offset %d", freq / 1000,
3112                                freq % 1000,
3113                                (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
3114                              ? " DT" : "",
3115                                spi->sync_offset);
3116                 }
3117                 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
3118                  && spi->bus_width > 0) {
3119                         if (freq != 0) {
3120                                 sbuf_cat(sb, ", ");
3121                         } else {
3122                                 sbuf_cat(sb, " (");
3123                         }
3124                         sbuf_printf(sb, "%dbit)", 8 * (0x01 << spi->bus_width));
3125                 } else if (freq != 0) {
3126                         sbuf_putc(sb, ')');
3127                 }
3128         }
3129         if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
3130                 struct  ccb_trans_settings_fc *fc;
3131
3132                 fc = &cts.xport_specific.fc;
3133                 if (fc->valid & CTS_FC_VALID_WWNN)
3134                         sbuf_printf(sb, " WWNN 0x%llx", (long long) fc->wwnn);
3135                 if (fc->valid & CTS_FC_VALID_WWPN)
3136                         sbuf_printf(sb, " WWPN 0x%llx", (long long) fc->wwpn);
3137                 if (fc->valid & CTS_FC_VALID_PORT)
3138                         sbuf_printf(sb, " PortID 0x%x", fc->port);
3139         }
3140         sbuf_putc(sb, '\n');
3141 }
3142
3143 static void
3144 scsi_proto_announce_sbuf(struct cam_ed *device, struct sbuf *sb)
3145 {
3146         scsi_print_inquiry_sbuf(sb, &device->inq_data);
3147 }
3148
3149 static void
3150 scsi_proto_denounce_sbuf(struct cam_ed *device, struct sbuf *sb)
3151 {
3152         scsi_print_inquiry_short_sbuf(sb, &device->inq_data);
3153 }
3154
3155 static void
3156 scsi_proto_debug_out(union ccb *ccb)
3157 {
3158         char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
3159         struct cam_ed *device;
3160
3161         if (ccb->ccb_h.func_code != XPT_SCSI_IO)
3162                 return;
3163
3164         device = ccb->ccb_h.path->device;
3165         CAM_DEBUG(ccb->ccb_h.path,
3166             CAM_DEBUG_CDB,("%s. CDB: %s\n",
3167                 scsi_op_desc(scsiio_cdb_ptr(&ccb->csio)[0], &device->inq_data),
3168                 scsi_cdb_string(scsiio_cdb_ptr(&ccb->csio), cdb_str, sizeof(cdb_str))));
3169 }