/*-
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
41 #include <sys/mutex.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
47 #include <sys/endian.h>
50 #include <geom/geom.h>
51 #include <geom/geom_disk.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_periph.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_iosched.h>
66 #include <cam/scsi/scsi_message.h>
67 #include <cam/scsi/scsi_da.h>
71 * Note that there are probe ordering dependencies here. The order isn't
72 * controlled by this enumeration, but by explicit state transitions in
73 * dastart() and dadone(). Here are some of the dependencies:
75 * 1. RC should come first, before RC16, unless there is evidence that RC16
77 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
78 * 3. The ATA probes should go in this order:
79 * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
85 DA_STATE_PROBE_BLK_LIMITS,
88 DA_STATE_PROBE_ATA_LOGDIR,
89 DA_STATE_PROBE_ATA_IDDIR,
90 DA_STATE_PROBE_ATA_SUP,
91 DA_STATE_PROBE_ATA_ZONE,
97 DA_FLAG_PACK_INVALID = 0x000001,
98 DA_FLAG_NEW_PACK = 0x000002,
99 DA_FLAG_PACK_LOCKED = 0x000004,
100 DA_FLAG_PACK_REMOVABLE = 0x000008,
101 DA_FLAG_NEED_OTAG = 0x000020,
102 DA_FLAG_WAS_OTAG = 0x000040,
103 DA_FLAG_RETRY_UA = 0x000080,
104 DA_FLAG_OPEN = 0x000100,
105 DA_FLAG_SCTX_INIT = 0x000200,
106 DA_FLAG_CAN_RC16 = 0x000400,
107 DA_FLAG_PROBED = 0x000800,
108 DA_FLAG_DIRTY = 0x001000,
109 DA_FLAG_ANNOUNCED = 0x002000,
110 DA_FLAG_CAN_ATA_DMA = 0x004000,
111 DA_FLAG_CAN_ATA_LOG = 0x008000,
112 DA_FLAG_CAN_ATA_IDLOG = 0x010000,
113 DA_FLAG_CAN_ATA_SUPCAP = 0x020000,
114 DA_FLAG_CAN_ATA_ZONE = 0x040000
119 DA_Q_NO_SYNC_CACHE = 0x01,
120 DA_Q_NO_6_BYTE = 0x02,
121 DA_Q_NO_PREVENT = 0x04,
124 DA_Q_NO_UNMAP = 0x20,
125 DA_Q_RETRY_BUSY = 0x40,
127 DA_Q_STRICT_UNMAP = 0x100
130 #define DA_Q_BIT_STRING \
132 "\001NO_SYNC_CACHE" \
143 DA_CCB_PROBE_RC = 0x01,
144 DA_CCB_PROBE_RC16 = 0x02,
145 DA_CCB_PROBE_LBP = 0x03,
146 DA_CCB_PROBE_BLK_LIMITS = 0x04,
147 DA_CCB_PROBE_BDC = 0x05,
148 DA_CCB_PROBE_ATA = 0x06,
149 DA_CCB_BUFFER_IO = 0x07,
151 DA_CCB_DELETE = 0x0B,
153 DA_CCB_PROBE_ZONE = 0x0D,
154 DA_CCB_PROBE_ATA_LOGDIR = 0x0E,
155 DA_CCB_PROBE_ATA_IDDIR = 0x0F,
156 DA_CCB_PROBE_ATA_SUP = 0x10,
157 DA_CCB_PROBE_ATA_ZONE = 0x11,
158 DA_CCB_TYPE_MASK = 0x1F,
159 DA_CCB_RETRY_UA = 0x20
163 * Order here is important for method choice
165 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
166 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
167 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
168 * import taking 5mins.
179 DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
180 DA_DELETE_MAX = DA_DELETE_ZERO
184 * For SCSI, host managed drives show up as a separate device type. For
185 * ATA, host managed drives also have a different device signature.
186 * XXX KDM figure out the ATA host managed signature.
190 DA_ZONE_DRIVE_MANAGED = 0x01,
191 DA_ZONE_HOST_AWARE = 0x02,
192 DA_ZONE_HOST_MANAGED = 0x03
196 * We distinguish between these interface cases in addition to the drive type:
197 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
198 * o ATA drive behind a SCSI translation layer that does not know about
199 * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this
200 * case, we would need to share the ATA code with the ada(4) driver.
210 DA_ZONE_FLAG_RZ_SUP = 0x0001,
211 DA_ZONE_FLAG_OPEN_SUP = 0x0002,
212 DA_ZONE_FLAG_CLOSE_SUP = 0x0004,
213 DA_ZONE_FLAG_FINISH_SUP = 0x0008,
214 DA_ZONE_FLAG_RWP_SUP = 0x0010,
215 DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP |
216 DA_ZONE_FLAG_OPEN_SUP |
217 DA_ZONE_FLAG_CLOSE_SUP |
218 DA_ZONE_FLAG_FINISH_SUP |
219 DA_ZONE_FLAG_RWP_SUP),
220 DA_ZONE_FLAG_URSWRZ = 0x0020,
221 DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
222 DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
223 DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
224 DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET |
225 DA_ZONE_FLAG_OPT_NONSEQ_SET |
226 DA_ZONE_FLAG_MAX_SEQ_SET)
229 static struct da_zone_desc {
232 } da_zone_desc_table[] = {
233 {DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
234 {DA_ZONE_FLAG_OPEN_SUP, "Open" },
235 {DA_ZONE_FLAG_CLOSE_SUP, "Close" },
236 {DA_ZONE_FLAG_FINISH_SUP, "Finish" },
237 {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
240 typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
242 static da_delete_func_t da_delete_trim;
243 static da_delete_func_t da_delete_unmap;
244 static da_delete_func_t da_delete_ws;
246 static const void * da_delete_functions[] = {
/*
 * Printable forms of the delete methods, indexed by da_delete_methods
 * (declared elsewhere in this file): a short token name and a longer
 * human-readable description.  NOTE(review): presumably consumed by the
 * delete-method sysctl handlers declared below — confirm at use sites.
 */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };
/*
 * Offsets into our private area for storing information.  These alias
 * the generic periph-private CCB fields to driver-meaningful names:
 * ccb_state holds da_ccb_state flags; ccb_bp is, by its name, a back
 * pointer to the originating bio — confirm at the completion handlers.
 */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1
269 u_int8_t secs_per_track;
270 u_int32_t secsize; /* Number of bytes/sector */
271 u_int64_t sectors; /* total number sectors */
/*
 * Sizing limits for SCSI UNMAP requests.  Per the names and values:
 * a block descriptor is 16 bytes, the parameter-list header is 8 bytes,
 * and a single descriptor can cover at most a 32-bit LBA count.
 */
#define UNMAP_RANGE_MAX		0xffffffff	/* max LBA count/descriptor */
#define UNMAP_HEAD_SIZE		8		/* parameter list header, bytes */
#define UNMAP_RANGE_SIZE	16		/* bytes per block descriptor */
#define UNMAP_MAX_RANGES	2048		/* Protocol Max is 4095 */
280 #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
/* Maximum block counts for the WRITE SAME delete methods. */
#define WS10_MAX_BLKS		0xffff		/* WRITE SAME(10): 16-bit count */
#define WS16_MAX_BLKS		0xffffffff	/* WRITE SAME(16): 32-bit count */
/*
 * Number of ATA DSM TRIM ranges that fit in the shared unmap_buf,
 * rounded down to a whole DSM block by the divide-then-multiply.
 */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)
/*
 * NOTE(review): by its name this looks like an iosched work flag used to
 * request a TEST UNIT READY — confirm where it is or'd into work flags.
 */
#define DA_WORK_TUR		(1 << 16)
291 struct cam_iosched_softc *cam_iosched;
292 struct bio_queue_head delete_run_queue;
293 LIST_HEAD(, ccb_hdr) pending_ccbs;
294 int refcount; /* Active xpt_action() calls */
298 int minimum_cmd_size;
301 int delete_available; /* Delete methods possibly available */
302 da_zone_mode zone_mode;
303 da_zone_interface zone_interface;
304 da_zone_flags zone_flags;
305 struct ata_gp_log_dir ata_logdir;
306 int valid_logdir_len;
307 struct ata_identify_log_pages ata_iddir;
309 uint64_t optimal_seq_zones;
310 uint64_t optimal_nonseq_zones;
311 uint64_t max_seq_zones;
313 uint32_t unmap_max_ranges;
314 uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */
316 uint32_t unmap_gran_align;
317 uint64_t ws_max_blks;
318 da_delete_methods delete_method_pref;
319 da_delete_methods delete_method;
320 da_delete_func_t *delete_func;
323 struct disk_params params;
326 struct task sysctl_task;
327 struct sysctl_ctx_list sysctl_ctx;
328 struct sysctl_oid *sysctl_tree;
329 struct callout sendordered_c;
331 uint8_t unmap_buf[UNMAP_BUF_SIZE];
332 struct scsi_read_capacity_data_long rcaplong;
333 struct callout mediapoll_c;
335 struct sysctl_ctx_list sysctl_stats_ctx;
336 struct sysctl_oid *sysctl_stats_tree;
343 #define dadeleteflag(softc, delete_method, enable) \
345 softc->delete_available |= (1 << delete_method); \
347 softc->delete_available &= ~(1 << delete_method); \
350 struct da_quirk_entry {
351 struct scsi_inquiry_pattern inq_pat;
/* Vendor-ID strings shared by multiple da_quirk_table entries below. */
static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";
358 static struct da_quirk_entry da_quirk_table[] =
360 /* SPI, FC devices */
363 * Fujitsu M2513A MO drives.
364 * Tested devices: M2513A2 firmware versions 1200 & 1300.
365 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
366 * Reported by: W.Scholten <whs@xs4all.nl>
368 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
369 /*quirks*/ DA_Q_NO_SYNC_CACHE
373 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
374 /*quirks*/ DA_Q_NO_SYNC_CACHE
378 * This particular Fujitsu drive doesn't like the
379 * synchronize cache command.
380 * Reported by: Tom Jackson <toj@gorilla.net>
382 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
383 /*quirks*/ DA_Q_NO_SYNC_CACHE
387 * This drive doesn't like the synchronize cache command
388 * either. Reported by: Matthew Jacob <mjacob@feral.com>
389 * in NetBSD PR kern/6027, August 24, 1998.
391 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
392 /*quirks*/ DA_Q_NO_SYNC_CACHE
396 * This drive doesn't like the synchronize cache command
397 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
400 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
401 /*quirks*/ DA_Q_NO_SYNC_CACHE
405 * Doesn't like the synchronize cache command.
406 * Reported by: Blaz Zupan <blaz@gold.amis.net>
408 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
409 /*quirks*/ DA_Q_NO_SYNC_CACHE
413 * Doesn't like the synchronize cache command.
414 * Reported by: Blaz Zupan <blaz@gold.amis.net>
416 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
417 /*quirks*/ DA_Q_NO_SYNC_CACHE
421 * Doesn't like the synchronize cache command.
423 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
424 /*quirks*/ DA_Q_NO_SYNC_CACHE
428 * Doesn't like the synchronize cache command.
429 * Reported by: walter@pelissero.de
431 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
432 /*quirks*/ DA_Q_NO_SYNC_CACHE
436 * Doesn't work correctly with 6 byte reads/writes.
437 * Returns illegal request, and points to byte 9 of the
439 * Reported by: Adam McDougall <bsdx@spawnet.com>
441 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
442 /*quirks*/ DA_Q_NO_6_BYTE
446 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
447 /*quirks*/ DA_Q_NO_6_BYTE
451 * Doesn't like the synchronize cache command.
452 * Reported by: walter@pelissero.de
454 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
455 /*quirks*/ DA_Q_NO_SYNC_CACHE
459 * The CISS RAID controllers do not support SYNC_CACHE
461 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
462 /*quirks*/ DA_Q_NO_SYNC_CACHE
466 * The STEC SSDs sometimes hang on UNMAP.
468 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
469 /*quirks*/ DA_Q_NO_UNMAP
473 * VMware returns BUSY status when storage has transient
474 * connectivity problems, so better wait.
475 * Also VMware returns odd errors on misaligned UNMAPs.
477 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
478 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
480 /* USB mass storage devices supported by umass(4) */
483 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
486 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
487 /*quirks*/ DA_Q_NO_SYNC_CACHE
491 * Power Quotient Int. (PQI) USB flash key
494 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
495 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
499 * Creative Nomad MUVO mp3 player (USB)
502 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
503 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
507 * Jungsoft NEXDISK USB flash key
510 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
511 /*quirks*/ DA_Q_NO_SYNC_CACHE
515 * FreeDik USB Mini Data Drive
518 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
519 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
523 * Sigmatel USB Flash MP3 Player
526 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
527 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
531 * Neuros USB Digital Audio Computer
534 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
535 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
539 * SEAGRAND NP-900 MP3 Player
542 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
543 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
547 * iRiver iFP MP3 player (with UMS Firmware)
548 * PR: kern/54881, i386/63941, kern/66124
550 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
551 /*quirks*/ DA_Q_NO_SYNC_CACHE
555 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
558 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
559 /*quirks*/ DA_Q_NO_SYNC_CACHE
563 * ZICPlay USB MP3 Player with FM
566 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
567 /*quirks*/ DA_Q_NO_SYNC_CACHE
571 * TEAC USB floppy mechanisms
573 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
574 /*quirks*/ DA_Q_NO_SYNC_CACHE
578 * Kingston DataTraveler II+ USB Pen-Drive.
579 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
581 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
582 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
590 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
591 /*quirks*/ DA_Q_NO_SYNC_CACHE
595 * Motorola E398 Mobile Phone (TransFlash memory card).
596 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
599 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
600 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
604 * Qware BeatZkey! Pro
607 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
608 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
612 * Time DPA20B 1GB MP3 Player
615 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
616 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
620 * Samsung USB key 128Mb
623 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
624 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
628 * Kingston DataTraveler 2.0 USB Flash memory.
631 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
632 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
636 * Creative MUVO Slim mp3 player (USB)
639 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
640 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
644 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
647 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
648 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
652 * SanDisk Micro Cruzer 128MB
655 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
656 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
660 * TOSHIBA TransMemory USB sticks
663 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
664 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
668 * PNY USB 3.0 Flash Drives
670 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
671 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
676 * PR: usb/75578, usb/72344, usb/65436
678 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
679 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
683 * Genesys 6-in-1 Card Reader
686 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
687 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
691 * Rekam Digital CAMERA
694 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
695 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
699 * iRiver H10 MP3 player
702 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
703 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
707 * iRiver U10 MP3 player
710 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
711 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
718 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
719 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
723 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
726 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
727 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
734 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
735 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
739 * Philips USB Key Audio KEY013
742 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
743 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
750 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
751 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
758 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
759 /*quirks*/ DA_Q_NO_SYNC_CACHE
763 * I/O Magic USB flash - Giga Bank
766 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
767 /*quirks*/ DA_Q_NO_SYNC_CACHE
771 * JoyFly 128mb USB Flash Drive
774 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
775 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
782 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
783 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
787 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
790 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
791 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
795 * Samsung YP-U3 mp3-player
798 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
799 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
802 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
803 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
807 * Sony Cyber-Shot DSC cameras
810 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
811 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
814 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
815 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
818 /* At least several Transcent USB sticks lie on RC16. */
819 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
820 "*"}, /*quirks*/ DA_Q_NO_RC16
824 * I-O Data USB Flash Disk
827 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
828 "*"}, /*quirks*/ DA_Q_NO_RC16
830 /* ATA/SATA devices over SAS/USB/... */
832 /* Hitachi Advanced Format (4k) drives */
833 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
837 /* Micron Advanced Format (4k) drives */
838 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
842 /* Samsung Advanced Format (4k) drives */
843 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
847 /* Samsung Advanced Format (4k) drives */
848 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
852 /* Samsung Advanced Format (4k) drives */
853 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
857 /* Samsung Advanced Format (4k) drives */
858 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
862 /* Seagate Barracuda Green Advanced Format (4k) drives */
863 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
867 /* Seagate Barracuda Green Advanced Format (4k) drives */
868 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
872 /* Seagate Barracuda Green Advanced Format (4k) drives */
873 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
877 /* Seagate Barracuda Green Advanced Format (4k) drives */
878 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
882 /* Seagate Barracuda Green Advanced Format (4k) drives */
883 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
887 /* Seagate Barracuda Green Advanced Format (4k) drives */
888 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
892 /* Seagate Momentus Advanced Format (4k) drives */
893 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
897 /* Seagate Momentus Advanced Format (4k) drives */
898 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
902 /* Seagate Momentus Advanced Format (4k) drives */
903 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
907 /* Seagate Momentus Advanced Format (4k) drives */
908 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
912 /* Seagate Momentus Advanced Format (4k) drives */
913 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
917 /* Seagate Momentus Advanced Format (4k) drives */
918 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
922 /* Seagate Momentus Advanced Format (4k) drives */
923 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
927 /* Seagate Momentus Advanced Format (4k) drives */
928 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
932 /* Seagate Momentus Advanced Format (4k) drives */
933 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
937 /* Seagate Momentus Advanced Format (4k) drives */
938 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
942 /* Seagate Momentus Advanced Format (4k) drives */
943 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
947 /* Seagate Momentus Advanced Format (4k) drives */
948 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
952 /* Seagate Momentus Advanced Format (4k) drives */
953 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
957 /* Seagate Momentus Advanced Format (4k) drives */
958 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
962 /* Seagate Momentus Thin Advanced Format (4k) drives */
963 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
967 /* Seagate Momentus Thin Advanced Format (4k) drives */
968 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
972 /* WDC Caviar Green Advanced Format (4k) drives */
973 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
977 /* WDC Caviar Green Advanced Format (4k) drives */
978 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
982 /* WDC Caviar Green Advanced Format (4k) drives */
983 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
987 /* WDC Caviar Green Advanced Format (4k) drives */
988 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
992 /* WDC Caviar Green Advanced Format (4k) drives */
993 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
997 /* WDC Caviar Green Advanced Format (4k) drives */
998 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1002 /* WDC Caviar Green Advanced Format (4k) drives */
1003 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1007 /* WDC Caviar Green Advanced Format (4k) drives */
1008 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1012 /* WDC Scorpio Black Advanced Format (4k) drives */
1013 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1017 /* WDC Scorpio Black Advanced Format (4k) drives */
1018 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1022 /* WDC Scorpio Black Advanced Format (4k) drives */
1023 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1027 /* WDC Scorpio Black Advanced Format (4k) drives */
1028 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1032 /* WDC Scorpio Blue Advanced Format (4k) drives */
1033 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1037 /* WDC Scorpio Blue Advanced Format (4k) drives */
1038 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1042 /* WDC Scorpio Blue Advanced Format (4k) drives */
1043 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1047 /* WDC Scorpio Blue Advanced Format (4k) drives */
1048 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1053 * Olympus FE-210 camera
1055 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1056 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1060 * LG UP3S MP3 player
1062 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1063 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1067 * Laser MP3-2GA13 MP3 player
1069 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1070 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1074 * LaCie external 250GB Hard drive des by Porsche
1075 * Submitted by: Ben Stuyts <ben@altesco.nl>
1078 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1079 /*quirks*/ DA_Q_NO_SYNC_CACHE
1084 * Corsair Force 2 SSDs
1085 * 4k optimised & trim only works in 4k requests + 4k aligned
1087 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1092 * Corsair Force 3 SSDs
1093 * 4k optimised & trim only works in 4k requests + 4k aligned
1095 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1100 * Corsair Neutron GTX SSDs
1101 * 4k optimised & trim only works in 4k requests + 4k aligned
1103 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1108 * Corsair Force GT & GS SSDs
1109 * 4k optimised & trim only works in 4k requests + 4k aligned
1111 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1117 * 4k optimised & trim only works in 4k requests + 4k aligned
1119 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1124 * Crucial RealSSD C300 SSDs
1127 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1128 "*" }, /*quirks*/DA_Q_4K
1132 * Intel 320 Series SSDs
1133 * 4k optimised & trim only works in 4k requests + 4k aligned
1135 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1140 * Intel 330 Series SSDs
1141 * 4k optimised & trim only works in 4k requests + 4k aligned
1143 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1148 * Intel 510 Series SSDs
1149 * 4k optimised & trim only works in 4k requests + 4k aligned
1151 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1156 * Intel 520 Series SSDs
1157 * 4k optimised & trim only works in 4k requests + 4k aligned
1159 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1164 * Intel S3610 Series SSDs
1165 * 4k optimised & trim only works in 4k requests + 4k aligned
1167 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1172 * Intel X25-M Series SSDs
1173 * 4k optimised & trim only works in 4k requests + 4k aligned
1175 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1180 * Kingston E100 Series SSDs
1181 * 4k optimised & trim only works in 4k requests + 4k aligned
1183 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1188 * Kingston HyperX 3k SSDs
1189 * 4k optimised & trim only works in 4k requests + 4k aligned
1191 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1196 * Marvell SSDs (entry taken from OpenSolaris)
1197 * 4k optimised & trim only works in 4k requests + 4k aligned
1199 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1204 * OCZ Agility 2 SSDs
1205 * 4k optimised & trim only works in 4k requests + 4k aligned
1207 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1212 * OCZ Agility 3 SSDs
1213 * 4k optimised & trim only works in 4k requests + 4k aligned
1215 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1220 * OCZ Deneva R Series SSDs
1221 * 4k optimised & trim only works in 4k requests + 4k aligned
1223 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1228 * OCZ Vertex 2 SSDs (inc pro series)
1229 * 4k optimised & trim only works in 4k requests + 4k aligned
1231 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1237 * 4k optimised & trim only works in 4k requests + 4k aligned
1239 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1245 * 4k optimised & trim only works in 4k requests + 4k aligned
1247 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1252 * Samsung 830 Series SSDs
1253 * 4k optimised & trim only works in 4k requests + 4k aligned
1255 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1261 * 4k optimised & trim only works in 4k requests + 4k aligned
1263 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1269 * 4k optimised & trim only works in 4k requests + 4k aligned
1271 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1276 * Samsung 843T Series SSDs (MZ7WD*)
1277 * Samsung PM851 Series SSDs (MZ7TE*)
1278 * Samsung PM853T Series SSDs (MZ7GE*)
1279 * Samsung SM863 Series SSDs (MZ7KM*)
1282 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1287 * SuperTalent TeraDrive CT SSDs
1288 * 4k optimised & trim only works in 4k requests + 4k aligned
1290 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1295 * XceedIOPS SATA SSDs
1298 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1303 * Hama Innostor USB-Stick
1305 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1306 /*quirks*/DA_Q_NO_RC16
1310 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1311 * Drive Managed SATA hard drive. This drive doesn't report
1312 * in firmware that it is a drive managed SMR drive.
1314 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" },
1315 /*quirks*/DA_Q_SMR_DM
1319 * MX-ES USB Drive by Mach Xtreme
1321 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1322 /*quirks*/DA_Q_NO_RC16
1326 static disk_strategy_t dastrategy;
1327 static dumper_t dadump;
1328 static periph_init_t dainit;
1329 static void daasync(void *callback_arg, u_int32_t code,
1330 struct cam_path *path, void *arg);
1331 static void dasysctlinit(void *context, int pending);
1332 static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
1333 static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
1334 static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
1335 static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
1336 static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
1337 static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
1338 static void dadeletemethodset(struct da_softc *softc,
1339 da_delete_methods delete_method);
1340 static off_t dadeletemaxsize(struct da_softc *softc,
1341 da_delete_methods delete_method);
1342 static void dadeletemethodchoose(struct da_softc *softc,
1343 da_delete_methods default_method);
1344 static void daprobedone(struct cam_periph *periph, union ccb *ccb);
1346 static periph_ctor_t daregister;
1347 static periph_dtor_t dacleanup;
1348 static periph_start_t dastart;
1349 static periph_oninv_t daoninvalidate;
1350 static void dazonedone(struct cam_periph *periph, union ccb *ccb);
1351 static void dadone(struct cam_periph *periph,
1352 union ccb *done_ccb);
1353 static int daerror(union ccb *ccb, u_int32_t cam_flags,
1354 u_int32_t sense_flags);
1355 static void daprevent(struct cam_periph *periph, int action);
1356 static void dareprobe(struct cam_periph *periph);
1357 static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
1359 struct scsi_read_capacity_data_long *rcaplong,
1361 static timeout_t dasendorderedtag;
1362 static void dashutdown(void *arg, int howto);
1363 static timeout_t damediapoll;
1365 #ifndef DA_DEFAULT_POLL_PERIOD
1366 #define DA_DEFAULT_POLL_PERIOD 3
1369 #ifndef DA_DEFAULT_TIMEOUT
1370 #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
1373 #ifndef DA_DEFAULT_SOFTTIMEOUT
1374 #define DA_DEFAULT_SOFTTIMEOUT 0
1377 #ifndef DA_DEFAULT_RETRY
1378 #define DA_DEFAULT_RETRY 4
1381 #ifndef DA_DEFAULT_SEND_ORDERED
1382 #define DA_DEFAULT_SEND_ORDERED 1
1385 static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
1386 static int da_retry_count = DA_DEFAULT_RETRY;
1387 static int da_default_timeout = DA_DEFAULT_TIMEOUT;
1388 static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
1389 static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
1391 static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
1392 "CAM Direct Access Disk driver");
1393 SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
1394 &da_poll_period, 0, "Media polling period in seconds");
1395 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
1396 &da_retry_count, 0, "Normal I/O retry count");
1397 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
1398 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
1399 SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
1400 &da_send_ordered, 0, "Send Ordered Tags");
1402 SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
1403 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
1404 "Soft I/O timeout (ms)");
1405 TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif
/*
 * Peripheral driver registration: dadriver ties the "da" name to the
 * per-unit list, and PERIPHDRIVER_DECLARE() hooks it into CAM's
 * driver table at link time.  M_SCSIDA is the malloc type used for
 * driver-private buffers (e.g. report-zones allocations).
 */
1423 static struct periph_driver dadriver =
1426 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
1429 PERIPHDRIVER_DECLARE(da, dadriver);
1431 static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
/*
 * GEOM disk open method (disk->d_open).
 *
 * Acquires and holds the periph, waits for the probe path to publish
 * the media size, optionally issues PREVENT MEDIUM REMOVAL, then marks
 * the pack valid and open.  Error paths between the visible lines are
 * elided in this extract (embedded line numbers are non-contiguous) —
 * verify unwind order against the full source.
 */
1434 daopen(struct disk *dp)
1436 struct cam_periph *periph;
1437 struct da_softc *softc;
1440 periph = (struct cam_periph *)dp->d_drv1;
1441 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1445 cam_periph_lock(periph);
/* PCATCH: an open sleeping for the hold can be interrupted by a signal. */
1446 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
1447 cam_periph_unlock(periph);
1448 cam_periph_release(periph);
1452 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
1455 softc = (struct da_softc *)periph->softc;
1458 /* Wait for the disk size update. */
/* daprobedone() wakes this channel (&softc->disk->d_mediasize). */
1459 error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
1462 xpt_print(periph->path, "unable to retrieve capacity data\n");
1464 if (periph->flags & CAM_PERIPH_INVALID)
/* Lock the door on removable media unless the device quirks forbid it. */
1467 if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1468 (softc->quirks & DA_Q_NO_PREVENT) == 0)
1469 daprevent(periph, PR_PREVENT);
1472 softc->flags &= ~DA_FLAG_PACK_INVALID;
1473 softc->flags |= DA_FLAG_OPEN;
1476 cam_periph_unhold(periph);
1477 cam_periph_unlock(periph);
1480 cam_periph_release(periph);
/*
 * GEOM disk close method (disk->d_close).
 *
 * Best-effort teardown: flush the write cache if the pack is dirty,
 * re-allow medium removal, then clear the open state.  If the periph
 * cannot be held (e.g. being invalidated) the cache flush and PREVENT
 * release are skipped entirely.
 */
1486 daclose(struct disk *dp)
1488 struct cam_periph *periph;
1489 struct da_softc *softc;
1493 periph = (struct cam_periph *)dp->d_drv1;
1494 softc = (struct da_softc *)periph->softc;
1495 cam_periph_lock(periph);
1496 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
/* No PCATCH here: close must not be aborted by a signal. */
1499 if (cam_periph_hold(periph, PRIBIO) == 0) {
1501 /* Flush disk cache. */
1502 if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
1503 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
1504 (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
1505 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1506 scsi_synchronize_cache(&ccb->csio, /*retries*/1,
1507 /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
1508 /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
/*
 * SF_QUIET_IR: don't spam the console if the flush fails;
 * the dirty flag is cleared regardless (best effort).
 */
1510 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
1511 /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
1512 softc->disk->d_devstat);
1513 softc->flags &= ~DA_FLAG_DIRTY;
1514 xpt_release_ccb(ccb);
1517 /* Allow medium removal. */
1518 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1519 (softc->quirks & DA_Q_NO_PREVENT) == 0)
1520 daprevent(periph, PR_ALLOW);
1522 cam_periph_unhold(periph);
1526 * If we've got removeable media, mark the blocksize as
1527 * unavailable, since it could change when new media is
1530 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
1531 softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
1533 softc->flags &= ~DA_FLAG_OPEN;
/* Wait for any in-flight d_getattr/strategy references to drain. */
1534 while (softc->refcount != 0)
1535 cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
1536 cam_periph_unlock(periph);
1537 cam_periph_release(periph);
/*
 * Ask the I/O scheduler to schedule work for this periph, but only
 * once probing has completed (state NORMAL); the probe state machine
 * drives its own CCBs.
 */
1542 daschedule(struct cam_periph *periph)
1544 struct da_softc *softc = (struct da_softc *)periph->softc;
1546 if (softc->state != DA_STATE_NORMAL)
1549 cam_iosched_schedule(softc->cam_iosched, periph);
1553 * Actually translate the requested transfer into one the physical driver
1554 * can understand. The transfer is described by a buf and will include
1555 * only one physical transfer.
1558 dastrategy(struct bio *bp)
1560 struct cam_periph *periph;
1561 struct da_softc *softc;
1563 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1564 softc = (struct da_softc *)periph->softc;
1566 cam_periph_lock(periph);
1569 * If the device has been made invalid, error out
/* Invalid pack: complete the bio immediately with ENXIO. */
1571 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1572 cam_periph_unlock(periph);
1573 biofinish(bp, NULL, ENXIO);
1577 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1580 * Zone commands must be ordered, because they can depend on the
1581 * effects of previously issued commands, and they may affect
1582 * commands after them.
1584 if (bp->bio_cmd == BIO_ZONE)
1585 bp->bio_flags |= BIO_ORDERED;
1588 * Place it in the queue of disk activities for this disk
1590 cam_iosched_queue_work(softc->cam_iosched, bp);
1593 * Schedule ourselves for performing the work.
1596 cam_periph_unlock(periph);
/*
 * GEOM disk dump method (disk->d_dump), used for kernel crash dumps.
 *
 * Runs polled (no interrupts): a WRITE for each chunk while
 * length != 0, then — on the final call — a SYNCHRONIZE CACHE so the
 * dump reaches the platters.  Uses a stack CCB since the dump path
 * cannot allocate.
 */
1602 dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
1604 struct cam_periph *periph;
1605 struct da_softc *softc;
1607 struct ccb_scsiio csio;
1612 periph = dp->d_drv1;
1613 softc = (struct da_softc *)periph->softc;
1614 cam_periph_lock(periph);
1615 secsize = softc->params.secsize;
1617 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
1618 cam_periph_unlock(periph);
1623 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1624 csio.ccb_h.ccb_state = DA_CCB_DUMP;
1625 scsi_read_write(&csio,
1629 /*read*/SCSI_RW_WRITE,
1631 /*minimum_cmd_size*/ softc->minimum_cmd_size,
1634 /*data_ptr*/(u_int8_t *) virtual,
1635 /*dxfer_len*/length,
1636 /*sense_len*/SSD_FULL_SIZE,
1637 da_default_timeout * 1000);
/* Polled execution: we may be dumping from a panic context. */
1638 xpt_polled_action((union ccb *)&csio);
1640 error = cam_periph_error((union ccb *)&csio,
1641 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1642 if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
1643 cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
1644 /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
1646 printf("Aborting dump due to I/O error.\n");
1647 cam_periph_unlock(periph);
1652 * Sync the disk cache contents to the physical media.
1654 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
1656 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1657 csio.ccb_h.ccb_state = DA_CCB_DUMP;
1658 scsi_synchronize_cache(&csio,
1662 /*begin_lba*/0,/* Cover the whole disk */
1666 xpt_polled_action((union ccb *)&csio);
1668 error = cam_periph_error((union ccb *)&csio,
1669 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
1670 if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
1671 cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
1672 /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
/* A failed cache sync is reported but does not fail the dump. */
1674 xpt_print(periph->path, "Synchronize cache failed\n");
1676 cam_periph_unlock(periph);
/*
 * GEOM disk attribute method (disk->d_getattr): forward BIO_GETATTR
 * requests to the transport via xpt_getattr() under the periph lock.
 * On success the full bio length is marked completed.
 */
1681 dagetattr(struct bio *bp)
1684 struct cam_periph *periph;
1686 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1687 cam_periph_lock(periph);
1688 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1690 cam_periph_unlock(periph);
1692 bp->bio_completed = bp->bio_length;
/*
 * NOTE(review): the function header for this block is elided in this
 * extract (embedded line numbers jump) — presumably this is the driver
 * init routine registered with PERIPHDRIVER_DECLARE(); confirm against
 * the full source.  It registers the global AC_FOUND_DEVICE callback
 * and, if ordered tags are enabled, a shutdown handler that is used to
 * quiesce devices at shutdown.
 */
1702 * Install a global async callback. This callback will
1703 * receive async callbacks like "new device found".
1705 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
1707 if (status != CAM_REQ_CMP) {
1708 printf("da: Failed to attach master async callback "
1709 "due to status 0x%x!\n", status);
1710 } else if (da_send_ordered) {
1712 /* Register our shutdown event handler */
1713 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
1714 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1715 printf("dainit: shutdown event registration failed!\n");
1720 * Callback from GEOM, called when it has finished cleaning up its
/*
 * Drops the reference taken in daregister() before disk_create(), so
 * the periph can finally go away once GEOM is done with the provider.
 */
1724 dadiskgonecb(struct disk *dp)
1726 struct cam_periph *periph;
1728 periph = (struct cam_periph *)dp->d_drv1;
1729 cam_periph_release(periph);
/*
 * Invalidation hook, passed to cam_periph_alloc(): called when the
 * underlying device disappears.  Deregisters our async callbacks,
 * marks the pack invalid, fails all queued I/O with ENXIO, and tells
 * GEOM the disk is gone (final teardown happens in dadiskgonecb()/
 * dacleanup()).
 */
1733 daoninvalidate(struct cam_periph *periph)
1735 struct da_softc *softc;
1737 softc = (struct da_softc *)periph->softc;
1740 * De-register any async callbacks.
/* Event mask 0 == remove this callback registration. */
1742 xpt_register_async(0, daasync, periph, periph->path);
1744 softc->flags |= DA_FLAG_PACK_INVALID;
1746 softc->invalidations++;
1750 * Return all queued I/O with ENXIO.
1751 * XXX Handle any transactions queued to the card
1752 * with XPT_ABORT_CCB.
1754 cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
1757 * Tell GEOM that we've gone away, we'll get a callback when it is
1758 * done cleaning up its resources.
1760 disk_gone(softc->disk);
/*
 * Final periph destructor: free the sysctl trees, drain the callouts,
 * destroy the GEOM disk and free the softc.  Runs unlocked for the
 * pieces that may sleep (sysctl_ctx_free, callout_drain, disk_destroy)
 * and re-takes the periph lock before returning.
 */
1764 dacleanup(struct cam_periph *periph)
1766 struct da_softc *softc;
1768 softc = (struct da_softc *)periph->softc;
1770 cam_periph_unlock(periph);
1772 cam_iosched_fini(softc->cam_iosched);
1775 * If we can't free the sysctl tree, oh well...
/* Only tear down sysctls if dasysctlinit() actually ran. */
1777 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
1779 if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
1780 xpt_print(periph->path,
1781 "can't remove sysctl stats context\n");
1783 if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
1784 xpt_print(periph->path,
1785 "can't remove sysctl context\n");
/* Drain callouts before freeing the softc they reference. */
1788 callout_drain(&softc->mediapoll_c);
1789 disk_destroy(softc->disk);
1790 callout_drain(&softc->sendordered_c);
1791 free(softc, M_DEVBUF);
1792 cam_periph_lock(periph);
/*
 * Async event callback.  Registered globally for AC_FOUND_DEVICE (to
 * attach new units) and per-periph (in daregister()) for media,
 * inquiry and unit-attention events.  Dispatches on `code`; the
 * switch/case framing lines are partially elided in this extract.
 */
1796 daasync(void *callback_arg, u_int32_t code,
1797 struct cam_path *path, void *arg)
1799 struct cam_periph *periph;
1800 struct da_softc *softc;
1802 periph = (struct cam_periph *)callback_arg;
1804 case AC_FOUND_DEVICE:
1806 struct ccb_getdev *cgd;
1809 cgd = (struct ccb_getdev *)arg;
/* Only attach to connected SCSI direct-access-class devices. */
1813 if (cgd->protocol != PROTO_SCSI)
1815 if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
1817 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
1818 && SID_TYPE(&cgd->inq_data) != T_RBC
1819 && SID_TYPE(&cgd->inq_data) != T_OPTICAL
1820 && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
1824 * Allocate a peripheral instance for
1825 * this device and start the probe
1828 status = cam_periph_alloc(daregister, daoninvalidate,
1830 "da", CAM_PERIPH_BIO,
1832 AC_FOUND_DEVICE, cgd);
1834 if (status != CAM_REQ_CMP
1835 && status != CAM_REQ_INPROG)
1836 printf("daasync: Unable to attach to new device "
1837 "due to status 0x%x\n", status);
1840 case AC_ADVINFO_CHANGED:
/* Physical-path advisory info changed: tell GEOM. */
1844 buftype = (uintptr_t)arg;
1845 if (buftype == CDAI_TYPE_PHYS_PATH) {
1846 struct da_softc *softc;
1848 softc = periph->softc;
1849 disk_attr_changed(softc->disk, "GEOM::physpath",
1854 case AC_UNIT_ATTENTION:
1857 int error_code, sense_key, asc, ascq;
1859 softc = (struct da_softc *)periph->softc;
1860 ccb = (union ccb *)arg;
1863 * Handle all UNIT ATTENTIONs except our own,
1864 * as they will be handled by daerror().
1866 if (xpt_path_periph(ccb->ccb_h.path) != periph &&
1867 scsi_extract_sense_ccb(ccb,
1868 &error_code, &sense_key, &asc, &ascq)) {
/* ASC/ASCQ 2A/09: CAPACITY DATA HAS CHANGED -> force reprobe. */
1869 if (asc == 0x2A && ascq == 0x09) {
1870 xpt_print(ccb->ccb_h.path,
1871 "Capacity data has changed\n");
1872 softc->flags &= ~DA_FLAG_PROBED;
/* ASC/ASCQ 28/00: NOT READY TO READY (media change). */
1874 } else if (asc == 0x28 && ascq == 0x00) {
1875 softc->flags &= ~DA_FLAG_PROBED;
1876 disk_media_changed(softc->disk, M_NOWAIT);
/* ASC/ASCQ 3F/03: INQUIRY DATA HAS CHANGED -> force reprobe. */
1877 } else if (asc == 0x3F && ascq == 0x03) {
1878 xpt_print(ccb->ccb_h.path,
1879 "INQUIRY data has changed\n");
1880 softc->flags &= ~DA_FLAG_PROBED;
1884 cam_periph_async(periph, code, path, arg);
/*
 * NOTE(review): the case label for the block below is elided here —
 * it schedules a TEST UNIT READY via the iosched work flags; verify
 * which AC_ event it handles in the full source.
 */
1888 softc = (struct da_softc *)periph->softc;
1889 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
1890 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
1891 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
1899 struct ccb_hdr *ccbh;
1901 softc = (struct da_softc *)periph->softc;
1903 * Don't fail on the expected unit attention
/* Mark all pending CCBs to retry through the post-reset UA. */
1906 softc->flags |= DA_FLAG_RETRY_UA;
1907 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1908 ccbh->ccb_state |= DA_CCB_RETRY_UA;
1911 case AC_INQ_CHANGED:
1912 softc = (struct da_softc *)periph->softc;
1913 softc->flags &= ~DA_FLAG_PROBED;
/* Default: pass the event on to the common periph handler. */
1919 cam_periph_async(periph, code, path, arg);
/*
 * Taskqueue handler that builds the per-unit sysctl tree
 * (kern.cam.da.<unit>.*).  Runs from a task because sysctl
 * registration can sleep.  The periph was acquired when the task was
 * enqueued; every exit path releases that reference.
 */
1923 dasysctlinit(void *context, int pending)
1925 struct cam_periph *periph;
1926 struct da_softc *softc;
1927 char tmpstr[80], tmpstr2[80];
1928 struct ccb_trans_settings cts;
1930 periph = (struct cam_periph *)context;
1932 * periph was held for us when this task was enqueued
1934 if (periph->flags & CAM_PERIPH_INVALID) {
1935 cam_periph_release(periph);
1939 softc = (struct da_softc *)periph->softc;
1940 snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
1941 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
1943 sysctl_ctx_init(&softc->sysctl_ctx);
/* Remember that the context exists so dacleanup() can free it. */
1944 softc->flags |= DA_FLAG_SCTX_INIT;
1945 softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
1946 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1947 CTLFLAG_RD, 0, tmpstr, "device_index");
1948 if (softc->sysctl_tree == NULL) {
1949 printf("dasysctlinit: unable to allocate sysctl tree\n");
1950 cam_periph_release(periph);
1955 * Now register the sysctl handler, so the user can change the value on
1958 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1959 OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
1960 softc, 0, dadeletemethodsysctl, "A",
1961 "BIO_DELETE execution method");
1962 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1963 OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
1964 softc, 0, dadeletemaxsysctl, "Q",
1965 "Maximum BIO_DELETE size");
1966 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1967 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1968 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1969 "Minimum CDB size");
/* Zoned-device (SMR) reporting nodes. */
1971 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1972 OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
1973 softc, 0, dazonemodesysctl, "A",
1975 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1976 OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
1977 softc, 0, dazonesupsysctl, "A",
1979 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1980 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1981 "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
1982 "Optimal Number of Open Sequential Write Preferred Zones");
1983 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1984 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1985 "optimal_nonseq_zones", CTLFLAG_RD,
1986 &softc->optimal_nonseq_zones,
1987 "Optimal Number of Non-Sequentially Written Sequential Write "
1989 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1990 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1991 "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
1992 "Maximum Number of Open Sequential Write Required Zones");
1994 SYSCTL_ADD_INT(&softc->sysctl_ctx,
1995 SYSCTL_CHILDREN(softc->sysctl_tree),
1999 &softc->error_inject,
2001 "error_inject leaf");
2003 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2004 SYSCTL_CHILDREN(softc->sysctl_tree),
2010 "Unmapped I/O leaf");
2012 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2013 SYSCTL_CHILDREN(softc->sysctl_tree),
2022 * Add some addressing info.
/* Query current transport settings to pick up FC WWPN if present. */
2024 memset(&cts, 0, sizeof (cts));
2025 xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
2026 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2027 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2028 cam_periph_lock(periph);
2029 xpt_action((union ccb *)&cts);
2030 cam_periph_unlock(periph);
2031 if (cts.ccb_h.status != CAM_REQ_CMP) {
2032 cam_periph_release(periph);
2035 if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
2036 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
2037 if (fc->valid & CTS_FC_VALID_WWPN) {
2038 softc->wwpn = fc->wwpn;
2039 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2040 SYSCTL_CHILDREN(softc->sysctl_tree),
2041 OID_AUTO, "wwpn", CTLFLAG_RD,
2042 &softc->wwpn, "World Wide Port Name");
2048 * Now add some useful stats.
2049 * XXX These should live in cam_periph and be common to all periphs
2051 softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
2052 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
2053 CTLFLAG_RD, 0, "Statistics");
2054 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2055 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2061 "Transport errors reported by the SIM");
2062 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2063 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2069 "Device timeouts reported by the SIM");
2070 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2071 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2073 "pack_invalidations",
2075 &softc->invalidations,
2077 "Device pack invalidations");
/* Let the I/O scheduler add its own knobs under our tree. */
2080 cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
2081 softc->sysctl_tree);
2083 cam_periph_release(periph);
/*
 * Sysctl handler for kern.cam.da.<unit>.delete_max: reports the
 * current maximum BIO_DELETE size and accepts a new value, capped at
 * the method-derived maximum from dadeletemaxsize().
 */
2087 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2091 struct da_softc *softc;
2093 softc = (struct da_softc *)arg1;
2095 value = softc->disk->d_delmaxsize;
2096 error = sysctl_handle_64(oidp, &value, 0, req);
/* Read-only request or handler error: nothing more to do. */
2097 if ((error != 0) || (req->newptr == NULL))
2100 /* only accept values smaller than the calculated value */
2101 if (value > dadeletemaxsize(softc, softc->delete_method)) {
2104 softc->disk->d_delmaxsize = value;
/*
 * Sysctl handler for kern.cam.da.<unit>.minimum_cmd_size: rounds any
 * user-supplied value up to the nearest valid CDB size (6, 10, 12 or
 * 16) before storing it.
 */
2110 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2114 value = *(int *)arg1;
2116 error = sysctl_handle_int(oidp, &value, 0, req);
2119 || (req->newptr == NULL))
2123 * Acceptable values here are 6, 10, 12 or 16.
/* Bucket the value into the next-larger legal CDB size. */
2127 else if ((value > 6)
2130 else if ((value > 10)
2133 else if (value > 12)
2136 *(int *)arg1 = value;
/*
 * Sysctl handler for kern.cam.da.default_softtimeout: converts
 * between the user-visible milliseconds and the internal sbintime_t
 * representation, rejecting values beyond the hard timeout.
 */
2142 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2147 value = da_default_softtimeout / SBT_1MS;
2149 error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2150 if ((error != 0) || (req->newptr == NULL))
2153 /* XXX Should clip this to a reasonable level */
/* Soft timeout must not exceed the hard timeout (ms). */
2154 if (value > da_default_timeout * 1000)
2157 da_default_softtimeout = value * SBT_1MS;
/*
 * Install `delete_method` as the active BIO_DELETE strategy: record
 * it, recompute the per-request delete size limit, select the matching
 * delete function, and toggle GEOM's CANDELETE flag accordingly.
 */
2162 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2165 softc->delete_method = delete_method;
2166 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2167 softc->delete_func = da_delete_functions[delete_method];
/* Anything above DISABLE is a real delete method. */
2169 if (softc->delete_method > DA_DELETE_DISABLE)
2170 softc->disk->d_flags |= DISKFLAG_CANDELETE;
2172 softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
/*
 * Compute the maximum number of bytes a single BIO_DELETE may cover
 * for the given delete method, based on each method's block/range
 * limits, clamped to the device capacity and scaled to bytes.
 */
2176 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2180 switch(delete_method) {
2181 case DA_DELETE_UNMAP:
2182 sectors = (off_t)softc->unmap_max_lba;
2184 case DA_DELETE_ATA_TRIM:
2185 sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2187 case DA_DELETE_WS16:
2188 sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2190 case DA_DELETE_ZERO:
2191 case DA_DELETE_WS10:
2192 sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
/* Never advertise more than the device actually has. */
2198 return (off_t)softc->params.secsize *
2199 omin(sectors, softc->params.sectors);
/*
 * Complete the probe sequence: choose the BIO_DELETE method, announce
 * the available methods (verbose boot only), move to NORMAL state and
 * wake anyone (daopen()) sleeping on the media size.  Also drops the
 * probe-time hold/reference taken in daregister().
 */
2203 daprobedone(struct cam_periph *periph, union ccb *ccb)
2205 struct da_softc *softc;
2207 softc = (struct da_softc *)periph->softc;
2209 dadeletemethodchoose(softc, DA_DELETE_NONE);
2211 if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2215 snprintf(buf, sizeof(buf), "Delete methods: <");
2217 for (i = 0; i <= DA_DELETE_MAX; i++) {
/* List each available method; always include the active one. */
2218 if ((softc->delete_available & (1 << i)) == 0 &&
2219 i != softc->delete_method)
2222 strlcat(buf, ",", sizeof(buf));
2223 strlcat(buf, da_delete_method_names[i],
/* "(*)" marks the method currently in use. */
2225 if (i == softc->delete_method)
2226 strlcat(buf, "(*)", sizeof(buf));
2229 strlcat(buf, ">", sizeof(buf));
2230 printf("%s%d: %s\n", periph->periph_name,
2231 periph->unit_number, buf);
2235 * Since our peripheral may be invalidated by an error
2236 * above or an external event, we must release our CCB
2237 * before releasing the probe lock on the peripheral.
2238 * The peripheral will only go away once the last lock
2239 * is removed, and we need it around for the CCB release
2242 xpt_release_ccb(ccb);
2243 softc->state = DA_STATE_NORMAL;
2244 softc->flags |= DA_FLAG_PROBED;
/* Wake daopen(), which sleeps on the media size channel. */
2246 wakeup(&softc->disk->d_mediasize);
2247 if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2248 softc->flags |= DA_FLAG_ANNOUNCED;
2249 cam_periph_unhold(periph);
2251 cam_periph_release_locked(periph);
/*
 * Pick the BIO_DELETE method: honor the user's sysctl preference when
 * the device supports it, otherwise take the first supported method in
 * the pre-defined performance order (skipping ZERO, which is a last
 * resort), falling back to `default_method`.
 */
2255 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2259 /* If available, prefer the method requested by user. */
2260 i = softc->delete_method_pref;
/* DISABLE is always selectable even if nothing else is. */
2261 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2262 if (methods & (1 << i)) {
2263 dadeletemethodset(softc, i);
2267 /* Use the pre-defined order to choose the best performing delete. */
2268 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2269 if (i == DA_DELETE_ZERO)
2271 if (softc->delete_available & (1 << i)) {
2272 dadeletemethodset(softc, i);
2277 /* Fallback to default. */
2278 dadeletemethodset(softc, default_method);
/*
 * Sysctl handler for kern.cam.da.<unit>.delete_method: reports the
 * current method by name and accepts a new name, which becomes the
 * user preference fed back through dadeletemethodchoose().
 */
2282 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2286 struct da_softc *softc;
2287 int i, error, methods, value;
2289 softc = (struct da_softc *)arg1;
2291 value = softc->delete_method;
2292 if (value < 0 || value > DA_DELETE_MAX)
2295 p = da_delete_method_names[value];
2296 strncpy(buf, p, sizeof(buf));
2297 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2298 if (error != 0 || req->newptr == NULL)
2300 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
/* Translate the supplied name back to a method index. */
2301 for (i = 0; i <= DA_DELETE_MAX; i++) {
2302 if (strcmp(buf, da_delete_method_names[i]) == 0)
2305 if (i > DA_DELETE_MAX)
2307 softc->delete_method_pref = i;
2308 dadeletemethodchoose(softc, DA_DELETE_NONE);
/*
 * Read-only sysctl handler for kern.cam.da.<unit>.zone_mode: renders
 * the device's zoning model as a human-readable string.
 */
2313 dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2316 struct da_softc *softc;
2319 softc = (struct da_softc *)arg1;
2321 switch (softc->zone_mode) {
2322 case DA_ZONE_DRIVE_MANAGED:
2323 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2325 case DA_ZONE_HOST_AWARE:
2326 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2328 case DA_ZONE_HOST_MANAGED:
2329 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
/* Default case: conventional (non-zoned) device. */
2333 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2337 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
/*
 * Read-only sysctl handler for kern.cam.da.<unit>.zone_support:
 * renders the set of zone capability flags as a comma-separated list
 * built in a fixed-size sbuf ("None" when no flags are set).
 */
2343 dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2346 struct da_softc *softc;
2351 softc = (struct da_softc *)arg1;
2355 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2357 for (i = 0; i < sizeof(da_zone_desc_table) /
2358 sizeof(da_zone_desc_table[0]); i++) {
2359 if (softc->zone_flags & da_zone_desc_table[i].value) {
/* Comma-separate after the first match. */
2361 sbuf_printf(&sb, ", ");
2364 sbuf_cat(&sb, da_zone_desc_table[i].desc);
2369 sbuf_printf(&sb, "None");
2373 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
/*
 * Periph registration callback, invoked by cam_periph_alloc() from
 * daasync(AC_FOUND_DEVICE).  Allocates and initializes the softc,
 * applies quirks, sets up the probe state machine, creates the GEOM
 * disk and registers async callbacks and callouts.  Returns
 * CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on any allocation failure.
 */
2379 daregister(struct cam_periph *periph, void *arg)
2381 struct da_softc *softc;
2382 struct ccb_pathinq cpi;
2383 struct ccb_getdev *cgd;
2387 cgd = (struct ccb_getdev *)arg;
2389 printf("daregister: no getdev CCB, can't register device\n");
2390 return(CAM_REQ_CMP_ERR);
2393 softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
2396 if (softc == NULL) {
2397 printf("daregister: Unable to probe new device. "
2398 "Unable to allocate softc\n");
2399 return(CAM_REQ_CMP_ERR);
2402 if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
2403 printf("daregister: Unable to probe new device. "
2404 "Unable to allocate iosched memory\n");
2405 free(softc, M_DEVBUF);
2406 return(CAM_REQ_CMP_ERR);
2409 LIST_INIT(&softc->pending_ccbs);
/* Probe starts with READ CAPACITY(10); may upgrade to RC16 below. */
2410 softc->state = DA_STATE_PROBE_RC;
2411 bioq_init(&softc->delete_run_queue);
2412 if (SID_IS_REMOVABLE(&cgd->inq_data))
2413 softc->flags |= DA_FLAG_PACK_REMOVABLE;
2414 softc->unmap_max_ranges = UNMAP_MAX_RANGES;
2415 softc->unmap_max_lba = UNMAP_RANGE_MAX;
2416 softc->unmap_gran = 0;
2417 softc->unmap_gran_align = 0;
2418 softc->ws_max_blks = WS16_MAX_BLKS;
2419 softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
/* Assume spinning media until the probe learns otherwise. */
2420 softc->rotating = 1;
2422 periph->softc = softc;
2425 * See if this device has any quirks.
2427 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
2428 (caddr_t)da_quirk_table,
2429 nitems(da_quirk_table),
2430 sizeof(*da_quirk_table), scsi_inquiry_match);
2433 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
2435 softc->quirks = DA_Q_NONE;
2437 /* Check if the SIM does not want 6 byte commands */
2438 bzero(&cpi, sizeof(cpi));
2439 xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
2440 cpi.ccb_h.func_code = XPT_PATH_INQ;
2441 xpt_action((union ccb *)&cpi);
2442 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
2443 softc->quirks |= DA_Q_NO_6_BYTE;
/* Determine the zoning model from device type and quirks. */
2445 if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
2446 softc->zone_mode = DA_ZONE_HOST_MANAGED;
2447 else if (softc->quirks & DA_Q_SMR_DM)
2448 softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
2450 softc->zone_mode = DA_ZONE_NONE;
2452 if (softc->zone_mode != DA_ZONE_NONE) {
/* ATA-behind-SAT vs native SCSI zone command interface. */
2453 if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
2454 if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
2455 softc->zone_interface = DA_ZONE_IF_ATA_SAT;
2457 softc->zone_interface = DA_ZONE_IF_ATA_PASS;
2459 softc->zone_interface = DA_ZONE_IF_SCSI;
2462 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
2465 * Take an exclusive refcount on the periph while dastart is called
2466 * to finish the probe. The reference will be dropped in dadone at
2469 (void)cam_periph_hold(periph, PRIBIO);
2472 * Schedule a periodic event to occasionally send an
2473 * ordered tag to a device.
2475 callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
2476 callout_reset(&softc->sendordered_c,
2477 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
2478 dasendorderedtag, softc);
2480 cam_periph_unlock(periph);
2482 * RBC devices don't have to support READ(6), only READ(10).
2484 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
2485 softc->minimum_cmd_size = 10;
2487 softc->minimum_cmd_size = 6;
2490 * Load the user's default, if any.
2492 snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
2493 periph->unit_number);
2494 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
2497 * 6, 10, 12 and 16 are the currently permissible values.
/* Round the tunable up to the nearest valid CDB size. */
2499 if (softc->minimum_cmd_size < 6)
2500 softc->minimum_cmd_size = 6;
2501 else if ((softc->minimum_cmd_size > 6)
2502 && (softc->minimum_cmd_size <= 10))
2503 softc->minimum_cmd_size = 10;
2504 else if ((softc->minimum_cmd_size > 10)
2505 && (softc->minimum_cmd_size <= 12))
2506 softc->minimum_cmd_size = 12;
2507 else if (softc->minimum_cmd_size > 12)
2508 softc->minimum_cmd_size = 16;
2510 /* Predict whether device may support READ CAPACITY(16). */
2511 if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
2512 (softc->quirks & DA_Q_NO_RC16) == 0) {
2513 softc->flags |= DA_FLAG_CAN_RC16;
2514 softc->state = DA_STATE_PROBE_RC16;
2518 * Register this media as a disk.
2520 softc->disk = disk_alloc();
2521 softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
2522 periph->unit_number, 0,
2523 DEVSTAT_BS_UNAVAILABLE,
2524 SID_TYPE(&cgd->inq_data) |
2525 XPORT_DEVSTAT_TYPE(cpi.transport),
2526 DEVSTAT_PRIORITY_DISK);
2527 softc->disk->d_open = daopen;
2528 softc->disk->d_close = daclose;
2529 softc->disk->d_strategy = dastrategy;
2530 softc->disk->d_dump = dadump;
2531 softc->disk->d_getattr = dagetattr;
2532 softc->disk->d_gone = dadiskgonecb;
2533 softc->disk->d_name = "da";
2534 softc->disk->d_drv1 = periph;
/*
 * Clamp the maximum I/O size: SIM default (DFLTPHYS) when the SIM
 * reported nothing, otherwise the SIM's maxio capped at MAXPHYS.
 */
2536 softc->maxio = DFLTPHYS; /* traditional default */
2537 else if (cpi.maxio > MAXPHYS)
2538 softc->maxio = MAXPHYS; /* for safety */
2540 softc->maxio = cpi.maxio;
2541 softc->disk->d_maxsize = softc->maxio;
2542 softc->disk->d_unit = periph->unit_number;
2543 softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
2544 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
2545 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
2546 if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
2547 softc->unmappedio = 1;
2548 softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
2549 xpt_print(periph->path, "UNMAPPED\n");
/* Build the "<vendor> <product>" description string. */
2551 cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
2552 sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
2553 strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
2554 cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
2555 cgd->inq_data.product, sizeof(cgd->inq_data.product),
2556 sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
2557 softc->disk->d_hba_vendor = cpi.hba_vendor;
2558 softc->disk->d_hba_device = cpi.hba_device;
2559 softc->disk->d_hba_subvendor = cpi.hba_subvendor;
2560 softc->disk->d_hba_subdevice = cpi.hba_subdevice;
2563 * Acquire a reference to the periph before we register with GEOM.
2564 * We'll release this reference once GEOM calls us back (via
2565 * dadiskgonecb()) telling us that our provider has been freed.
2567 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
2568 xpt_print(periph->path, "%s: lost periph during "
2569 "registration!\n", __func__);
2570 cam_periph_lock(periph);
2571 return (CAM_REQ_CMP_ERR);
2574 disk_create(softc->disk, DISK_VERSION);
2575 cam_periph_lock(periph);
2578 * Add async callbacks for events of interest.
2579 * I don't bother checking if this fails as,
2580 * in most cases, the system will function just
2581 * fine without them and the only alternative
2582 * would be to not attach the device on failure.
2584 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
2585 AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
2586 AC_INQ_CHANGED, daasync, periph, periph->path);
2589 * Emit an attribute changed notification just in case
2590 * physical path information arrived before our async
2591 * event handler was registered, but after anyone attaching
2592 * to our disk device polled it.
2594 disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);
2597 * Schedule a periodic media polling events.
/* Poll removable media only when the device can't report AENs. */
2599 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
2600 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
2601 (cgd->inq_flags & SID_AEN) == 0 &&
2602 da_poll_period != 0)
2603 callout_reset(&softc->mediapoll_c, da_poll_period * hz,
2604 damediapoll, periph);
/* Kick off the probe state machine via dastart(). */
2606 xpt_schedule(periph, CAM_PRIORITY_DEV);
2608 return(CAM_REQ_CMP);
/*
 * Map a GEOM DISK_ZONE_* command code to the corresponding ZBC OUT
 * service action.  The RWP return is the final (default) case; the
 * unmatched-command path is elided in this extract.
 */
2612 da_zone_bio_to_scsi(int disk_zone_cmd)
2614 switch (disk_zone_cmd) {
2615 case DISK_ZONE_OPEN:
2616 return ZBC_OUT_SA_OPEN;
2617 case DISK_ZONE_CLOSE:
2618 return ZBC_OUT_SA_CLOSE;
2619 case DISK_ZONE_FINISH:
2620 return ZBC_OUT_SA_FINISH;
2622 return ZBC_OUT_SA_RWP;
2629 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
2632 struct da_softc *softc;
2637 if (bp->bio_cmd != BIO_ZONE) {
2642 softc = periph->softc;
2644 switch (bp->bio_zone.zone_cmd) {
2645 case DISK_ZONE_OPEN:
2646 case DISK_ZONE_CLOSE:
2647 case DISK_ZONE_FINISH:
2648 case DISK_ZONE_RWP: {
2653 zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
2654 if (zone_sa == -1) {
2655 xpt_print(periph->path, "Cannot translate zone "
2656 "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
2662 lba = bp->bio_zone.zone_params.rwp.id;
2664 if (bp->bio_zone.zone_params.rwp.flags &
2665 DISK_ZONE_RWP_FLAG_ALL)
2666 zone_flags |= ZBC_OUT_ALL;
2668 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2669 scsi_zbc_out(&ccb->csio,
2670 /*retries*/ da_retry_count,
2672 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2673 /*service_action*/ zone_sa,
2675 /*zone_flags*/ zone_flags,
2678 /*sense_len*/ SSD_FULL_SIZE,
2679 /*timeout*/ da_default_timeout * 1000);
2682 * Note that in this case, even though we can
2683 * technically use NCQ, we don't bother for several
2685 * 1. It hasn't been tested on a SAT layer that
2686 * supports it. This is new as of SAT-4.
2687 * 2. Even when there is a SAT layer that supports
2688 * it, that SAT layer will also probably support
2689 * ZBC -> ZAC translation, since they are both
2690 * in the SAT-4 spec.
2691 * 3. Translation will likely be preferable to ATA
2692 * passthrough. LSI / Avago at least single
2693 * steps ATA passthrough commands in the HBA,
2694 * regardless of protocol, so unless that
2695 * changes, there is a performance penalty for
2696 * doing ATA passthrough no matter whether
2697 * you're using NCQ/FPDMA, DMA or PIO.
2698 * 4. It requires a 32-byte CDB, which at least at
2699 * this point in CAM requires a CDB pointer, which
2700 * would require us to allocate an additional bit
2701 * of storage separate from the CCB.
2703 error = scsi_ata_zac_mgmt_out(&ccb->csio,
2704 /*retries*/ da_retry_count,
2706 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2708 /*zm_action*/ zone_sa,
2710 /*zone_flags*/ zone_flags,
2713 /*cdb_storage*/ NULL,
2714 /*cdb_storage_len*/ 0,
2715 /*sense_len*/ SSD_FULL_SIZE,
2716 /*timeout*/ da_default_timeout * 1000);
2719 xpt_print(periph->path,
2720 "scsi_ata_zac_mgmt_out() returned an "
2729 case DISK_ZONE_REPORT_ZONES: {
2731 uint32_t num_entries, alloc_size;
2732 struct disk_zone_report *rep;
2734 rep = &bp->bio_zone.zone_params.report;
2736 num_entries = rep->entries_allocated;
2737 if (num_entries == 0) {
2738 xpt_print(periph->path, "No entries allocated for "
2739 "Report Zones request\n");
2743 alloc_size = sizeof(struct scsi_report_zones_hdr) +
2744 (sizeof(struct scsi_report_zones_desc) * num_entries);
2745 alloc_size = min(alloc_size, softc->disk->d_maxsize);
2746 rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
2747 if (rz_ptr == NULL) {
2748 xpt_print(periph->path, "Unable to allocate memory "
2749 "for Report Zones request\n");
2754 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2755 scsi_zbc_in(&ccb->csio,
2756 /*retries*/ da_retry_count,
2758 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2759 /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
2760 /*zone_start_lba*/ rep->starting_id,
2761 /*zone_options*/ rep->rep_options,
2762 /*data_ptr*/ rz_ptr,
2763 /*dxfer_len*/ alloc_size,
2764 /*sense_len*/ SSD_FULL_SIZE,
2765 /*timeout*/ da_default_timeout * 1000);
2768 * Note that in this case, even though we can
2769 * technically use NCQ, we don't bother for several
2771 * 1. It hasn't been tested on a SAT layer that
2772 * supports it. This is new as of SAT-4.
2773 * 2. Even when there is a SAT layer that supports
2774 * it, that SAT layer will also probably support
2775 * ZBC -> ZAC translation, since they are both
2776 * in the SAT-4 spec.
2777 * 3. Translation will likely be preferable to ATA
2778 * passthrough. LSI / Avago at least single
2779 * steps ATA passthrough commands in the HBA,
2780 * regardless of protocol, so unless that
2781 * changes, there is a performance penalty for
2782 * doing ATA passthrough no matter whether
2783 * you're using NCQ/FPDMA, DMA or PIO.
2784 * 4. It requires a 32-byte CDB, which at least at
2785 * this point in CAM requires a CDB pointer, which
2786 * would require us to allocate an additional bit
2787 * of storage separate from the CCB.
2789 error = scsi_ata_zac_mgmt_in(&ccb->csio,
2790 /*retries*/ da_retry_count,
2792 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2794 /*zm_action*/ ATA_ZM_REPORT_ZONES,
2795 /*zone_id*/ rep->starting_id,
2796 /*zone_flags*/ rep->rep_options,
2797 /*data_ptr*/ rz_ptr,
2798 /*dxfer_len*/ alloc_size,
2799 /*cdb_storage*/ NULL,
2800 /*cdb_storage_len*/ 0,
2801 /*sense_len*/ SSD_FULL_SIZE,
2802 /*timeout*/ da_default_timeout * 1000);
2805 xpt_print(periph->path,
2806 "scsi_ata_zac_mgmt_in() returned an "
2813 * For BIO_ZONE, this isn't normally needed. However, it
2814 * is used by devstat_end_transaction_bio() to determine
2815 * how much data was transferred.
2818 * XXX KDM we have a problem. But I'm not sure how to fix
2819 * it. devstat uses bio_bcount - bio_resid to calculate
2820 * the amount of data transferred. The GEOM disk code
2821 * uses bio_length - bio_resid to calculate the amount of
2822 * data in bio_completed. We have different structure
2823 * sizes above and below the ada(4) driver. So, if we
2824 * use the sizes above, the amount transferred won't be
2825 * quite accurate for devstat. If we use different sizes
2826 * for bio_bcount and bio_length (above and below
2827 * respectively), then the residual needs to match one or
2828 * the other. Everything is calculated after the bio
2829 * leaves the driver, so changing the values around isn't
2830 * really an option. For now, just set the count to the
2831 * passed in length. This means that the calculations
2832 * above (e.g. bio_completed) will be correct, but the
2833 * amount of data reported to devstat will be slightly
2834 * under or overstated.
2836 bp->bio_bcount = bp->bio_length;
2842 case DISK_ZONE_GET_PARAMS: {
2843 struct disk_zone_disk_params *params;
2845 params = &bp->bio_zone.zone_params.disk_params;
2846 bzero(params, sizeof(*params));
2848 switch (softc->zone_mode) {
2849 case DA_ZONE_DRIVE_MANAGED:
2850 params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
2852 case DA_ZONE_HOST_AWARE:
2853 params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
2855 case DA_ZONE_HOST_MANAGED:
2856 params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
2860 params->zone_mode = DISK_ZONE_MODE_NONE;
2864 if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
2865 params->flags |= DISK_ZONE_DISK_URSWRZ;
2867 if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
2868 params->optimal_seq_zones = softc->optimal_seq_zones;
2869 params->flags |= DISK_ZONE_OPT_SEQ_SET;
2872 if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
2873 params->optimal_nonseq_zones =
2874 softc->optimal_nonseq_zones;
2875 params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
2878 if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
2879 params->max_seq_zones = softc->max_seq_zones;
2880 params->flags |= DISK_ZONE_MAX_SEQ_SET;
2882 if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
2883 params->flags |= DISK_ZONE_RZ_SUP;
2885 if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
2886 params->flags |= DISK_ZONE_OPEN_SUP;
2888 if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
2889 params->flags |= DISK_ZONE_CLOSE_SUP;
2891 if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
2892 params->flags |= DISK_ZONE_FINISH_SUP;
2894 if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
2895 params->flags |= DISK_ZONE_RWP_SUP;
/*
 * dastart() -- CAM periph "start" callback for da(4).
 *
 * Called with a fresh CCB (start_ccb) when the periph is scheduled.
 * In DA_STATE_NORMAL it pulls the next bio from the cam_iosched
 * scheduler and builds the matching SCSI command (read/write, delete,
 * flush, zone); in the DA_STATE_PROBE_* states it issues the next
 * probe command in the attach-time discovery sequence and tags the
 * CCB with the DA_CCB_PROBE_* state so dadone() can decode it.
 *
 * NOTE(review): this listing has interior lines elided; the comments
 * below describe only what the retained lines demonstrate.
 */
2906 dastart(struct cam_periph *periph, union ccb *start_ccb)
2908 struct da_softc *softc;
2910 softc = (struct da_softc *)periph->softc;
2912 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
2915 switch (softc->state) {
2916 case DA_STATE_NORMAL:
/* Runtime path: fetch the next bio (may be NULL if only TUR work). */
2922 bp = cam_iosched_next_bio(softc->cam_iosched);
/* Deferred TEST UNIT READY work takes priority over bios. */
2924 if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2925 cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2926 scsi_test_unit_ready(&start_ccb->csio,
2927 /*retries*/ da_retry_count,
2931 da_default_timeout * 1000);
2932 start_ccb->ccb_h.ccb_bp = NULL;
2933 start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
2934 xpt_action(start_ccb);
2936 xpt_release_ccb(start_ccb);
/* BIO_DELETE is handed to the currently selected delete method. */
2940 if (bp->bio_cmd == BIO_DELETE) {
2941 if (softc->delete_func != NULL) {
2942 softc->delete_func(periph, start_ccb, bp);
2945 /* Not sure this is possible, but failsafe by lying and saying "sure, done." */
2946 biofinish(bp, NULL, 0);
2951 if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2952 cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2953 cam_periph_release_locked(periph); /* XXX is this still valid? I think so but unverified */
/* Ordered-tag request: either the bio demands it or a periodic
 * ordered tag is due (DA_FLAG_NEED_OTAG). */
2956 if ((bp->bio_flags & BIO_ORDERED) != 0 ||
2957 (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
2958 softc->flags &= ~DA_FLAG_NEED_OTAG;
2959 softc->flags |= DA_FLAG_WAS_OTAG;
2960 tag_code = MSG_ORDERED_Q_TAG;
2962 tag_code = MSG_SIMPLE_Q_TAG;
2965 switch (bp->bio_cmd) {
2972 biotrack(bp, __func__);
/* Writes dirty the media; used to decide on shutdown cache sync. */
2974 if (bp->bio_cmd == BIO_WRITE) {
2975 softc->flags |= DA_FLAG_DIRTY;
2976 rw_op = SCSI_RW_WRITE;
2978 rw_op = SCSI_RW_READ;
2981 data_ptr = bp->bio_data;
2982 if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
2983 rw_op |= SCSI_RW_BIO;
2987 scsi_read_write(&start_ccb->csio,
2988 /*retries*/da_retry_count,
2990 /*tag_action*/tag_code,
2993 softc->minimum_cmd_size,
2994 /*lba*/bp->bio_pblkno,
2995 /*block_count*/bp->bio_bcount /
2996 softc->params.secsize,
2998 /*dxfer_len*/ bp->bio_bcount,
2999 /*sense_len*/SSD_FULL_SIZE,
3000 da_default_timeout * 1000);
3001 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3002 start_ccb->csio.bio = bp;
3008 * BIO_FLUSH doesn't currently communicate
3009 * range data, so we synchronize the cache
3010 * over the whole disk. We also force
3011 * ordered tag semantics the flush applies
3012 * to all previously queued I/O.
3014 scsi_synchronize_cache(&start_ccb->csio,
3021 da_default_timeout*1000);
/* BIO_ZONE: build the zone CCB; queue_ccb says whether one was built. */
3024 int error, queue_ccb;
3028 error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
3030 || (queue_ccb == 0)) {
3031 biofinish(bp, NULL, error);
3032 xpt_release_ccb(start_ccb);
/* Common dispatch for queued bios: mark as buffer I/O, track on the
 * pending list, and send down unlocked. */
3038 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3039 start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3040 start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3043 LIST_INSERT_HEAD(&softc->pending_ccbs,
3044 &start_ccb->ccb_h, periph_links.le);
3046 /* We expect a unit attention from this device */
3047 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3048 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3049 softc->flags &= ~DA_FLAG_RETRY_UA;
3052 start_ccb->ccb_h.ccb_bp = bp;
3054 cam_periph_unlock(periph);
3055 xpt_action(start_ccb);
3056 cam_periph_lock(periph);
3059 /* May have more work to do, so ensure we stay scheduled */
/* ---- Probe sequence below: one state per command issued. ---- */
/* READ CAPACITY (10). */
3063 case DA_STATE_PROBE_RC:
3065 struct scsi_read_capacity_data *rcap;
3067 rcap = (struct scsi_read_capacity_data *)
3068 malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3070 printf("dastart: Couldn't malloc read_capacity data\n");
3071 /* da_free_periph??? */
3074 scsi_read_capacity(&start_ccb->csio,
3075 /*retries*/da_retry_count,
3081 start_ccb->ccb_h.ccb_bp = NULL;
3082 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3083 xpt_action(start_ccb);
/* READ CAPACITY (16) -- for >2TB devices / extra flags. */
3086 case DA_STATE_PROBE_RC16:
3088 struct scsi_read_capacity_data_long *rcaplong;
3090 rcaplong = (struct scsi_read_capacity_data_long *)
3091 malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3092 if (rcaplong == NULL) {
3093 printf("dastart: Couldn't malloc read_capacity data\n");
3094 /* da_free_periph??? */
3097 scsi_read_capacity_16(&start_ccb->csio,
3098 /*retries*/ da_retry_count,
3100 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3104 /*rcap_buf*/ (uint8_t *)rcaplong,
3105 /*rcap_buf_len*/ sizeof(*rcaplong),
3106 /*sense_len*/ SSD_FULL_SIZE,
3107 /*timeout*/ da_default_timeout * 1000);
3108 start_ccb->ccb_h.ccb_bp = NULL;
3109 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3110 xpt_action(start_ccb);
/* Logical Block Provisioning VPD page (delete/UNMAP support). */
3113 case DA_STATE_PROBE_LBP:
3115 struct scsi_vpd_logical_block_prov *lbp;
3117 if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3119 * If we get here we don't support any SBC-3 delete
3120 * methods with UNMAP as the Logical Block Provisioning
3121 * VPD page support is required for devices which
3122 * support it according to T10/1799-D Revision 31
3123 * however older revisions of the spec don't mandate
3124 * this so we currently don't remove these methods
3125 * from the available set.
3127 softc->state = DA_STATE_PROBE_BLK_LIMITS;
3131 lbp = (struct scsi_vpd_logical_block_prov *)
3132 malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3135 printf("dastart: Couldn't malloc lbp data\n");
3136 /* da_free_periph??? */
3140 scsi_inquiry(&start_ccb->csio,
3141 /*retries*/da_retry_count,
3143 /*tag_action*/MSG_SIMPLE_Q_TAG,
3144 /*inq_buf*/(u_int8_t *)lbp,
3145 /*inq_len*/sizeof(*lbp),
3147 /*page_code*/SVPD_LBP,
3148 /*sense_len*/SSD_MIN_SIZE,
3149 /*timeout*/da_default_timeout * 1000);
3150 start_ccb->ccb_h.ccb_bp = NULL;
3151 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3152 xpt_action(start_ccb);
/* Block Limits VPD page (max transfer/unmap limits). */
3155 case DA_STATE_PROBE_BLK_LIMITS:
3157 struct scsi_vpd_block_limits *block_limits;
3159 if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3160 /* Not supported skip to next probe */
3161 softc->state = DA_STATE_PROBE_BDC;
3165 block_limits = (struct scsi_vpd_block_limits *)
3166 malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3168 if (block_limits == NULL) {
3169 printf("dastart: Couldn't malloc block_limits data\n");
3170 /* da_free_periph??? */
3174 scsi_inquiry(&start_ccb->csio,
3175 /*retries*/da_retry_count,
3177 /*tag_action*/MSG_SIMPLE_Q_TAG,
3178 /*inq_buf*/(u_int8_t *)block_limits,
3179 /*inq_len*/sizeof(*block_limits),
3181 /*page_code*/SVPD_BLOCK_LIMITS,
3182 /*sense_len*/SSD_MIN_SIZE,
3183 /*timeout*/da_default_timeout * 1000);
3184 start_ccb->ccb_h.ccb_bp = NULL;
3185 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3186 xpt_action(start_ccb);
/* Block Device Characteristics VPD page. */
3189 case DA_STATE_PROBE_BDC:
3191 struct scsi_vpd_block_characteristics *bdc;
3193 if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3194 softc->state = DA_STATE_PROBE_ATA;
3198 bdc = (struct scsi_vpd_block_characteristics *)
3199 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3202 printf("dastart: Couldn't malloc bdc data\n");
3203 /* da_free_periph??? */
3207 scsi_inquiry(&start_ccb->csio,
3208 /*retries*/da_retry_count,
3210 /*tag_action*/MSG_SIMPLE_Q_TAG,
3211 /*inq_buf*/(u_int8_t *)bdc,
3212 /*inq_len*/sizeof(*bdc),
3214 /*page_code*/SVPD_BDC,
3215 /*sense_len*/SSD_MIN_SIZE,
3216 /*timeout*/da_default_timeout * 1000);
3217 start_ccb->ccb_h.ccb_bp = NULL;
3218 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3219 xpt_action(start_ccb);
/* ATA IDENTIFY via SAT, if the ATA Information VPD page exists. */
3222 case DA_STATE_PROBE_ATA:
3224 struct ata_params *ata_params;
3226 if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3227 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3228 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3230 * Note that if the ATA VPD page isn't
3231 * supported, we aren't talking to an ATA
3232 * device anyway. Support for that VPD
3233 * page is mandatory for SCSI to ATA (SAT)
3234 * translation layers.
3236 softc->state = DA_STATE_PROBE_ZONE;
3239 daprobedone(periph, start_ccb);
3243 ata_params = (struct ata_params*)
3244 malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO);
3246 if (ata_params == NULL) {
3247 xpt_print(periph->path, "Couldn't malloc ata_params "
3249 /* da_free_periph??? */
3253 scsi_ata_identify(&start_ccb->csio,
3254 /*retries*/da_retry_count,
3256 /*tag_action*/MSG_SIMPLE_Q_TAG,
3257 /*data_ptr*/(u_int8_t *)ata_params,
3258 /*dxfer_len*/sizeof(*ata_params),
3259 /*sense_len*/SSD_FULL_SIZE,
3260 /*timeout*/da_default_timeout * 1000);
3261 start_ccb->ccb_h.ccb_bp = NULL;
3262 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3263 xpt_action(start_ccb);
/* ATA READ LOG: General Purpose Log directory. */
3266 case DA_STATE_PROBE_ATA_LOGDIR:
3268 struct ata_gp_log_dir *log_dir;
3273 if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3275 * If we don't have log support, not much point in
3276 * trying to probe zone support.
3278 daprobedone(periph, start_ccb);
3283 * If we have an ATA device (the SCSI ATA Information VPD
3284 * page should be present and the ATA identify should have
3285 * succeeded) and it supports logs, ask for the log directory.
3288 log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3289 if (log_dir == NULL) {
3290 xpt_print(periph->path, "Couldn't malloc log_dir "
3292 daprobedone(periph, start_ccb);
3296 retval = scsi_ata_read_log(&start_ccb->csio,
3297 /*retries*/ da_retry_count,
3299 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3300 /*log_address*/ ATA_LOG_DIRECTORY,
3303 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3304 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3305 /*data_ptr*/ (uint8_t *)log_dir,
3306 /*dxfer_len*/ sizeof(*log_dir),
3307 /*sense_len*/ SSD_FULL_SIZE,
3308 /*timeout*/ da_default_timeout * 1000);
3311 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3312 free(log_dir, M_SCSIDA);
3313 daprobedone(periph, start_ccb);
3316 start_ccb->ccb_h.ccb_bp = NULL;
3317 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3318 xpt_action(start_ccb);
/* ATA Identify Device Data log: list of supported pages. */
3321 case DA_STATE_PROBE_ATA_IDDIR:
3323 struct ata_identify_log_pages *id_dir;
3329 * Check here to see whether the Identify Device log is
3330 * supported in the directory of logs. If so, continue
3331 * with requesting the log of identify device pages.
3333 if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3334 daprobedone(periph, start_ccb);
3338 id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3339 if (id_dir == NULL) {
3340 xpt_print(periph->path, "Couldn't malloc id_dir "
3342 daprobedone(periph, start_ccb);
3346 retval = scsi_ata_read_log(&start_ccb->csio,
3347 /*retries*/ da_retry_count,
3349 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3350 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3351 /*page_number*/ ATA_IDL_PAGE_LIST,
3353 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3354 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3355 /*data_ptr*/ (uint8_t *)id_dir,
3356 /*dxfer_len*/ sizeof(*id_dir),
3357 /*sense_len*/ SSD_FULL_SIZE,
3358 /*timeout*/ da_default_timeout * 1000);
3361 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3362 free(id_dir, M_SCSIDA);
3363 daprobedone(periph, start_ccb);
3366 start_ccb->ccb_h.ccb_bp = NULL;
3367 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3368 xpt_action(start_ccb);
/* ATA Supported Capabilities page of the Identify Device log. */
3371 case DA_STATE_PROBE_ATA_SUP:
3373 struct ata_identify_log_sup_cap *sup_cap;
3379 * Check here to see whether the Supported Capabilities log
3380 * is in the list of Identify Device logs.
3382 if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3383 daprobedone(periph, start_ccb);
3387 sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3388 if (sup_cap == NULL) {
3389 xpt_print(periph->path, "Couldn't malloc sup_cap "
3391 daprobedone(periph, start_ccb);
3395 retval = scsi_ata_read_log(&start_ccb->csio,
3396 /*retries*/ da_retry_count,
3398 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3399 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3400 /*page_number*/ ATA_IDL_SUP_CAP,
3402 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3403 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3404 /*data_ptr*/ (uint8_t *)sup_cap,
3405 /*dxfer_len*/ sizeof(*sup_cap),
3406 /*sense_len*/ SSD_FULL_SIZE,
3407 /*timeout*/ da_default_timeout * 1000);
3410 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3411 free(sup_cap, M_SCSIDA);
3412 daprobedone(periph, start_ccb);
3417 start_ccb->ccb_h.ccb_bp = NULL;
3418 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3419 xpt_action(start_ccb);
/* ATA Zoned Device Information page. */
3422 case DA_STATE_PROBE_ATA_ZONE:
3424 struct ata_zoned_info_log *ata_zone;
3430 * Check here to see whether the zoned device information
3431 * page is supported. If so, continue on to request it.
3432 * If not, skip to DA_STATE_PROBE_LOG or done.
3434 if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3435 daprobedone(periph, start_ccb);
3438 ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3440 if (ata_zone == NULL) {
3441 xpt_print(periph->path, "Couldn't malloc ata_zone "
3443 daprobedone(periph, start_ccb);
3447 retval = scsi_ata_read_log(&start_ccb->csio,
3448 /*retries*/ da_retry_count,
3450 /*tag_action*/ MSG_SIMPLE_Q_TAG,
3451 /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3452 /*page_number*/ ATA_IDL_ZDI,
3454 /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3455 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3456 /*data_ptr*/ (uint8_t *)ata_zone,
3457 /*dxfer_len*/ sizeof(*ata_zone),
3458 /*sense_len*/ SSD_FULL_SIZE,
3459 /*timeout*/ da_default_timeout * 1000);
3462 xpt_print(periph->path, "scsi_ata_read_log() failed!");
3463 free(ata_zone, M_SCSIDA);
3464 daprobedone(periph, start_ccb);
3467 start_ccb->ccb_h.ccb_bp = NULL;
3468 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3469 xpt_action(start_ccb);
/* Zoned Block Device Characteristics VPD page (SCSI ZBC path). */
3473 case DA_STATE_PROBE_ZONE:
3475 struct scsi_vpd_zoned_bdc *bdc;
3478 * Note that this page will be supported for SCSI protocol
3479 * devices that support ZBC (SMR devices), as well as ATA
3480 * protocol devices that are behind a SAT (SCSI to ATA
3481 * Translation) layer that supports converting ZBC commands
3482 * to their ZAC equivalents.
3484 if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3485 daprobedone(periph, start_ccb);
3488 bdc = (struct scsi_vpd_zoned_bdc *)
3489 malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3492 xpt_release_ccb(start_ccb);
3493 xpt_print(periph->path, "Couldn't malloc zone VPD "
3497 scsi_inquiry(&start_ccb->csio,
3498 /*retries*/da_retry_count,
3500 /*tag_action*/MSG_SIMPLE_Q_TAG,
3501 /*inq_buf*/(u_int8_t *)bdc,
3502 /*inq_len*/sizeof(*bdc),
3504 /*page_code*/SVPD_ZONED_BDC,
3505 /*sense_len*/SSD_FULL_SIZE,
3506 /*timeout*/da_default_timeout * 1000);
3507 start_ccb->ccb_h.ccb_bp = NULL;
3508 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3509 xpt_action(start_ccb);
3516 * In each of the methods below, while it's the caller's
3517 * responsibility to ensure the request will fit into a
3518 * single device request, we might have changed the delete
3519 * method due to the device incorrectly advertising either
3520 * its supported methods or limits.
3522 * To prevent this causing further issues we validate the
3523 * request against the method's limits, and warn, which would
3524 * otherwise be unnecessary.
/*
 * da_delete_unmap() -- BIO_DELETE backend using the SCSI UNMAP command.
 *
 * Coalesces the given bio plus any further trims from the I/O scheduler
 * into the fixed-size softc->unmap_buf descriptor list (header at the
 * front, scsi_unmap_desc entries after UNMAP_HEAD_SIZE), honoring the
 * device's unmap_max_lba/unmap_max_ranges limits and, for
 * DA_Q_STRICT_UNMAP devices, the unmap granularity/alignment, then
 * issues a single UNMAP CCB tagged DA_CCB_DELETE.
 *
 * Fixes vs. original: stray double semicolon after the softc
 * declaration removed; duplicated "as as" in the comment corrected.
 */
3527 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3529 struct da_softc *softc = (struct da_softc *)periph->softc;
3531 uint8_t *buf = softc->unmap_buf;
3532 struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3533 uint64_t lba, lastlba = (uint64_t)-1;
3534 uint64_t totalcount = 0;
3536 uint32_t c, lastcount = 0, ranges = 0;
3539 * Currently this doesn't take the UNMAP
3540 * Granularity and Granularity Alignment
3541 * fields into account.
3543 * This could result in both unoptimal unmap
3544 * requests as well as UNMAP calls unmapping
3545 * fewer LBA's than requested.
3548 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3552 * Note: ada and da are different in how they store the
3553 * pending bp's in a trim. ada stores all of them in the
3554 * trim_req.bps. da stores all but the first one in the
3555 * delete_run_queue. ada then completes all the bps in
3556 * its adadone() loop. da completes all the bps in the
3557 * delete_run_queue in dadone, and relies on the biodone
3558 * after to complete. This should be reconciled since there's
3559 * no real reason to do it differently. XXX
3562 bioq_insert_tail(&softc->delete_run_queue, bp1);
3563 lba = bp1->bio_pblkno;
3564 count = bp1->bio_bcount / softc->params.secsize;
3566 /* Try to extend the previous range. */
3567 if (lba == lastlba) {
3568 c = omin(count, UNMAP_RANGE_MAX - lastcount);
3571 scsi_ulto4b(lastcount, d[ranges - 1].length);
3575 } else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
3576 softc->unmap_gran != 0) {
3577 /* Align length of the previous range. */
3578 if ((c = lastcount % softc->unmap_gran) != 0) {
3579 if (lastcount <= c) {
/* Previous range shorter than one granule: drop it entirely. */
3580 totalcount -= lastcount;
3581 lastlba = (uint64_t)-1;
3588 scsi_ulto4b(lastcount, d[ranges - 1].length);
3591 /* Align beginning of the new range. */
3592 c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
3594 c = softc->unmap_gran - c;
/* Start a new descriptor, clamped to the per-range maximum. */
3605 c = omin(count, UNMAP_RANGE_MAX);
3606 if (totalcount + c > softc->unmap_max_lba ||
3607 ranges >= softc->unmap_max_ranges) {
3608 xpt_print(periph->path,
3609 "%s issuing short delete %ld > %ld"
3611 da_delete_method_desc[softc->delete_method],
3612 totalcount + c, softc->unmap_max_lba,
3613 ranges, softc->unmap_max_ranges);
3616 scsi_u64to8b(lba, d[ranges].lba);
3617 scsi_ulto4b(c, d[ranges].length);
/* Pull the next queued trim; put it back if it would exceed limits. */
3625 bp1 = cam_iosched_next_trim(softc->cam_iosched);
3628 if (ranges >= softc->unmap_max_ranges ||
3629 totalcount + bp1->bio_bcount /
3630 softc->params.secsize > softc->unmap_max_lba) {
3631 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3636 /* Align length of the last range. */
3637 if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
3638 (c = lastcount % softc->unmap_gran) != 0) {
3642 scsi_ulto4b(lastcount - c, d[ranges - 1].length);
/* UNMAP header: data length (n*16 + 6) and block descriptor length. */
3645 scsi_ulto2b(ranges * 16 + 6, &buf[0]);
3646 scsi_ulto2b(ranges * 16, &buf[2]);
3648 scsi_unmap(&ccb->csio,
3649 /*retries*/da_retry_count,
3651 /*tag_action*/MSG_SIMPLE_Q_TAG,
3654 /*dxfer_len*/ ranges * 16 + 8,
3655 /*sense_len*/SSD_FULL_SIZE,
3656 da_default_timeout * 1000);
3657 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3658 ccb->ccb_h.flags |= CAM_UNLOCKED;
3659 cam_iosched_submit_trim(softc->cam_iosched);
/*
 * da_delete_trim() -- BIO_DELETE backend using ATA DATA SET MANAGEMENT
 * (TRIM) via SCSI ATA passthrough.
 *
 * Packs (lba, count) pairs into 8-byte little-endian DSM range entries
 * in softc->unmap_buf (48-bit LBA in bytes 0-5, 16-bit count in bytes
 * 6-7), coalescing adjacent requests and pulling additional trims from
 * the scheduler until trim_max_ranges is reached, then issues one
 * scsi_ata_trim() CCB tagged DA_CCB_DELETE.
 */
3663 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3665 struct da_softc *softc = (struct da_softc *)periph->softc;
3667 uint8_t *buf = softc->unmap_buf;
3668 uint64_t lastlba = (uint64_t)-1;
3671 uint32_t lastcount = 0, c, requestcount;
3672 int ranges = 0, off, block_count;
3674 bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3677 if (bp1 != bp)//XXX imp XXX
3678 bioq_insert_tail(&softc->delete_run_queue, bp1);
3679 lba = bp1->bio_pblkno;
3680 count = bp1->bio_bcount / softc->params.secsize;
3681 requestcount = count;
3683 /* Try to extend the previous range. */
3684 if (lba == lastlba) {
/* DSM range count field is 16 bits; clamp the extension. */
3685 c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
3687 off = (ranges - 1) * 8;
3688 buf[off + 6] = lastcount & 0xff;
3689 buf[off + 7] = (lastcount >> 8) & 0xff;
/* Emit new 8-byte DSM entries: 48-bit LBA + 16-bit sector count. */
3695 c = omin(count, ATA_DSM_RANGE_MAX);
3698 buf[off + 0] = lba & 0xff;
3699 buf[off + 1] = (lba >> 8) & 0xff;
3700 buf[off + 2] = (lba >> 16) & 0xff;
3701 buf[off + 3] = (lba >> 24) & 0xff;
3702 buf[off + 4] = (lba >> 32) & 0xff;
3703 buf[off + 5] = (lba >> 40) & 0xff;
3704 buf[off + 6] = c & 0xff;
3705 buf[off + 7] = (c >> 8) & 0xff;
/* Ran out of range slots before the request was fully covered. */
3710 if (count != 0 && ranges == softc->trim_max_ranges) {
3711 xpt_print(periph->path,
3712 "%s issuing short delete %ld > %ld\n",
3713 da_delete_method_desc[softc->delete_method],
3715 (softc->trim_max_ranges - ranges) *
/* Coalesce further queued trims that still fit in the remaining slots. */
3721 bp1 = cam_iosched_next_trim(softc->cam_iosched);
3724 if (bp1->bio_bcount / softc->params.secsize >
3725 (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
3726 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
/* Round the range count up to whole 512-byte DSM blocks. */
3731 block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
3732 scsi_ata_trim(&ccb->csio,
3733 /*retries*/da_retry_count,
3735 /*tag_action*/MSG_SIMPLE_Q_TAG,
3738 /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
3739 /*sense_len*/SSD_FULL_SIZE,
3740 da_default_timeout * 1000);
3741 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3742 ccb->ccb_h.flags |= CAM_UNLOCKED;
3743 cam_iosched_submit_trim(softc->cam_iosched);
3747 * We calculate ws_max_blks here based off d_delmaxsize instead
3748 * of using softc->ws_max_blks, as the latter is the absolute max
3749 * for the device, not the protocol max, which may well be lower.
/*
 * da_delete_ws() -- BIO_DELETE backend using WRITE SAME (10/16).
 *
 * Merges the given bio with further contiguous trims from the I/O
 * scheduler into a single (lba, count) run bounded by the GEOM
 * d_delmaxsize limit, then issues one WRITE SAME CCB over the
 * zero_region. DA_DELETE_ZERO writes zeroes; the other WS methods set
 * the UNMAP bit. Tagged DA_CCB_DELETE for completion handling.
 */
3752 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3754 struct da_softc *softc;
3756 uint64_t ws_max_blks;
3758 uint64_t count; /* forward compat with WS32 */
3760 softc = (struct da_softc *)periph->softc;
3761 ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
3762 lba = bp->bio_pblkno;
3766 if (bp1 != bp)//XXX imp XXX
3767 bioq_insert_tail(&softc->delete_run_queue, bp1);
3768 count += bp1->bio_bcount / softc->params.secsize;
3769 if (count > ws_max_blks) {
/* Request exceeds what one WRITE SAME can cover: warn and truncate. */
3770 xpt_print(periph->path,
3771 "%s issuing short delete %ld > %ld\n",
3772 da_delete_method_desc[softc->delete_method],
3773 count, ws_max_blks);
3774 count = omin(count, ws_max_blks);
/* Only merge the next trim if it is contiguous and still fits. */
3777 bp1 = cam_iosched_next_trim(softc->cam_iosched);
3780 if (lba + count != bp1->bio_pblkno ||
3781 count + bp1->bio_bcount /
3782 softc->params.secsize > ws_max_blks) {
3783 cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3788 scsi_write_same(&ccb->csio,
3789 /*retries*/da_retry_count,
3791 /*tag_action*/MSG_SIMPLE_Q_TAG,
3792 /*byte2*/softc->delete_method ==
3793 DA_DELETE_ZERO ? 0 : SWS_UNMAP,
3794 softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
3796 /*block_count*/count,
3797 /*data_ptr*/ __DECONST(void *, zero_region),
3798 /*dxfer_len*/ softc->params.secsize,
3799 /*sense_len*/SSD_FULL_SIZE,
3800 da_default_timeout * 1000);
3801 ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3802 ccb->ccb_h.flags |= CAM_UNLOCKED;
3803 cam_iosched_submit_trim(softc->cam_iosched);
/*
 * cmd6workaround() -- error-recovery fixups for commands the device
 * turned out not to support.
 *
 * Handles several distinct cases on a failed CCB:
 *  - a failed BIO_DELETE: disable the current delete method, pick an
 *    alternative, and requeue all bios from the delete run queue;
 *  - unsupported PREVENT ALLOW MEDIUM REMOVAL / SYNCHRONIZE CACHE(10):
 *    record the corresponding DA_Q_* quirk so they are not reissued;
 *  - unsupported READ(6)/WRITE(6): rewrite the 6-byte CDB in place
 *    into the equivalent 10-byte CDB, bump minimum_cmd_size, and
 *    requeue the request (releasing the device queue if frozen).
 */
3807 cmd6workaround(union ccb *ccb)
3809 struct scsi_rw_6 cmd6;
3810 struct scsi_rw_10 *cmd10;
3811 struct da_softc *softc;
3816 cdb = ccb->csio.cdb_io.cdb_bytes;
3817 softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
3819 if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
3820 da_delete_methods old_method = softc->delete_method;
3823 * Typically there are two reasons for failure here
3824 * 1. Delete method was detected as supported but isn't
3825 * 2. Delete failed due to invalid params e.g. too big
3827 * While we will attempt to choose an alternative delete method
3828 * this may result in short deletes if the existing delete
3829 * requests from geom are big for the new method chosen.
3831 * This method assumes that the error which triggered this
3832 * will not retry the io otherwise a panic will occur
3834 dadeleteflag(softc, old_method, 0);
3835 dadeletemethodchoose(softc, DA_DELETE_DISABLE);
3836 if (softc->delete_method == DA_DELETE_DISABLE)
3837 xpt_print(ccb->ccb_h.path,
3838 "%s failed, disabling BIO_DELETE\n",
3839 da_delete_method_desc[old_method]);
3841 xpt_print(ccb->ccb_h.path,
3842 "%s failed, switching to %s BIO_DELETE\n",
3843 da_delete_method_desc[old_method],
3844 da_delete_method_desc[softc->delete_method]);
/* Requeue every bio that was batched into the failed delete. */
3846 while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
3847 cam_iosched_queue_work(softc->cam_iosched, bp);
3848 cam_iosched_queue_work(softc->cam_iosched,
3849 (struct bio *)ccb->ccb_h.ccb_bp);
3850 ccb->ccb_h.ccb_bp = NULL;
3854 /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
3855 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3856 (*cdb == PREVENT_ALLOW) &&
3857 (softc->quirks & DA_Q_NO_PREVENT) == 0) {
3859 xpt_print(ccb->ccb_h.path,
3860 "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
3861 softc->quirks |= DA_Q_NO_PREVENT;
3865 /* Detect unsupported SYNCHRONIZE CACHE(10). */
3866 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3867 (*cdb == SYNCHRONIZE_CACHE) &&
3868 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
3870 xpt_print(ccb->ccb_h.path,
3871 "SYNCHRONIZE CACHE(10) not supported.\n");
3872 softc->quirks |= DA_Q_NO_SYNC_CACHE;
3873 softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
3877 /* Translation only possible if CDB is an array and cmd is R/W6 */
3878 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
3879 (*cdb != READ_6 && *cdb != WRITE_6))
3882 xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
3883 "increasing minimum_cmd_size to 10.\n");
3884 softc->minimum_cmd_size = 10;
/* Rebuild the CDB in place: copy out the 6-byte form, then overlay
 * the 10-byte layout with widened LBA and length fields. */
3886 bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
3887 cmd10 = (struct scsi_rw_10 *)cdb;
3888 cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
3890 scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
3891 cmd10->reserved = 0;
3892 scsi_ulto2b(cmd6.length, cmd10->length);
3893 cmd10->control = cmd6.control;
3894 ccb->csio.cdb_len = sizeof(*cmd10);
3896 /* Requeue request, unfreezing queue if necessary */
3897 frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
3898 ccb->ccb_h.status = CAM_REQUEUE_REQ;
3901 cam_release_devq(ccb->ccb_h.path,
3905 /*getcount_only*/0);
3911 dazonedone(struct cam_periph *periph, union ccb *ccb)
3913 struct da_softc *softc;
3916 softc = periph->softc;
3917 bp = (struct bio *)ccb->ccb_h.ccb_bp;
3919 switch (bp->bio_zone.zone_cmd) {
3920 case DISK_ZONE_OPEN:
3921 case DISK_ZONE_CLOSE:
3922 case DISK_ZONE_FINISH:
3925 case DISK_ZONE_REPORT_ZONES: {
3927 struct disk_zone_report *rep;
3928 struct scsi_report_zones_hdr *hdr;
3929 struct scsi_report_zones_desc *desc;
3930 struct disk_zone_rep_entry *entry;
3931 uint32_t num_alloced, hdr_len, num_avail;
3932 uint32_t num_to_fill, i;
3935 rep = &bp->bio_zone.zone_params.report;
3936 avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
3938 * Note that bio_resid isn't normally used for zone
3939 * commands, but it is used by devstat_end_transaction_bio()
3940 * to determine how much data was transferred. Because
3941 * the size of the SCSI/ATA data structures is different
3942 * than the size of the BIO interface structures, the
3943 * amount of data actually transferred from the drive will
3944 * be different than the amount of data transferred to
3947 bp->bio_resid = ccb->csio.resid;
3948 num_alloced = rep->entries_allocated;
3949 hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
3950 if (avail_len < sizeof(*hdr)) {
3952 * Is there a better error than EIO here? We asked
3953 * for at least the header, and we got less than
3956 bp->bio_error = EIO;
3957 bp->bio_flags |= BIO_ERROR;
3958 bp->bio_resid = bp->bio_bcount;
3962 if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
3967 hdr_len = ata ? le32dec(hdr->length) :
3968 scsi_4btoul(hdr->length);
3970 rep->entries_available = hdr_len / sizeof(*desc);
3972 rep->entries_available = 0;
3974 * NOTE: using the same values for the BIO version of the
3975 * same field as the SCSI/ATA values. This means we could
3976 * get some additional values that aren't defined in bio.h
3977 * if more values of the same field are defined later.
3979 rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
3980 rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
3981 scsi_8btou64(hdr->maximum_lba);
3983 * If the drive reports no entries that match the query,
3987 rep->entries_filled = 0;
3991 num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
3992 hdr_len / sizeof(*desc));
3994 * If the drive didn't return any data, then we're done.
3996 if (num_avail == 0) {
3997 rep->entries_filled = 0;
4001 num_to_fill = min(num_avail, rep->entries_allocated);
4003 * If the user didn't allocate any entries for us to fill,
4006 if (num_to_fill == 0) {
4007 rep->entries_filled = 0;
4011 for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4012 i < num_to_fill; i++, desc++, entry++) {
4014 * NOTE: we're mapping the values here directly
4015 * from the SCSI/ATA bit definitions to the bio.h
4016 * definitions. There is also a warning in
4017 * disk_zone.h, but the impact is that if
4018 * additional values are added in the SCSI/ATA
4019 * specs these will be visible to consumers of
4022 entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4023 entry->zone_condition =
4024 (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4025 SRZ_ZONE_COND_SHIFT;
4026 entry->zone_flags |= desc->zone_flags &
4027 (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
4028 entry->zone_length =
4029 ata ? le64dec(desc->zone_length) :
4030 scsi_8btou64(desc->zone_length);
4031 entry->zone_start_lba =
4032 ata ? le64dec(desc->zone_start_lba) :
4033 scsi_8btou64(desc->zone_start_lba);
4034 entry->write_pointer_lba =
4035 ata ? le64dec(desc->write_pointer_lba) :
4036 scsi_8btou64(desc->write_pointer_lba);
4038 rep->entries_filled = num_to_fill;
4041 case DISK_ZONE_GET_PARAMS:
4044 * In theory we should not get a GET_PARAMS bio, since it
4045 * should be handled without queueing the command to the
4048 panic("%s: Invalid zone command %d", __func__,
4049 bp->bio_zone.zone_cmd);
4053 if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4054 free(ccb->csio.data_ptr, M_SCSIDA);
4058 dadone(struct cam_periph *periph, union ccb *done_ccb)
4060 struct da_softc *softc;
4061 struct ccb_scsiio *csio;
4065 softc = (struct da_softc *)periph->softc;
4066 priority = done_ccb->ccb_h.pinfo.priority;
4068 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4070 csio = &done_ccb->csio;
4071 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4072 if (csio->bio != NULL)
4073 biotrack(csio->bio, __func__);
4075 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4077 case DA_CCB_BUFFER_IO:
4080 struct bio *bp, *bp1;
4082 cam_periph_lock(periph);
4083 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4084 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4088 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4093 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4094 if (error == ERESTART) {
4096 * A retry was scheduled, so
4099 cam_periph_unlock(periph);
4102 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4107 * return all queued I/O with EIO, so that
4108 * the client can retry these I/Os in the
4109 * proper order should it attempt to recover.
4114 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4116 * Catastrophic error. Mark our pack as
4120 * XXX See if this is really a media
4123 xpt_print(periph->path,
4124 "Invalidating pack\n");
4125 softc->flags |= DA_FLAG_PACK_INVALID;
4127 softc->invalidations++;
4129 queued_error = ENXIO;
4131 cam_iosched_flush(softc->cam_iosched, NULL,
4134 bp->bio_error = error;
4135 bp->bio_resid = bp->bio_bcount;
4136 bp->bio_flags |= BIO_ERROR;
4138 } else if (bp != NULL) {
4139 if (state == DA_CCB_DELETE)
4142 bp->bio_resid = csio->resid;
4144 if (bp->bio_resid != 0)
4145 bp->bio_flags |= BIO_ERROR;
4147 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4148 cam_release_devq(done_ccb->ccb_h.path,
4152 /*getcount_only*/0);
4153 } else if (bp != NULL) {
4154 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4155 panic("REQ_CMP with QFRZN");
4156 if (bp->bio_cmd == BIO_ZONE)
4157 dazonedone(periph, done_ccb);
4158 else if (state == DA_CCB_DELETE)
4161 bp->bio_resid = csio->resid;
4162 if ((csio->resid > 0)
4163 && (bp->bio_cmd != BIO_ZONE))
4164 bp->bio_flags |= BIO_ERROR;
4165 if (softc->error_inject != 0) {
4166 bp->bio_error = softc->error_inject;
4167 bp->bio_resid = bp->bio_bcount;
4168 bp->bio_flags |= BIO_ERROR;
4169 softc->error_inject = 0;
4173 biotrack(bp, __func__);
4174 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4175 if (LIST_EMPTY(&softc->pending_ccbs))
4176 softc->flags |= DA_FLAG_WAS_OTAG;
4178 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4179 xpt_release_ccb(done_ccb);
4180 if (state == DA_CCB_DELETE) {
4181 TAILQ_HEAD(, bio) queue;
4184 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4185 softc->delete_run_queue.insert_point = NULL;
4187 * Normally, the xpt_release_ccb() above would make sure
4188 * that when we have more work to do, that work would
4189 * get kicked off. However, we specifically keep
4190 * delete_running set to 0 before the call above to
4191 * allow other I/O to progress when many BIO_DELETE
4192 * requests are pushed down. We set delete_running to 0
4193 * and call daschedule again so that we don't stall if
4194 * there are no other I/Os pending apart from BIO_DELETEs.
4196 cam_iosched_trim_done(softc->cam_iosched);
4198 cam_periph_unlock(periph);
4199 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4200 TAILQ_REMOVE(&queue, bp1, bio_queue);
4201 bp1->bio_error = bp->bio_error;
4202 if (bp->bio_flags & BIO_ERROR) {
4203 bp1->bio_flags |= BIO_ERROR;
4204 bp1->bio_resid = bp1->bio_bcount;
4211 cam_periph_unlock(periph);
4217 case DA_CCB_PROBE_RC:
4218 case DA_CCB_PROBE_RC16:
4220 struct scsi_read_capacity_data *rdcap;
4221 struct scsi_read_capacity_data_long *rcaplong;
4222 char announce_buf[80];
4228 if (state == DA_CCB_PROBE_RC)
4229 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4231 rcaplong = (struct scsi_read_capacity_data_long *)
4234 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4235 struct disk_params *dp;
4236 uint32_t block_size;
4238 u_int lalba; /* Lowest aligned LBA. */
4240 if (state == DA_CCB_PROBE_RC) {
4241 block_size = scsi_4btoul(rdcap->length);
4242 maxsector = scsi_4btoul(rdcap->addr);
4246 * According to SBC-2, if the standard 10
4247 * byte READ CAPACITY command returns 2^32,
4248 * we should issue the 16 byte version of
4249 * the command, since the device in question
4250 * has more sectors than can be represented
4251 * with the short version of the command.
4253 if (maxsector == 0xffffffff) {
4254 free(rdcap, M_SCSIDA);
4255 xpt_release_ccb(done_ccb);
4256 softc->state = DA_STATE_PROBE_RC16;
4257 xpt_schedule(periph, priority);
4261 block_size = scsi_4btoul(rcaplong->length);
4262 maxsector = scsi_8btou64(rcaplong->addr);
4263 lalba = scsi_2btoul(rcaplong->lalba_lbp);
4267 * Because GEOM code just will panic us if we
4268 * give them an 'illegal' value we'll avoid that
4271 if (block_size == 0) {
4276 if (block_size >= MAXPHYS) {
4277 xpt_print(periph->path,
4278 "unsupportable block size %ju\n",
4279 (uintmax_t) block_size);
4280 announce_buf[0] = '\0';
4281 cam_periph_invalidate(periph);
4284 * We pass rcaplong into dasetgeom(),
4285 * because it will only use it if it is
4288 dasetgeom(periph, block_size, maxsector,
4289 rcaplong, sizeof(*rcaplong));
4290 lbp = (lalba & SRC16_LBPME_A);
4291 dp = &softc->params;
4292 snprintf(announce_buf, sizeof(announce_buf),
4293 "%juMB (%ju %u byte sectors)",
4294 ((uintmax_t)dp->secsize * dp->sectors) /
4296 (uintmax_t)dp->sectors, dp->secsize);
4301 announce_buf[0] = '\0';
4304 * Retry any UNIT ATTENTION type errors. They
4305 * are expected at boot.
4307 error = daerror(done_ccb, CAM_RETRY_SELTO,
4308 SF_RETRY_UA|SF_NO_PRINT);
4309 if (error == ERESTART) {
4311 * A retry was scheduled, so
4315 } else if (error != 0) {
4317 int sense_key, error_code;
4320 struct ccb_getdev cgd;
4322 /* Don't wedge this device's queue */
4323 status = done_ccb->ccb_h.status;
4324 if ((status & CAM_DEV_QFRZN) != 0)
4325 cam_release_devq(done_ccb->ccb_h.path,
4329 /*getcount_only*/0);
4332 xpt_setup_ccb(&cgd.ccb_h,
4333 done_ccb->ccb_h.path,
4334 CAM_PRIORITY_NORMAL);
4335 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4336 xpt_action((union ccb *)&cgd);
4338 if (scsi_extract_sense_ccb(done_ccb,
4339 &error_code, &sense_key, &asc, &ascq))
4345 * If we tried READ CAPACITY(16) and failed,
4346 * fallback to READ CAPACITY(10).
4348 if ((state == DA_CCB_PROBE_RC16) &&
4349 (softc->flags & DA_FLAG_CAN_RC16) &&
4350 (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4353 (error_code == SSD_CURRENT_ERROR) &&
4354 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4355 softc->flags &= ~DA_FLAG_CAN_RC16;
4356 free(rdcap, M_SCSIDA);
4357 xpt_release_ccb(done_ccb);
4358 softc->state = DA_STATE_PROBE_RC;
4359 xpt_schedule(periph, priority);
4364 * Attach to anything that claims to be a
4365 * direct access or optical disk device,
4366 * as long as it doesn't return a "Logical
4367 * unit not supported" (0x25) error.
4368 * "Internal Target Failure" (0x44) is also
4369 * special and typically means that the
4370 * device is a SATA drive behind a SATL
4371 * translation that's fallen into a
4372 * terminally fatal state.
4375 && (asc != 0x25) && (asc != 0x44)
4376 && (error_code == SSD_CURRENT_ERROR)) {
4377 const char *sense_key_desc;
4378 const char *asc_desc;
4380 dasetgeom(periph, 512, -1, NULL, 0);
4381 scsi_sense_desc(sense_key, asc, ascq,
4385 snprintf(announce_buf,
4386 sizeof(announce_buf),
4387 "Attempt to query device "
4388 "size failed: %s, %s",
4396 xpt_print(periph->path,
4397 "got CAM status %#x\n",
4398 done_ccb->ccb_h.status);
4401 xpt_print(periph->path, "fatal error, "
4402 "failed to attach to device\n");
4405 * Free up resources.
4407 cam_periph_invalidate(periph);
4411 free(csio->data_ptr, M_SCSIDA);
4412 if (announce_buf[0] != '\0' &&
4413 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4415 * Create our sysctl variables, now that we know
4416 * we have successfully attached.
4418 /* increase the refcount */
4419 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
4420 taskqueue_enqueue(taskqueue_thread,
4421 &softc->sysctl_task);
4422 xpt_announce_periph(periph, announce_buf);
4423 xpt_announce_quirks(periph, softc->quirks,
4426 xpt_print(periph->path, "fatal error, "
4427 "could not acquire reference count\n");
4431 /* We already probed the device. */
4432 if (softc->flags & DA_FLAG_PROBED) {
4433 daprobedone(periph, done_ccb);
4437 /* Ensure re-probe doesn't see old delete. */
4438 softc->delete_available = 0;
4439 dadeleteflag(softc, DA_DELETE_ZERO, 1);
4440 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4442 * Based on older SBC-3 spec revisions
4443 * any of the UNMAP methods "may" be
4444 * available via LBP given this flag so
4445 * we flag all of them as available and
4446 * then remove those which further
4447 * probes confirm aren't available
4450 * We could also check readcap(16) p_type
4451 * flag to exclude one or more invalid
4452 * write same (X) types here
4454 dadeleteflag(softc, DA_DELETE_WS16, 1);
4455 dadeleteflag(softc, DA_DELETE_WS10, 1);
4456 dadeleteflag(softc, DA_DELETE_UNMAP, 1);
4458 xpt_release_ccb(done_ccb);
4459 softc->state = DA_STATE_PROBE_LBP;
4460 xpt_schedule(periph, priority);
4464 xpt_release_ccb(done_ccb);
4465 softc->state = DA_STATE_PROBE_BDC;
4466 xpt_schedule(periph, priority);
4469 case DA_CCB_PROBE_LBP:
4471 struct scsi_vpd_logical_block_prov *lbp;
4473 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
4475 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4477 * T10/1799-D Revision 31 states at least one of these
4478 * must be supported but we don't currently enforce this.
4480 dadeleteflag(softc, DA_DELETE_WS16,
4481 (lbp->flags & SVPD_LBP_WS16));
4482 dadeleteflag(softc, DA_DELETE_WS10,
4483 (lbp->flags & SVPD_LBP_WS10));
4484 dadeleteflag(softc, DA_DELETE_UNMAP,
4485 (lbp->flags & SVPD_LBP_UNMAP));
4488 error = daerror(done_ccb, CAM_RETRY_SELTO,
4489 SF_RETRY_UA|SF_NO_PRINT);
4490 if (error == ERESTART)
4492 else if (error != 0) {
4493 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4494 /* Don't wedge this device's queue */
4495 cam_release_devq(done_ccb->ccb_h.path,
4499 /*getcount_only*/0);
4503 * Failure indicates we don't support any SBC-3
4504 * delete methods with UNMAP
4509 free(lbp, M_SCSIDA);
4510 xpt_release_ccb(done_ccb);
4511 softc->state = DA_STATE_PROBE_BLK_LIMITS;
4512 xpt_schedule(periph, priority);
4515 case DA_CCB_PROBE_BLK_LIMITS:
4517 struct scsi_vpd_block_limits *block_limits;
4519 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
4521 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4522 uint32_t max_txfer_len = scsi_4btoul(
4523 block_limits->max_txfer_len);
4524 uint32_t max_unmap_lba_cnt = scsi_4btoul(
4525 block_limits->max_unmap_lba_cnt);
4526 uint32_t max_unmap_blk_cnt = scsi_4btoul(
4527 block_limits->max_unmap_blk_cnt);
4528 uint32_t unmap_gran = scsi_4btoul(
4529 block_limits->opt_unmap_grain);
4530 uint32_t unmap_gran_align = scsi_4btoul(
4531 block_limits->unmap_grain_align);
4532 uint64_t ws_max_blks = scsi_8btou64(
4533 block_limits->max_write_same_length);
4535 if (max_txfer_len != 0) {
4536 softc->disk->d_maxsize = MIN(softc->maxio,
4537 (off_t)max_txfer_len * softc->params.secsize);
4541 * We should already support UNMAP but we check lba
4542 * and block count to be sure
4544 if (max_unmap_lba_cnt != 0x00L &&
4545 max_unmap_blk_cnt != 0x00L) {
4546 softc->unmap_max_lba = max_unmap_lba_cnt;
4547 softc->unmap_max_ranges = min(max_unmap_blk_cnt,
4549 if (unmap_gran > 1) {
4550 softc->unmap_gran = unmap_gran;
4551 if (unmap_gran_align & 0x80000000) {
4552 softc->unmap_gran_align =
4559 * Unexpected UNMAP limits which means the
4560 * device doesn't actually support UNMAP
4562 dadeleteflag(softc, DA_DELETE_UNMAP, 0);
4565 if (ws_max_blks != 0x00L)
4566 softc->ws_max_blks = ws_max_blks;
4569 error = daerror(done_ccb, CAM_RETRY_SELTO,
4570 SF_RETRY_UA|SF_NO_PRINT);
4571 if (error == ERESTART)
4573 else if (error != 0) {
4574 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4575 /* Don't wedge this device's queue */
4576 cam_release_devq(done_ccb->ccb_h.path,
4580 /*getcount_only*/0);
4584 * Failure here doesn't mean UNMAP is not
4585 * supported as this is an optional page.
4587 softc->unmap_max_lba = 1;
4588 softc->unmap_max_ranges = 1;
4592 free(block_limits, M_SCSIDA);
4593 xpt_release_ccb(done_ccb);
4594 softc->state = DA_STATE_PROBE_BDC;
4595 xpt_schedule(periph, priority);
4598 case DA_CCB_PROBE_BDC:
4600 struct scsi_vpd_block_device_characteristics *bdc;
4602 bdc = (struct scsi_vpd_block_device_characteristics *)
4605 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4609 * Disable queue sorting for non-rotational media
4612 u_int16_t old_rate = softc->disk->d_rotation_rate;
4614 valid_len = csio->dxfer_len - csio->resid;
4615 if (SBDC_IS_PRESENT(bdc, valid_len,
4616 medium_rotation_rate)) {
4617 softc->disk->d_rotation_rate =
4618 scsi_2btoul(bdc->medium_rotation_rate);
4619 if (softc->disk->d_rotation_rate ==
4620 SVPD_BDC_RATE_NON_ROTATING) {
4621 cam_iosched_set_sort_queue(
4622 softc->cam_iosched, 0);
4623 softc->rotating = 0;
4625 if (softc->disk->d_rotation_rate != old_rate) {
4626 disk_attr_changed(softc->disk,
4627 "GEOM::rotation_rate", M_NOWAIT);
4630 if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
4631 && (softc->zone_mode == DA_ZONE_NONE)) {
4634 if (scsi_vpd_supported_page(periph,
4635 SVPD_ATA_INFORMATION))
4641 * The Zoned field will only be set for
4642 * Drive Managed and Host Aware drives. If
4643 * they are Host Managed, the device type
4644 * in the standard INQUIRY data should be
4645 * set to T_ZBC_HM (0x14).
4647 if ((bdc->flags & SVPD_ZBC_MASK) ==
4649 softc->zone_mode = DA_ZONE_HOST_AWARE;
4650 softc->zone_interface = (ata_proto) ?
4651 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4652 } else if ((bdc->flags & SVPD_ZBC_MASK) ==
4654 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4655 softc->zone_interface = (ata_proto) ?
4656 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4657 } else if ((bdc->flags & SVPD_ZBC_MASK) !=
4659 xpt_print(periph->path, "Unknown zoned "
4661 bdc->flags & SVPD_ZBC_MASK);
4666 error = daerror(done_ccb, CAM_RETRY_SELTO,
4667 SF_RETRY_UA|SF_NO_PRINT);
4668 if (error == ERESTART)
4670 else if (error != 0) {
4671 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4672 /* Don't wedge this device's queue */
4673 cam_release_devq(done_ccb->ccb_h.path,
4677 /*getcount_only*/0);
4682 free(bdc, M_SCSIDA);
4683 xpt_release_ccb(done_ccb);
4684 softc->state = DA_STATE_PROBE_ATA;
4685 xpt_schedule(periph, priority);
4688 case DA_CCB_PROBE_ATA:
4691 struct ata_params *ata_params;
4696 ata_params = (struct ata_params *)csio->data_ptr;
4697 ptr = (uint16_t *)ata_params;
4701 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4704 for (i = 0; i < sizeof(*ata_params) / 2; i++)
4705 ptr[i] = le16toh(ptr[i]);
4706 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
4707 (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4708 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
4709 if (ata_params->max_dsm_blocks != 0)
4710 softc->trim_max_ranges = min(
4711 softc->trim_max_ranges,
4712 ata_params->max_dsm_blocks *
4713 ATA_DSM_BLK_RANGES);
4716 * Disable queue sorting for non-rotational media
4719 old_rate = softc->disk->d_rotation_rate;
4720 softc->disk->d_rotation_rate =
4721 ata_params->media_rotation_rate;
4722 if (softc->disk->d_rotation_rate ==
4723 ATA_RATE_NON_ROTATING) {
4724 cam_iosched_set_sort_queue(softc->cam_iosched, 0);
4725 softc->rotating = 0;
4727 if (softc->disk->d_rotation_rate != old_rate) {
4728 disk_attr_changed(softc->disk,
4729 "GEOM::rotation_rate", M_NOWAIT);
4732 if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
4733 softc->flags |= DA_FLAG_CAN_ATA_DMA;
4735 if (ata_params->support.extension &
4737 softc->flags |= DA_FLAG_CAN_ATA_LOG;
4740 * At this point, if we have a SATA host aware drive,
4741 * we communicate via ATA passthrough unless the
4742 * SAT layer supports ZBC -> ZAC translation. In
4746 * XXX KDM figure out how to detect a host managed
4749 if (softc->zone_mode == DA_ZONE_NONE) {
4751 * Note that we don't override the zone
4752 * mode or interface if it has already been
4753 * set. This is because it has either been
4754 * set as a quirk, or when we probed the
4755 * SCSI Block Device Characteristics page,
4756 * the zoned field was set. The latter
4757 * means that the SAT layer supports ZBC to
4758 * ZAC translation, and we would prefer to
4759 * use that if it is available.
4761 if ((ata_params->support3 &
4762 ATA_SUPPORT_ZONE_MASK) ==
4763 ATA_SUPPORT_ZONE_HOST_AWARE) {
4764 softc->zone_mode = DA_ZONE_HOST_AWARE;
4765 softc->zone_interface =
4766 DA_ZONE_IF_ATA_PASS;
4767 } else if ((ata_params->support3 &
4768 ATA_SUPPORT_ZONE_MASK) ==
4769 ATA_SUPPORT_ZONE_DEV_MANAGED) {
4770 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4771 softc->zone_interface =
4772 DA_ZONE_IF_ATA_PASS;
4777 error = daerror(done_ccb, CAM_RETRY_SELTO,
4778 SF_RETRY_UA|SF_NO_PRINT);
4779 if (error == ERESTART)
4781 else if (error != 0) {
4782 if ((done_ccb->ccb_h.status &
4783 CAM_DEV_QFRZN) != 0) {
4784 /* Don't wedge this device's queue */
4785 cam_release_devq(done_ccb->ccb_h.path,
4789 /*getcount_only*/0);
4794 free(ata_params, M_SCSIDA);
4795 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
4796 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
4798 * If the ATA IDENTIFY failed, we could be talking
4799 * to a SCSI drive, although that seems unlikely,
4800 * since the drive did report that it supported the
4801 * ATA Information VPD page. If the ATA IDENTIFY
4802 * succeeded, and the SAT layer doesn't support
4803 * ZBC -> ZAC translation, continue on to get the
4804 * directory of ATA logs, and complete the rest of
4805 * the ZAC probe. If the SAT layer does support
4806 * ZBC -> ZAC translation, we want to use that,
4807 * and we'll probe the SCSI Zoned Block Device
4808 * Characteristics VPD page next.
4811 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
4812 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
4813 softc->state = DA_STATE_PROBE_ATA_LOGDIR;
4815 softc->state = DA_STATE_PROBE_ZONE;
4818 if (continue_probe != 0) {
4819 xpt_release_ccb(done_ccb);
4820 xpt_schedule(periph, priority);
4823 daprobedone(periph, done_ccb);
4826 case DA_CCB_PROBE_ATA_LOGDIR:
4830 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4832 softc->valid_logdir_len = 0;
4833 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
4834 softc->valid_logdir_len =
4835 csio->dxfer_len - csio->resid;
4836 if (softc->valid_logdir_len > 0)
4837 bcopy(csio->data_ptr, &softc->ata_logdir,
4838 min(softc->valid_logdir_len,
4839 sizeof(softc->ata_logdir)));
4841 * Figure out whether the Identify Device log is
4842 * supported. The General Purpose log directory
4843 * has a header, and lists the number of pages
4844 * available for each GP log identified by the
4845 * offset into the list.
4847 if ((softc->valid_logdir_len >=
4848 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
4849 && (le16dec(softc->ata_logdir.header) ==
4850 ATA_GP_LOG_DIR_VERSION)
4851 && (le16dec(&softc->ata_logdir.num_pages[
4852 (ATA_IDENTIFY_DATA_LOG *
4853 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
4854 softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
4856 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
4859 error = daerror(done_ccb, CAM_RETRY_SELTO,
4860 SF_RETRY_UA|SF_NO_PRINT);
4861 if (error == ERESTART)
4863 else if (error != 0) {
4865 * If we can't get the ATA log directory,
4866 * then ATA logs are effectively not
4867 * supported even if the bit is set in the
4870 softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
4871 DA_FLAG_CAN_ATA_IDLOG);
4872 if ((done_ccb->ccb_h.status &
4873 CAM_DEV_QFRZN) != 0) {
4874 /* Don't wedge this device's queue */
4875 cam_release_devq(done_ccb->ccb_h.path,
4879 /*getcount_only*/0);
4884 free(csio->data_ptr, M_SCSIDA);
4887 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
4888 softc->state = DA_STATE_PROBE_ATA_IDDIR;
4889 xpt_release_ccb(done_ccb);
4890 xpt_schedule(periph, priority);
4893 daprobedone(periph, done_ccb);
4896 case DA_CCB_PROBE_ATA_IDDIR:
4900 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4901 off_t entries_offset, max_entries;
4904 softc->valid_iddir_len = 0;
4905 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
4906 softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
4907 DA_FLAG_CAN_ATA_ZONE);
4908 softc->valid_iddir_len =
4909 csio->dxfer_len - csio->resid;
4910 if (softc->valid_iddir_len > 0)
4911 bcopy(csio->data_ptr, &softc->ata_iddir,
4912 min(softc->valid_iddir_len,
4913 sizeof(softc->ata_iddir)));
4916 __offsetof(struct ata_identify_log_pages,entries);
4917 max_entries = softc->valid_iddir_len - entries_offset;
4918 if ((softc->valid_iddir_len > (entries_offset + 1))
4919 && (le64dec(softc->ata_iddir.header) ==
4921 && (softc->ata_iddir.entry_count > 0)) {
4924 num_entries = softc->ata_iddir.entry_count;
4925 num_entries = min(num_entries,
4926 softc->valid_iddir_len - entries_offset);
4927 for (i = 0; i < num_entries &&
4928 i < max_entries; i++) {
4929 if (softc->ata_iddir.entries[i] ==
4932 DA_FLAG_CAN_ATA_SUPCAP;
4933 else if (softc->ata_iddir.entries[i]==
4936 DA_FLAG_CAN_ATA_ZONE;
4939 DA_FLAG_CAN_ATA_SUPCAP)
4941 DA_FLAG_CAN_ATA_ZONE))
4946 error = daerror(done_ccb, CAM_RETRY_SELTO,
4947 SF_RETRY_UA|SF_NO_PRINT);
4948 if (error == ERESTART)
4950 else if (error != 0) {
4952 * If we can't get the ATA Identify Data log
4953 * directory, then it effectively isn't
4954 * supported even if the ATA Log directory has
4955 * a non-zero number of pages present for
4958 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
4959 if ((done_ccb->ccb_h.status &
4960 CAM_DEV_QFRZN) != 0) {
4961 /* Don't wedge this device's queue */
4962 cam_release_devq(done_ccb->ccb_h.path,
4966 /*getcount_only*/0);
4971 free(csio->data_ptr, M_SCSIDA);
4974 && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
4975 softc->state = DA_STATE_PROBE_ATA_SUP;
4976 xpt_release_ccb(done_ccb);
4977 xpt_schedule(periph, priority);
4980 daprobedone(periph, done_ccb);
4983 case DA_CCB_PROBE_ATA_SUP:
4987 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4990 struct ata_identify_log_sup_cap *sup_cap;
4993 sup_cap = (struct ata_identify_log_sup_cap *)
4995 valid_len = csio->dxfer_len - csio->resid;
4997 __offsetof(struct ata_identify_log_sup_cap,
4998 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
4999 if (valid_len >= needed_size) {
5000 uint64_t zoned, zac_cap;
5002 zoned = le64dec(sup_cap->zoned_cap);
5003 if (zoned & ATA_ZONED_VALID) {
5005 * This should have already been
5006 * set, because this is also in the
5007 * ATA identify data.
5009 if ((zoned & ATA_ZONED_MASK) ==
5010 ATA_SUPPORT_ZONE_HOST_AWARE)
5013 else if ((zoned & ATA_ZONED_MASK) ==
5014 ATA_SUPPORT_ZONE_DEV_MANAGED)
5016 DA_ZONE_DRIVE_MANAGED;
5019 zac_cap = le64dec(sup_cap->sup_zac_cap);
5020 if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5021 if (zac_cap & ATA_REPORT_ZONES_SUP)
5022 softc->zone_flags |=
5023 DA_ZONE_FLAG_RZ_SUP;
5024 if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5025 softc->zone_flags |=
5026 DA_ZONE_FLAG_OPEN_SUP;
5027 if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5028 softc->zone_flags |=
5029 DA_ZONE_FLAG_CLOSE_SUP;
5030 if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5031 softc->zone_flags |=
5032 DA_ZONE_FLAG_FINISH_SUP;
5033 if (zac_cap & ATA_ND_RWP_SUP)
5034 softc->zone_flags |=
5035 DA_ZONE_FLAG_RWP_SUP;
5038 * This field was introduced in
5039 * ACS-4, r08 on April 28th, 2015.
5040 * If the drive firmware was written
5041 * to an earlier spec, it won't have
5042 * the field. So, assume all
5043 * commands are supported.
5045 softc->zone_flags |=
5046 DA_ZONE_FLAG_SUP_MASK;
5051 error = daerror(done_ccb, CAM_RETRY_SELTO,
5052 SF_RETRY_UA|SF_NO_PRINT);
5053 if (error == ERESTART)
5055 else if (error != 0) {
5057 * If we can't get the ATA Identify Data
5058 * Supported Capabilities page, clear the
5061 softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5063 * And clear zone capabilities.
5065 softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5066 if ((done_ccb->ccb_h.status &
5067 CAM_DEV_QFRZN) != 0) {
5068 /* Don't wedge this device's queue */
5069 cam_release_devq(done_ccb->ccb_h.path,
5073 /*getcount_only*/0);
5078 free(csio->data_ptr, M_SCSIDA);
5081 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5082 softc->state = DA_STATE_PROBE_ATA_ZONE;
5083 xpt_release_ccb(done_ccb);
5084 xpt_schedule(periph, priority);
5087 daprobedone(periph, done_ccb);
5090 case DA_CCB_PROBE_ATA_ZONE:
5094 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5095 struct ata_zoned_info_log *zi_log;
5099 zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5101 valid_len = csio->dxfer_len - csio->resid;
5102 needed_size = __offsetof(struct ata_zoned_info_log,
5103 version_info) + 1 + sizeof(zi_log->version_info);
5104 if (valid_len >= needed_size) {
5107 tmpvar = le64dec(zi_log->zoned_cap);
5108 if (tmpvar & ATA_ZDI_CAP_VALID) {
5109 if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5110 softc->zone_flags |=
5111 DA_ZONE_FLAG_URSWRZ;
5113 softc->zone_flags &=
5114 ~DA_ZONE_FLAG_URSWRZ;
5116 tmpvar = le64dec(zi_log->optimal_seq_zones);
5117 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5118 softc->zone_flags |=
5119 DA_ZONE_FLAG_OPT_SEQ_SET;
5120 softc->optimal_seq_zones = (tmpvar &
5121 ATA_ZDI_OPT_SEQ_MASK);
5123 softc->zone_flags &=
5124 ~DA_ZONE_FLAG_OPT_SEQ_SET;
5125 softc->optimal_seq_zones = 0;
5128 tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5129 if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5130 softc->zone_flags |=
5131 DA_ZONE_FLAG_OPT_NONSEQ_SET;
5132 softc->optimal_nonseq_zones =
5133 (tmpvar & ATA_ZDI_OPT_NS_MASK);
5135 softc->zone_flags &=
5136 ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5137 softc->optimal_nonseq_zones = 0;
5140 tmpvar = le64dec(zi_log->max_seq_req_zones);
5141 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5142 softc->zone_flags |=
5143 DA_ZONE_FLAG_MAX_SEQ_SET;
5144 softc->max_seq_zones =
5145 (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5147 softc->zone_flags &=
5148 ~DA_ZONE_FLAG_MAX_SEQ_SET;
5149 softc->max_seq_zones = 0;
5153 error = daerror(done_ccb, CAM_RETRY_SELTO,
5154 SF_RETRY_UA|SF_NO_PRINT);
5155 if (error == ERESTART)
5157 else if (error != 0) {
5158 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5159 softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5161 if ((done_ccb->ccb_h.status &
5162 CAM_DEV_QFRZN) != 0) {
5163 /* Don't wedge this device's queue */
5164 cam_release_devq(done_ccb->ccb_h.path,
5168 /*getcount_only*/0);
5173 free(csio->data_ptr, M_SCSIDA);
5175 daprobedone(periph, done_ccb);
5178 case DA_CCB_PROBE_ZONE:
5182 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5185 struct scsi_vpd_zoned_bdc *zoned_bdc;
5188 zoned_bdc = (struct scsi_vpd_zoned_bdc *)
5190 valid_len = csio->dxfer_len - csio->resid;
5191 needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5192 max_seq_req_zones) + 1 +
5193 sizeof(zoned_bdc->max_seq_req_zones);
5194 if ((valid_len >= needed_len)
5195 && (scsi_2btoul(zoned_bdc->page_length) >=
5197 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5198 softc->zone_flags |=
5199 DA_ZONE_FLAG_URSWRZ;
5201 softc->zone_flags &=
5202 ~DA_ZONE_FLAG_URSWRZ;
5203 softc->optimal_seq_zones =
5204 scsi_4btoul(zoned_bdc->optimal_seq_zones);
5205 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5206 softc->optimal_nonseq_zones = scsi_4btoul(
5207 zoned_bdc->optimal_nonseq_zones);
5208 softc->zone_flags |=
5209 DA_ZONE_FLAG_OPT_NONSEQ_SET;
5210 softc->max_seq_zones =
5211 scsi_4btoul(zoned_bdc->max_seq_req_zones);
5212 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5215 * All of the zone commands are mandatory for SCSI
5218 * XXX KDM this is valid as of September 2015.
5219 * Re-check this assumption once the SAT spec is
5220 * updated to support SCSI ZBC to ATA ZAC mapping.
5221 * Since ATA allows zone commands to be reported
5222 * as supported or not, this may not necessarily
5223 * be true for an ATA device behind a SAT (SCSI to
5224 * ATA Translation) layer.
5226 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5228 error = daerror(done_ccb, CAM_RETRY_SELTO,
5229 SF_RETRY_UA|SF_NO_PRINT);
5230 if (error == ERESTART)
5232 else if (error != 0) {
5233 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5234 /* Don't wedge this device's queue */
5235 cam_release_devq(done_ccb->ccb_h.path,
5239 /*getcount_only*/0);
5243 daprobedone(periph, done_ccb);
5247 /* No-op. We're polling */
5251 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5253 if (daerror(done_ccb, CAM_RETRY_SELTO,
5254 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) ==
5257 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5258 cam_release_devq(done_ccb->ccb_h.path,
5262 /*getcount_only*/0);
5264 xpt_release_ccb(done_ccb);
5265 cam_periph_release_locked(periph);
5271 xpt_release_ccb(done_ccb);
5275 dareprobe(struct cam_periph *periph)
5277 struct da_softc *softc;
5280 softc = (struct da_softc *)periph->softc;
5282 /* Probe in progress; don't interfere. */
5283 if (softc->state != DA_STATE_NORMAL)
5286 status = cam_periph_acquire(periph);
5287 KASSERT(status == CAM_REQ_CMP,
5288 ("dareprobe: cam_periph_acquire failed"));
5290 if (softc->flags & DA_FLAG_CAN_RC16)
5291 softc->state = DA_STATE_PROBE_RC16;
5293 softc->state = DA_STATE_PROBE_RC;
5295 xpt_schedule(periph, CAM_PRIORITY_DEV);
/*
 * daerror(): driver-specific error recovery for a completed CCB.
 *
 * Classifies the failure (invalid CDB, sense data, transport error),
 * performs side effects (media-change / capacity-change / INQUIRY-change
 * notifications, invalidating the probed state), adjusts sense_flags, and
 * hands the final disposition to cam_periph_error().
 *
 * NOTE(review): listing has elided lines; e.g. the early return after the
 * ERESTART check and the switch-case bodies are not visible here.
 */
5299 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
5301 struct da_softc *softc;
5302 struct cam_periph *periph;
5303 int error, error_code, sense_key, asc, ascq;
5305 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5306 if (ccb->csio.bio != NULL)
5307 biotrack(ccb->csio.bio, __func__);
5310 periph = xpt_path_periph(ccb->ccb_h.path);
5311 softc = (struct da_softc *)periph->softc;
5314 * Automatically detect devices that do not support
5315 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
5318 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
5319 error = cmd6workaround(ccb);
5320 } else if (scsi_extract_sense_ccb(ccb,
5321 &error_code, &sense_key, &asc, &ascq)) {
5322 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
5323 error = cmd6workaround(ccb);
5325 * If the target replied with CAPACITY DATA HAS CHANGED UA,
5326 * query the capacity and notify upper layers.
5328 else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5329 asc == 0x2A && ascq == 0x09) {
5330 xpt_print(periph->path, "Capacity data has changed\n");
5331 softc->flags &= ~DA_FLAG_PROBED;
5333 sense_flags |= SF_NO_PRINT;
/* UA 28h/00h: medium may have changed — tell GEOM. */
5334 } else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5335 asc == 0x28 && ascq == 0x00) {
5336 softc->flags &= ~DA_FLAG_PROBED;
5337 disk_media_changed(softc->disk, M_NOWAIT);
5338 } else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5339 asc == 0x3F && ascq == 0x03) {
5340 xpt_print(periph->path, "INQUIRY data has changed\n");
5341 softc->flags &= ~DA_FLAG_PROBED;
5343 sense_flags |= SF_NO_PRINT;
/* NOT READY / 3Ah (medium not present): mark the pack gone once. */
5344 } else if (sense_key == SSD_KEY_NOT_READY &&
5345 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
5346 softc->flags |= DA_FLAG_PACK_INVALID;
5347 disk_media_gone(softc->disk, M_NOWAIT);
5350 if (error == ERESTART)
/* Transport-level failures (case bodies elided in this listing). */
5354 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
5355 case CAM_CMD_TIMEOUT:
5358 case CAM_REQ_ABORTED:
5359 case CAM_REQ_CMP_ERR:
5360 case CAM_REQ_TERMIO:
5361 case CAM_UNREC_HBA_ERROR:
5362 case CAM_DATA_RUN_ERR:
5372 * Until we have a better way of doing pack validation,
5373 * don't treat UAs as errors.
5375 sense_flags |= SF_RETRY_UA;
5377 if (softc->quirks & DA_Q_RETRY_BUSY)
5378 sense_flags |= SF_RETRY_BUSY;
/* Generic CAM error processing decides retry vs. fail. */
5379 return(cam_periph_error(ccb, cam_flags, sense_flags,
5380 &softc->saved_ccb));
/*
 * damediapoll(): periodic media-poll callout.
 *
 * When the device is idle (no pending CCBs and no TUR work already
 * queued), take a periph reference and flag a TEST UNIT READY through the
 * I/O scheduler; then re-arm the callout while da_poll_period is non-zero.
 *
 * NOTE(review): listing has elided lines (e.g. the xpt_schedule call that
 * presumably follows the acquire) — confirm against the full source.
 */
5384 damediapoll(void *arg)
5386 struct cam_periph *periph = arg;
5387 struct da_softc *softc = periph->softc;
5389 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
5390 LIST_EMPTY(&softc->pending_ccbs)) {
5391 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
5392 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
5396 /* Queue us up again */
5397 if (da_poll_period != 0)
5398 callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
/*
 * daprevent(): issue PREVENT ALLOW MEDIUM REMOVAL synchronously.
 *
 * action is PR_ALLOW or PR_PREVENT.  A no-op when DA_FLAG_PACK_LOCKED
 * already matches the requested state.  On completion the flag is updated
 * to reflect the new lock state.
 *
 * NOTE(review): listing has elided lines — the scsi_prevent() argument
 * list and the error check guarding the flag update are not all visible.
 */
5402 daprevent(struct cam_periph *periph, int action)
5404 struct da_softc *softc;
5408 softc = (struct da_softc *)periph->softc;
/* Already in the requested state?  Nothing to do. */
5410 if (((action == PR_ALLOW)
5411 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
5412 || ((action == PR_PREVENT)
5413 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
5417 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5419 scsi_prevent(&ccb->csio,
/* Run the command to completion; daerror handles recovery. */
5427 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
5428 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
5431 if (action == PR_ALLOW)
5432 softc->flags &= ~DA_FLAG_PACK_LOCKED;
5434 softc->flags |= DA_FLAG_PACK_LOCKED;
5437 xpt_release_ccb(ccb);
/*
 * dasetgeom(): record disk parameters from READ CAPACITY results.
 *
 * block_len/maxsector come from READ CAPACITY; rcaplong (may be NULL) is
 * the long-form READ CAPACITY(16) data, rcap_len its size.  Computes
 * stripe size/offset (from LBPPBE/LALBA, the 4K quirk, or UNMAP
 * granularity), asks the controller for a C/H/S geometry, caches the long
 * rcap data in the EDT when it changed, and pushes everything into the
 * GEOM disk, finishing with disk_resize().
 *
 * NOTE(review): listing has elided lines (closing braces, some fallback
 * assignments); gaps are marked by the jump in embedded line numbers.
 */
5441 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
5442 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
5444 struct ccb_calc_geometry ccg;
5445 struct da_softc *softc;
5446 struct disk_params *dp;
5447 u_int lbppbe, lalba;
5450 softc = (struct da_softc *)periph->softc;
5452 dp = &softc->params;
5453 dp->secsize = block_len;
5454 dp->sectors = maxsector + 1;
/* Prefer device-reported physical-block exponent and alignment. */
5455 if (rcaplong != NULL) {
5456 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
5457 lalba = scsi_2btoul(rcaplong->lalba_lbp);
5458 lalba &= SRC16_LALBA_A;
5465 dp->stripesize = block_len << lbppbe;
5466 dp->stripeoffset = (dp->stripesize - block_len * lalba) %
5468 } else if (softc->quirks & DA_Q_4K) {
5469 dp->stripesize = 4096;
5470 dp->stripeoffset = 0;
5471 } else if (softc->unmap_gran != 0) {
5472 dp->stripesize = block_len * softc->unmap_gran;
5473 dp->stripeoffset = (dp->stripesize - block_len *
5474 softc->unmap_gran_align) % dp->stripesize;
5477 dp->stripeoffset = 0;
5480 * Have the controller provide us with a geometry
5481 * for this disk. The only time the geometry
5482 * matters is when we boot and the controller
5483 * is the only one knowledgeable enough to come
5484 * up with something that will make this a bootable
5487 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
5488 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
5489 ccg.block_size = dp->secsize;
5490 ccg.volume_size = dp->sectors;
5492 ccg.secs_per_track = 0;
5494 xpt_action((union ccb*)&ccg);
5495 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5497 * We don't know what went wrong here- but just pick
5498 * a geometry so we don't have nasty things like divide
5502 dp->secs_per_track = 255;
5503 dp->cylinders = dp->sectors / (255 * 255);
5504 if (dp->cylinders == 0) {
5508 dp->heads = ccg.heads;
5509 dp->secs_per_track = ccg.secs_per_track;
5510 dp->cylinders = ccg.cylinders;
5514 * If the user supplied a read capacity buffer, and if it is
5515 * different than the previous buffer, update the data in the EDT.
5516 * If it's the same, we don't bother. This avoids sending an
5517 * update every time someone opens this device.
5519 if ((rcaplong != NULL)
5520 && (bcmp(rcaplong, &softc->rcaplong,
5521 min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
5522 struct ccb_dev_advinfo cdai;
5524 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
5525 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
5526 cdai.buftype = CDAI_TYPE_RCAPLONG;
5527 cdai.flags = CDAI_FLAG_STORE;
5528 cdai.bufsiz = rcap_len;
5529 cdai.buf = (uint8_t *)rcaplong;
5530 xpt_action((union ccb *)&cdai);
5531 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
5532 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
5533 if (cdai.ccb_h.status != CAM_REQ_CMP) {
5534 xpt_print(periph->path, "%s: failed to set read "
5535 "capacity advinfo\n", __func__);
5536 /* Use cam_error_print() to decode the status */
5537 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
/* Cache the new long rcap data for future comparisons. */
5540 bcopy(rcaplong, &softc->rcaplong,
5541 min(sizeof(softc->rcaplong), rcap_len));
/* Propagate the computed parameters to the GEOM disk. */
5545 softc->disk->d_sectorsize = softc->params.secsize;
5546 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
5547 softc->disk->d_stripesize = softc->params.stripesize;
5548 softc->disk->d_stripeoffset = softc->params.stripeoffset;
5549 /* XXX: these are not actually "firmware" values, so they may be wrong */
5550 softc->disk->d_fwsectors = softc->params.secs_per_track;
5551 softc->disk->d_fwheads = softc->params.heads;
5552 softc->disk->d_devstat->block_size = softc->params.secsize;
5553 softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
5555 error = disk_resize(softc->disk, M_NOWAIT);
5557 xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
/*
 * dasendorderedtag(): periodic callout enforcing forward progress.
 *
 * When ordered tags are enabled and I/O is outstanding, request that the
 * next queued command be sent with an ordered tag (NEED_OTAG) unless one
 * was already sent in the last interval (WAS_OTAG); then re-arm itself.
 */
5561 dasendorderedtag(void *arg)
5563 struct da_softc *softc = arg;
5565 if (da_send_ordered) {
5566 if (!LIST_EMPTY(&softc->pending_ccbs)) {
5567 if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
5568 softc->flags |= DA_FLAG_NEED_OTAG;
5569 softc->flags &= ~DA_FLAG_WAS_OTAG;
5572 /* Queue us up again */
5573 callout_reset(&softc->sendordered_c,
5574 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
5575 dasendorderedtag, softc);
5579 * Step through all DA peripheral drivers, and if the device is still open,
5580 * sync the disk cache to physical media.
/*
 * NOTE(review): listing has elided lines (closing braces, `continue`s,
 * and the scsi_synchronize_cache() argument list are not all visible).
 */
5583 dashutdown(void * arg, int howto)
5585 struct cam_periph *periph;
5586 struct da_softc *softc;
5590 CAM_PERIPH_FOREACH(periph, &dadriver) {
5591 softc = (struct da_softc *)periph->softc;
/* Panic path: scheduler stopped, so no locking/sleeping allowed. */
5592 if (SCHEDULER_STOPPED()) {
5593 /* If we panicked with the lock held, do not recurse. */
5594 if (!cam_periph_owned(periph) &&
5595 (softc->flags & DA_FLAG_OPEN)) {
5596 dadump(softc->disk, NULL, 0, 0, 0);
5600 cam_periph_lock(periph);
5603 * We only sync the cache if the drive is still open, and
5604 * if the drive is capable of it.
5606 if (((softc->flags & DA_FLAG_OPEN) == 0)
5607 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
5608 cam_periph_unlock(periph);
5612 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5613 scsi_synchronize_cache(&ccb->csio,
5617 /*begin_lba*/0, /* whole disk */
/* Best effort at shutdown: no recovery, no retries, quiet. */
5622 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
5623 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
5624 softc->disk->d_devstat);
5626 xpt_print(periph->path, "Synchronize cache failed\n");
5627 xpt_release_ccb(ccb);
5628 cam_periph_unlock(periph);
5632 #else /* !_KERNEL */
5635 * XXX These are only left out of the kernel build to silence warnings. If,
5636 * for some reason these functions are used in the kernel, the ifdefs should
5637 * be moved so they are included both in the kernel and userland.
/*
 * scsi_format_unit(): build a FORMAT UNIT CDB in the supplied csio.
 *
 * byte2 carries the format-option bits; ileave is the interleave value.
 * Data direction is OUT when a parameter list is supplied (dxfer_len > 0),
 * NONE otherwise.
 *
 * NOTE(review): the bzero of the CDB and the fill_csio call are elided in
 * this listing.
 */
5640 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
5641 void (*cbfcnp)(struct cam_periph *, union ccb *),
5642 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
5643 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5646 struct scsi_format_unit *scsi_cmd;
5648 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
5649 scsi_cmd->opcode = FORMAT_UNIT;
5650 scsi_cmd->byte2 = byte2;
5651 scsi_ulto2b(ileave, scsi_cmd->interleave);
5656 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
/*
 * scsi_read_defects(): build a READ DEFECT DATA CDB.
 *
 * Chooses the 10-byte form when the caller permits it (minimum_cmd_size
 * <= 10), no address-descriptor index is needed, and the allocation
 * length fits; otherwise uses the 12-byte form, which also encodes
 * addr_desc_index.  Data direction is always IN.
 */
5666 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
5667 void (*cbfcnp)(struct cam_periph *, union ccb *),
5668 uint8_t tag_action, uint8_t list_format,
5669 uint32_t addr_desc_index, uint8_t *data_ptr,
5670 uint32_t dxfer_len, int minimum_cmd_size,
5671 uint8_t sense_len, uint32_t timeout)
5676 * These conditions allow using the 10 byte command. Otherwise we
5677 * need to use the 12 byte command.
5679 if ((minimum_cmd_size <= 10)
5680 && (addr_desc_index == 0)
5681 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
5682 struct scsi_read_defect_data_10 *cdb10;
5684 cdb10 = (struct scsi_read_defect_data_10 *)
5685 &csio->cdb_io.cdb_bytes;
5687 cdb_len = sizeof(*cdb10);
5688 bzero(cdb10, cdb_len);
5689 cdb10->opcode = READ_DEFECT_DATA_10;
5690 cdb10->format = list_format;
5691 scsi_ulto2b(dxfer_len, cdb10->alloc_length);
5693 struct scsi_read_defect_data_12 *cdb12;
5695 cdb12 = (struct scsi_read_defect_data_12 *)
5696 &csio->cdb_io.cdb_bytes;
5698 cdb_len = sizeof(*cdb12);
5699 bzero(cdb12, cdb_len);
5700 cdb12->opcode = READ_DEFECT_DATA_12;
5701 cdb12->format = list_format;
5702 scsi_ulto4b(dxfer_len, cdb12->alloc_length);
5703 scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
5709 /*flags*/ CAM_DIR_IN,
/*
 * scsi_sanitize(): build a SANITIZE CDB in the supplied csio.
 *
 * byte2 selects the sanitize service action/modifier bits; control goes
 * into the CONTROL byte; the parameter-list length is taken from
 * dxfer_len.  Data direction is OUT when a parameter list is supplied.
 */
5719 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
5720 void (*cbfcnp)(struct cam_periph *, union ccb *),
5721 u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
5722 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5725 struct scsi_sanitize *scsi_cmd;
5727 scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
5728 scsi_cmd->opcode = SANITIZE;
5729 scsi_cmd->byte2 = byte2;
5730 scsi_cmd->control = control;
5731 scsi_ulto2b(dxfer_len, scsi_cmd->length);
5736 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5745 #endif /* _KERNEL */
/*
 * scsi_zbc_out(): build a ZBC OUT CDB (zone management out).
 *
 * service_action selects the zone operation; zone_id is the starting LBA
 * of the target zone; zone_flags go into the flags byte (e.g. the ALL
 * bit).  Data direction is OUT when a payload is supplied.
 */
5748 scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
5749 void (*cbfcnp)(struct cam_periph *, union ccb *),
5750 uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
5751 uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
5752 uint8_t sense_len, uint32_t timeout)
5754 struct scsi_zbc_out *scsi_cmd;
5756 scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
5757 scsi_cmd->opcode = ZBC_OUT;
5758 scsi_cmd->service_action = service_action;
5759 scsi_u64to8b(zone_id, scsi_cmd->zone_id);
5760 scsi_cmd->zone_flags = zone_flags;
5765 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
/*
 * scsi_zbc_in(): build a ZBC IN CDB (zone management in, e.g.
 * REPORT ZONES).
 *
 * service_action selects the operation; zone_start_lba is the first zone
 * to report; zone_options go into the options byte (reporting filters).
 * Data direction is IN when a buffer is supplied.
 */
5775 scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
5776 void (*cbfcnp)(struct cam_periph *, union ccb *),
5777 uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
5778 uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
5779 uint8_t sense_len, uint32_t timeout)
5781 struct scsi_zbc_in *scsi_cmd;
5783 scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
5784 scsi_cmd->opcode = ZBC_IN;
5785 scsi_cmd->service_action = service_action;
5786 scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
5787 scsi_cmd->zone_options = zone_options;
5792 /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
/*
 * scsi_ata_zac_mgmt_out(): build an ATA pass-through CDB for a ZAC
 * MANAGEMENT OUT command (the ATA analogue of ZBC OUT).
 *
 * Two encodings are produced:
 *  - non-NCQ: ZAC MANAGEMENT OUT with the zone action/flags in the
 *    FEATURES field, DMA (or non-data when dxfer_len == 0);
 *  - NCQ (use_ncq != 0): NCQ NON DATA or SEND FPDMA QUEUED with the
 *    action/flags in the AUXILIARY field and the transfer length in
 *    FEATURES.
 * The final CDB is emitted through scsi_ata_pass().
 *
 * NOTE(review): listing has elided lines (the if/else brackets around the
 * NCQ branch, `features_out = 0;` for the 65536-block case, and the
 * error return for oversized transfers are not visible).
 */
5803 scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
5804 void (*cbfcnp)(struct cam_periph *, union ccb *),
5805 uint8_t tag_action, int use_ncq,
5806 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
5807 uint8_t *data_ptr, uint32_t dxfer_len,
5808 uint8_t *cdb_storage, size_t cdb_storage_len,
5809 uint8_t sense_len, uint32_t timeout)
5811 uint8_t command_out, protocol, ata_flags;
5812 uint16_t features_out;
5813 uint32_t sectors_out, auxiliary;
/* Non-NCQ form: action/flags ride in the FEATURES register. */
5819 command_out = ATA_ZAC_MANAGEMENT_OUT;
5820 features_out = (zm_action & 0xf) | (zone_flags << 8);
5821 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
5822 if (dxfer_len == 0) {
5823 protocol = AP_PROTO_NON_DATA;
5824 ata_flags |= AP_FLAG_TLEN_NO_DATA;
5827 protocol = AP_PROTO_DMA;
5828 ata_flags |= AP_FLAG_TLEN_SECT_CNT |
5829 AP_FLAG_TDIR_TO_DEV;
5830 sectors_out = ((dxfer_len >> 9) & 0xffff);
/* NCQ form: choose NCQ NON DATA vs. SEND FPDMA QUEUED. */
5834 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
5835 if (dxfer_len == 0) {
5836 command_out = ATA_NCQ_NON_DATA;
5837 features_out = ATA_NCQ_ZAC_MGMT_OUT;
5839 * We're assuming the SCSI to ATA translation layer
5840 * will set the NCQ tag number in the tag field.
5841 * That isn't clear from the SAT-4 spec (as of rev 05).
5844 ata_flags |= AP_FLAG_TLEN_NO_DATA;
5846 command_out = ATA_SEND_FPDMA_QUEUED;
5848 * Note that we're defaulting to normal priority,
5849 * and assuming that the SCSI to ATA translation
5850 * layer will insert the NCQ tag number in the tag
5851 * field. That isn't clear in the SAT-4 spec (as
5854 sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
5856 ata_flags |= AP_FLAG_TLEN_FEAT |
5857 AP_FLAG_TDIR_TO_DEV;
5860 * For SEND FPDMA QUEUED, the transfer length is
5861 * encoded in the FEATURE register, and 0 means
5862 * that 65536 512 byte blocks are to be transferred.
5863 * In practice, it seems unlikely that we'll see
5864 * a transfer that large, and it may confuse the
5865 * SAT layer, because generally that means that
5866 * 0 bytes should be transferred.
5868 if (dxfer_len == (65536 * 512)) {
5870 } else if (dxfer_len <= (65535 * 512)) {
5871 features_out = ((dxfer_len >> 9) & 0xffff);
5873 /* The transfer is too big. */
5880 auxiliary = (zm_action & 0xf) | (zone_flags << 8);
5881 protocol = AP_PROTO_FPDMA;
5884 protocol |= AP_EXTEND;
5886 retval = scsi_ata_pass(csio,
5889 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5891 /*protocol*/ protocol,
5892 /*ata_flags*/ ata_flags,
5893 /*features*/ features_out,
5894 /*sector_count*/ sectors_out,
5896 /*command*/ command_out,
5899 /*auxiliary*/ auxiliary,
5901 /*data_ptr*/ data_ptr,
5902 /*dxfer_len*/ dxfer_len,
5903 /*cdb_storage*/ cdb_storage,
5904 /*cdb_storage_len*/ cdb_storage_len,
5905 /*minimum_cmd_size*/ 0,
5906 /*sense_len*/ SSD_FULL_SIZE,
5907 /*timeout*/ timeout);
5915 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
5916 void (*cbfcnp)(struct cam_periph *, union ccb *),
5917 uint8_t tag_action, int use_ncq,
5918 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
5919 uint8_t *data_ptr, uint32_t dxfer_len,
5920 uint8_t *cdb_storage, size_t cdb_storage_len,
5921 uint8_t sense_len, uint32_t timeout)
5923 uint8_t command_out, protocol;
5924 uint16_t features_out, sectors_out;
5930 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
5933 command_out = ATA_ZAC_MANAGEMENT_IN;
5934 /* XXX KDM put a macro here */
5935 features_out = (zm_action & 0xf) | (zone_flags << 8);
5936 sectors_out = dxfer_len >> 9; /* XXX KDM macro */
5937 protocol = AP_PROTO_DMA;
5938 ata_flags |= AP_FLAG_TLEN_SECT_CNT;
5941 ata_flags |= AP_FLAG_TLEN_FEAT;
5943 command_out = ATA_RECV_FPDMA_QUEUED;
5944 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
5947 * For RECEIVE FPDMA QUEUED, the transfer length is
5948 * encoded in the FEATURE register, and 0 means
5949 * that 65536 512 byte blocks are to be tranferred.
5950 * In practice, it seems unlikely that we'll see
5951 * a transfer that large, and it may confuse the
5952 * the SAT layer, because generally that means that
5953 * 0 bytes should be transferred.
5955 if (dxfer_len == (65536 * 512)) {
5957 } else if (dxfer_len <= (65535 * 512)) {
5958 features_out = ((dxfer_len >> 9) & 0xffff);
5960 /* The transfer is too big. */
5964 auxiliary = (zm_action & 0xf) | (zone_flags << 8),
5965 protocol = AP_PROTO_FPDMA;
5968 protocol |= AP_EXTEND;
5970 retval = scsi_ata_pass(csio,
5973 /*flags*/ CAM_DIR_IN,
5975 /*protocol*/ protocol,
5976 /*ata_flags*/ ata_flags,
5977 /*features*/ features_out,
5978 /*sector_count*/ sectors_out,
5980 /*command*/ command_out,
5983 /*auxiliary*/ auxiliary,
5985 /*data_ptr*/ data_ptr,
5986 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
5987 /*cdb_storage*/ cdb_storage,
5988 /*cdb_storage_len*/ cdb_storage_len,
5989 /*minimum_cmd_size*/ 0,
5990 /*sense_len*/ SSD_FULL_SIZE,
5991 /*timeout*/ timeout);