2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
4 * Copyright (c) 1997 Justin T. Gibbs.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
41 #include <sys/mutex.h>
43 #include <sys/devicestat.h>
44 #include <sys/eventhandler.h>
45 #include <sys/malloc.h>
47 #include <sys/endian.h>
50 #include <geom/geom.h>
51 #include <geom/geom_disk.h>
60 #include <cam/cam_ccb.h>
61 #include <cam/cam_periph.h>
62 #include <cam/cam_xpt_periph.h>
63 #include <cam/cam_sim.h>
64 #include <cam/cam_iosched.h>
66 #include <cam/scsi/scsi_message.h>
67 #include <cam/scsi/scsi_da.h>
71 * Note that there are probe ordering dependencies here. The order isn't
72 * controlled by this enumeration, but by explicit state transitions in
73 * dastart() and dadone(). Here are some of the dependencies:
75 * 1. RC should come first, before RC16, unless there is evidence that RC16
77 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
78 * 3. The ATA probes should go in this order:
79 * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
85 DA_STATE_PROBE_BLK_LIMITS,
88 DA_STATE_PROBE_ATA_LOGDIR,
89 DA_STATE_PROBE_ATA_IDDIR,
90 DA_STATE_PROBE_ATA_SUP,
91 DA_STATE_PROBE_ATA_ZONE,
97 DA_FLAG_PACK_INVALID = 0x000001,
98 DA_FLAG_NEW_PACK = 0x000002,
99 DA_FLAG_PACK_LOCKED = 0x000004,
100 DA_FLAG_PACK_REMOVABLE = 0x000008,
101 DA_FLAG_NEED_OTAG = 0x000020,
102 DA_FLAG_WAS_OTAG = 0x000040,
103 DA_FLAG_RETRY_UA = 0x000080,
104 DA_FLAG_OPEN = 0x000100,
105 DA_FLAG_SCTX_INIT = 0x000200,
106 DA_FLAG_CAN_RC16 = 0x000400,
107 DA_FLAG_PROBED = 0x000800,
108 DA_FLAG_DIRTY = 0x001000,
109 DA_FLAG_ANNOUNCED = 0x002000,
110 DA_FLAG_CAN_ATA_DMA = 0x004000,
111 DA_FLAG_CAN_ATA_LOG = 0x008000,
112 DA_FLAG_CAN_ATA_IDLOG = 0x010000,
113 DA_FLAG_CAN_ATA_SUPCAP = 0x020000,
114 DA_FLAG_CAN_ATA_ZONE = 0x040000
119 DA_Q_NO_SYNC_CACHE = 0x01,
120 DA_Q_NO_6_BYTE = 0x02,
121 DA_Q_NO_PREVENT = 0x04,
124 DA_Q_NO_UNMAP = 0x20,
125 DA_Q_RETRY_BUSY = 0x40,
127 DA_Q_STRICT_UNMAP = 0x100
130 #define DA_Q_BIT_STRING \
132 "\001NO_SYNC_CACHE" \
143 DA_CCB_PROBE_RC = 0x01,
144 DA_CCB_PROBE_RC16 = 0x02,
145 DA_CCB_PROBE_LBP = 0x03,
146 DA_CCB_PROBE_BLK_LIMITS = 0x04,
147 DA_CCB_PROBE_BDC = 0x05,
148 DA_CCB_PROBE_ATA = 0x06,
149 DA_CCB_BUFFER_IO = 0x07,
151 DA_CCB_DELETE = 0x0B,
153 DA_CCB_PROBE_ZONE = 0x0D,
154 DA_CCB_PROBE_ATA_LOGDIR = 0x0E,
155 DA_CCB_PROBE_ATA_IDDIR = 0x0F,
156 DA_CCB_PROBE_ATA_SUP = 0x10,
157 DA_CCB_PROBE_ATA_ZONE = 0x11,
158 DA_CCB_TYPE_MASK = 0x1F,
159 DA_CCB_RETRY_UA = 0x20
163 * Order here is important for method choice
165 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
166 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker deletes
167 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
168 * import taking 5mins.
179 DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
180 DA_DELETE_MAX = DA_DELETE_ZERO
184 * For SCSI, host managed drives show up as a separate device type. For
185 * ATA, host managed drives also have a different device signature.
186 * XXX KDM figure out the ATA host managed signature.
190 DA_ZONE_DRIVE_MANAGED = 0x01,
191 DA_ZONE_HOST_AWARE = 0x02,
192 DA_ZONE_HOST_MANAGED = 0x03
196 * We distinguish between these interface cases in addition to the drive type:
197 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
198 * o ATA drive behind a SCSI translation layer that does not know about
199 * ZBC/ZAC, and so needs to be managed via ATA passthrough. In this
200 * case, we would need to share the ATA code with the ada(4) driver.
210 DA_ZONE_FLAG_RZ_SUP = 0x0001,
211 DA_ZONE_FLAG_OPEN_SUP = 0x0002,
212 DA_ZONE_FLAG_CLOSE_SUP = 0x0004,
213 DA_ZONE_FLAG_FINISH_SUP = 0x0008,
214 DA_ZONE_FLAG_RWP_SUP = 0x0010,
215 DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP |
216 DA_ZONE_FLAG_OPEN_SUP |
217 DA_ZONE_FLAG_CLOSE_SUP |
218 DA_ZONE_FLAG_FINISH_SUP |
219 DA_ZONE_FLAG_RWP_SUP),
220 DA_ZONE_FLAG_URSWRZ = 0x0020,
221 DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
222 DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
223 DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
224 DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET |
225 DA_ZONE_FLAG_OPT_NONSEQ_SET |
226 DA_ZONE_FLAG_MAX_SEQ_SET)
229 static struct da_zone_desc {
232 } da_zone_desc_table[] = {
233 {DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
234 {DA_ZONE_FLAG_OPEN_SUP, "Open" },
235 {DA_ZONE_FLAG_CLOSE_SUP, "Close" },
236 {DA_ZONE_FLAG_FINISH_SUP, "Finish" },
237 {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
240 typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
242 static da_delete_func_t da_delete_trim;
243 static da_delete_func_t da_delete_unmap;
244 static da_delete_func_t da_delete_ws;
246 static const void * da_delete_functions[] = {
/*
 * Short method names (used e.g. for sysctl/tunable input).
 * NOTE(review): entry order appears to track the da_delete_methods enum
 * (DA_DELETE_MIN .. DA_DELETE_MAX) -- keep in sync with that enum and
 * with da_delete_method_desc[]; confirm against the enum definition.
 */
256 static const char *da_delete_method_names[] =
257 { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
/*
 * Human-readable descriptions of the delete methods, for announcement
 * messages.
 * NOTE(review): must stay index-aligned with da_delete_method_names[]
 * and the da_delete_methods enum -- verify when adding a method.
 */
258 static const char *da_delete_method_desc[] =
259 { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
260 "WRITE SAME(10) with UNMAP", "ZERO" };
262 /* Offsets into our private area for storing information */
263 #define ccb_state ppriv_field0
264 #define ccb_bp ppriv_ptr1
269 u_int8_t secs_per_track;
270 u_int32_t secsize; /* Number of bytes/sector */
271 u_int64_t sectors; /* total number sectors */
276 #define UNMAP_RANGE_MAX 0xffffffff
277 #define UNMAP_HEAD_SIZE 8
278 #define UNMAP_RANGE_SIZE 16
279 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */
280 #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
283 #define WS10_MAX_BLKS 0xffff
284 #define WS16_MAX_BLKS 0xffffffff
285 #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \
286 (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)
288 #define DA_WORK_TUR (1 << 16)
291 struct cam_iosched_softc *cam_iosched;
292 struct bio_queue_head delete_run_queue;
293 LIST_HEAD(, ccb_hdr) pending_ccbs;
294 int refcount; /* Active xpt_action() calls */
298 int minimum_cmd_size;
301 int delete_available; /* Delete methods possibly available */
302 da_zone_mode zone_mode;
303 da_zone_interface zone_interface;
304 da_zone_flags zone_flags;
305 struct ata_gp_log_dir ata_logdir;
306 int valid_logdir_len;
307 struct ata_identify_log_pages ata_iddir;
309 uint64_t optimal_seq_zones;
310 uint64_t optimal_nonseq_zones;
311 uint64_t max_seq_zones;
313 uint32_t unmap_max_ranges;
314 uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */
316 uint32_t unmap_gran_align;
317 uint64_t ws_max_blks;
318 da_delete_methods delete_method_pref;
319 da_delete_methods delete_method;
320 da_delete_func_t *delete_func;
323 struct disk_params params;
326 struct task sysctl_task;
327 struct sysctl_ctx_list sysctl_ctx;
328 struct sysctl_oid *sysctl_tree;
329 struct callout sendordered_c;
331 uint8_t unmap_buf[UNMAP_BUF_SIZE];
332 struct scsi_read_capacity_data_long rcaplong;
333 struct callout mediapoll_c;
335 struct sysctl_ctx_list sysctl_stats_ctx;
336 struct sysctl_oid *sysctl_stats_tree;
341 #define DA_ANNOUNCETMP_SZ 80
342 char announce_temp[DA_ANNOUNCETMP_SZ];
343 #define DA_ANNOUNCE_SZ 400
344 char announcebuf[DA_ANNOUNCE_SZ];
347 #define dadeleteflag(softc, delete_method, enable) \
349 softc->delete_available |= (1 << delete_method); \
351 softc->delete_available &= ~(1 << delete_method); \
354 struct da_quirk_entry {
355 struct scsi_inquiry_pattern inq_pat;
359 static const char quantum[] = "QUANTUM";
360 static const char microp[] = "MICROP";
362 static struct da_quirk_entry da_quirk_table[] =
364 /* SPI, FC devices */
367 * Fujitsu M2513A MO drives.
368 * Tested devices: M2513A2 firmware versions 1200 & 1300.
369 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
370 * Reported by: W.Scholten <whs@xs4all.nl>
372 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
373 /*quirks*/ DA_Q_NO_SYNC_CACHE
377 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
378 /*quirks*/ DA_Q_NO_SYNC_CACHE
382 * This particular Fujitsu drive doesn't like the
383 * synchronize cache command.
384 * Reported by: Tom Jackson <toj@gorilla.net>
386 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
387 /*quirks*/ DA_Q_NO_SYNC_CACHE
391 * This drive doesn't like the synchronize cache command
392 * either. Reported by: Matthew Jacob <mjacob@feral.com>
393 * in NetBSD PR kern/6027, August 24, 1998.
395 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
396 /*quirks*/ DA_Q_NO_SYNC_CACHE
400 * This drive doesn't like the synchronize cache command
401 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
404 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
405 /*quirks*/ DA_Q_NO_SYNC_CACHE
409 * Doesn't like the synchronize cache command.
410 * Reported by: Blaz Zupan <blaz@gold.amis.net>
412 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
413 /*quirks*/ DA_Q_NO_SYNC_CACHE
417 * Doesn't like the synchronize cache command.
418 * Reported by: Blaz Zupan <blaz@gold.amis.net>
420 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
421 /*quirks*/ DA_Q_NO_SYNC_CACHE
425 * Doesn't like the synchronize cache command.
427 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
428 /*quirks*/ DA_Q_NO_SYNC_CACHE
432 * Doesn't like the synchronize cache command.
433 * Reported by: walter@pelissero.de
435 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
436 /*quirks*/ DA_Q_NO_SYNC_CACHE
440 * Doesn't work correctly with 6 byte reads/writes.
441 * Returns illegal request, and points to byte 9 of the
443 * Reported by: Adam McDougall <bsdx@spawnet.com>
445 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
446 /*quirks*/ DA_Q_NO_6_BYTE
450 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
451 /*quirks*/ DA_Q_NO_6_BYTE
455 * Doesn't like the synchronize cache command.
456 * Reported by: walter@pelissero.de
458 {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
459 /*quirks*/ DA_Q_NO_SYNC_CACHE
463 * The CISS RAID controllers do not support SYNC_CACHE
465 {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
466 /*quirks*/ DA_Q_NO_SYNC_CACHE
470 * The STEC SSDs sometimes hang on UNMAP.
472 {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
473 /*quirks*/ DA_Q_NO_UNMAP
477 * VMware returns BUSY status when storage has transient
478 * connectivity problems, so better wait.
479 * Also VMware returns odd errors on misaligned UNMAPs.
481 {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
482 /*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
484 /* USB mass storage devices supported by umass(4) */
487 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
490 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
491 /*quirks*/ DA_Q_NO_SYNC_CACHE
495 * Power Quotient Int. (PQI) USB flash key
498 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
499 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
503 * Creative Nomad MUVO mp3 player (USB)
506 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
507 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
511 * Jungsoft NEXDISK USB flash key
514 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
515 /*quirks*/ DA_Q_NO_SYNC_CACHE
519 * FreeDik USB Mini Data Drive
522 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
523 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
527 * Sigmatel USB Flash MP3 Player
530 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
531 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
535 * Neuros USB Digital Audio Computer
538 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
539 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
543 * SEAGRAND NP-900 MP3 Player
546 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
547 /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
551 * iRiver iFP MP3 player (with UMS Firmware)
552 * PR: kern/54881, i386/63941, kern/66124
554 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
555 /*quirks*/ DA_Q_NO_SYNC_CACHE
559 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
562 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
563 /*quirks*/ DA_Q_NO_SYNC_CACHE
567 * ZICPlay USB MP3 Player with FM
570 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
571 /*quirks*/ DA_Q_NO_SYNC_CACHE
575 * TEAC USB floppy mechanisms
577 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
578 /*quirks*/ DA_Q_NO_SYNC_CACHE
582 * Kingston DataTraveler II+ USB Pen-Drive.
583 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
585 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
586 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
594 {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
595 /*quirks*/ DA_Q_NO_SYNC_CACHE
599 * Motorola E398 Mobile Phone (TransFlash memory card).
600 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
603 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
604 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
608 * Qware BeatZkey! Pro
611 {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
612 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
616 * Time DPA20B 1GB MP3 Player
619 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
620 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
624 * Samsung USB key 128Mb
627 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
628 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
632 * Kingston DataTraveler 2.0 USB Flash memory.
635 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
636 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
640 * Creative MUVO Slim mp3 player (USB)
643 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
644 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
648 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
651 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
652 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
656 * SanDisk Micro Cruzer 128MB
659 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
660 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
664 * TOSHIBA TransMemory USB sticks
667 {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
668 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
672 * PNY USB 3.0 Flash Drives
674 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
675 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
680 * PR: usb/75578, usb/72344, usb/65436
682 {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
683 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
689 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
690 "120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
694 * Genesys 6-in-1 Card Reader
697 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
698 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
702 * Rekam Digital CAMERA
705 {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
706 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
710 * iRiver H10 MP3 player
713 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
714 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
718 * iRiver U10 MP3 player
721 {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
722 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
729 {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
730 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
734 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
737 {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
738 "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
745 {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
746 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
750 * Philips USB Key Audio KEY013
753 {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
754 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
761 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
762 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
769 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
770 /*quirks*/ DA_Q_NO_SYNC_CACHE
774 * I/O Magic USB flash - Giga Bank
777 {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
778 /*quirks*/ DA_Q_NO_SYNC_CACHE
782 * JoyFly 128mb USB Flash Drive
785 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
786 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
793 {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
794 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
798 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
801 {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
802 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
806 * Samsung YP-U3 mp3-player
809 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
810 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
813 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
814 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
818 * Sony Cyber-Shot DSC cameras
821 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
822 /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
825 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
826 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
829 /* At least several Transcend USB sticks lie about RC16. */
830 {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
831 "*"}, /*quirks*/ DA_Q_NO_RC16
835 * I-O Data USB Flash Disk
838 {T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
839 "*"}, /*quirks*/ DA_Q_NO_RC16
841 /* ATA/SATA devices over SAS/USB/... */
843 /* Hitachi Advanced Format (4k) drives */
844 { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
848 /* Micron Advanced Format (4k) drives */
849 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
853 /* Samsung Advanced Format (4k) drives */
854 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
858 /* Samsung Advanced Format (4k) drives */
859 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
863 /* Samsung Advanced Format (4k) drives */
864 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
868 /* Samsung Advanced Format (4k) drives */
869 { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
873 /* Seagate Barracuda Green Advanced Format (4k) drives */
874 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
878 /* Seagate Barracuda Green Advanced Format (4k) drives */
879 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
883 /* Seagate Barracuda Green Advanced Format (4k) drives */
884 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
888 /* Seagate Barracuda Green Advanced Format (4k) drives */
889 { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
893 /* Seagate Barracuda Green Advanced Format (4k) drives */
894 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
898 /* Seagate Barracuda Green Advanced Format (4k) drives */
899 { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
903 /* Seagate Momentus Advanced Format (4k) drives */
904 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
908 /* Seagate Momentus Advanced Format (4k) drives */
909 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
913 /* Seagate Momentus Advanced Format (4k) drives */
914 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
918 /* Seagate Momentus Advanced Format (4k) drives */
919 { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
923 /* Seagate Momentus Advanced Format (4k) drives */
924 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
928 /* Seagate Momentus Advanced Format (4k) drives */
929 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
933 /* Seagate Momentus Advanced Format (4k) drives */
934 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
938 /* Seagate Momentus Advanced Format (4k) drives */
939 { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
943 /* Seagate Momentus Advanced Format (4k) drives */
944 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
948 /* Seagate Momentus Advanced Format (4k) drives */
949 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
953 /* Seagate Momentus Advanced Format (4k) drives */
954 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
958 /* Seagate Momentus Advanced Format (4k) drives */
959 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
963 /* Seagate Momentus Advanced Format (4k) drives */
964 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
968 /* Seagate Momentus Advanced Format (4k) drives */
969 { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
973 /* Seagate Momentus Thin Advanced Format (4k) drives */
974 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
978 /* Seagate Momentus Thin Advanced Format (4k) drives */
979 { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
983 /* WDC Caviar Green Advanced Format (4k) drives */
984 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
988 /* WDC Caviar Green Advanced Format (4k) drives */
989 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
993 /* WDC Caviar Green Advanced Format (4k) drives */
994 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
998 /* WDC Caviar Green Advanced Format (4k) drives */
999 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1003 /* WDC Caviar Green Advanced Format (4k) drives */
1004 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1008 /* WDC Caviar Green Advanced Format (4k) drives */
1009 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1013 /* WDC Caviar Green Advanced Format (4k) drives */
1014 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1018 /* WDC Caviar Green Advanced Format (4k) drives */
1019 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1023 /* WDC Scorpio Black Advanced Format (4k) drives */
1024 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1028 /* WDC Scorpio Black Advanced Format (4k) drives */
1029 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1033 /* WDC Scorpio Black Advanced Format (4k) drives */
1034 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1038 /* WDC Scorpio Black Advanced Format (4k) drives */
1039 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1043 /* WDC Scorpio Blue Advanced Format (4k) drives */
1044 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1048 /* WDC Scorpio Blue Advanced Format (4k) drives */
1049 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1053 /* WDC Scorpio Blue Advanced Format (4k) drives */
1054 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1058 /* WDC Scorpio Blue Advanced Format (4k) drives */
1059 { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1064 * Olympus FE-210 camera
1066 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1067 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1071 * LG UP3S MP3 player
1073 {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1074 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1078 * Laser MP3-2GA13 MP3 player
1080 {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1081 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1085 * LaCie external 250GB Hard drive designed by Porsche
1086 * Submitted by: Ben Stuyts <ben@altesco.nl>
1089 {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1090 /*quirks*/ DA_Q_NO_SYNC_CACHE
1095 * Corsair Force 2 SSDs
1096 * 4k optimised & trim only works in 4k requests + 4k aligned
1098 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1103 * Corsair Force 3 SSDs
1104 * 4k optimised & trim only works in 4k requests + 4k aligned
1106 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1111 * Corsair Neutron GTX SSDs
1112 * 4k optimised & trim only works in 4k requests + 4k aligned
1114 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1119 * Corsair Force GT & GS SSDs
1120 * 4k optimised & trim only works in 4k requests + 4k aligned
1122 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1128 * 4k optimised & trim only works in 4k requests + 4k aligned
1130 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1135 * Crucial RealSSD C300 SSDs
1138 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1139 "*" }, /*quirks*/DA_Q_4K
1143 * Intel 320 Series SSDs
1144 * 4k optimised & trim only works in 4k requests + 4k aligned
1146 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1151 * Intel 330 Series SSDs
1152 * 4k optimised & trim only works in 4k requests + 4k aligned
1154 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1159 * Intel 510 Series SSDs
1160 * 4k optimised & trim only works in 4k requests + 4k aligned
1162 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1167 * Intel 520 Series SSDs
1168 * 4k optimised & trim only works in 4k requests + 4k aligned
1170 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1175 * Intel S3610 Series SSDs
1176 * 4k optimised & trim only works in 4k requests + 4k aligned
1178 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1183 * Intel X25-M Series SSDs
1184 * 4k optimised & trim only works in 4k requests + 4k aligned
1186 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1191 * Kingston E100 Series SSDs
1192 * 4k optimised & trim only works in 4k requests + 4k aligned
1194 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1199 * Kingston HyperX 3k SSDs
1200 * 4k optimised & trim only works in 4k requests + 4k aligned
1202 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1207 * Marvell SSDs (entry taken from OpenSolaris)
1208 * 4k optimised & trim only works in 4k requests + 4k aligned
1210 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1215 * OCZ Agility 2 SSDs
1216 * 4k optimised & trim only works in 4k requests + 4k aligned
1218 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1223 * OCZ Agility 3 SSDs
1224 * 4k optimised & trim only works in 4k requests + 4k aligned
1226 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1231 * OCZ Deneva R Series SSDs
1232 * 4k optimised & trim only works in 4k requests + 4k aligned
1234 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1239 * OCZ Vertex 2 SSDs (inc pro series)
1240 * 4k optimised & trim only works in 4k requests + 4k aligned
1242 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1248 * 4k optimised & trim only works in 4k requests + 4k aligned
1250 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1256 * 4k optimised & trim only works in 4k requests + 4k aligned
1258 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1263 * Samsung 830 Series SSDs
1264 * 4k optimised & trim only works in 4k requests + 4k aligned
1266 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1272 * 4k optimised & trim only works in 4k requests + 4k aligned
1274 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1280 * 4k optimised & trim only works in 4k requests + 4k aligned
1282 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1287 * Samsung 843T Series SSDs (MZ7WD*)
1288 * Samsung PM851 Series SSDs (MZ7TE*)
1289 * Samsung PM853T Series SSDs (MZ7GE*)
1290 * Samsung SM863 Series SSDs (MZ7KM*)
1293 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1298 * SuperTalent TeraDrive CT SSDs
1299 * 4k optimised & trim only works in 4k requests + 4k aligned
1301 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1306 * XceedIOPS SATA SSDs
1309 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1314 * Hama Innostor USB-Stick
1316 { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1317 /*quirks*/DA_Q_NO_RC16
1321 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1322 * Drive Managed SATA hard drive. This drive doesn't report
1323 * in firmware that it is a drive managed SMR drive.
1325 { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" },
1326 /*quirks*/DA_Q_SMR_DM
1330 * MX-ES USB Drive by Mach Xtreme
1332 { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1333 /*quirks*/DA_Q_NO_RC16
1337 static disk_strategy_t dastrategy;
1338 static dumper_t dadump;
1339 static periph_init_t dainit;
1340 static void daasync(void *callback_arg, u_int32_t code,
1341 struct cam_path *path, void *arg);
1342 static void dasysctlinit(void *context, int pending);
1343 static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
1344 static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
1345 static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
1346 static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
1347 static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
1348 static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
1349 static void dadeletemethodset(struct da_softc *softc,
1350 da_delete_methods delete_method);
1351 static off_t dadeletemaxsize(struct da_softc *softc,
1352 da_delete_methods delete_method);
1353 static void dadeletemethodchoose(struct da_softc *softc,
1354 da_delete_methods default_method);
1355 static void daprobedone(struct cam_periph *periph, union ccb *ccb);
1357 static periph_ctor_t daregister;
1358 static periph_dtor_t dacleanup;
1359 static periph_start_t dastart;
1360 static periph_oninv_t daoninvalidate;
/*
 * NOTE(review): this is an elided excerpt of the driver source; the in-line
 * numbers are the original file's line numbers and gaps indicate lines that
 * are missing from this listing.
 */
/* Forward declarations for the remaining da(4) helper routines. */
1361 static void dazonedone(struct cam_periph *periph, union ccb *ccb);
1362 static void dadone(struct cam_periph *periph,
1363 union ccb *done_ccb);
1364 static int daerror(union ccb *ccb, u_int32_t cam_flags,
1365 u_int32_t sense_flags);
1366 static void daprevent(struct cam_periph *periph, int action);
1367 static void dareprobe(struct cam_periph *periph);
1368 static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
1370 struct scsi_read_capacity_data_long *rcaplong,
1372 static timeout_t dasendorderedtag;
1373 static void dashutdown(void *arg, int howto);
1374 static timeout_t damediapoll;
/* Compile-time defaults; each may be overridden at build time. */
1376 #ifndef DA_DEFAULT_POLL_PERIOD
1377 #define DA_DEFAULT_POLL_PERIOD 3
1380 #ifndef DA_DEFAULT_TIMEOUT
1381 #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
1384 #ifndef DA_DEFAULT_SOFTTIMEOUT
1385 #define DA_DEFAULT_SOFTTIMEOUT 0
1388 #ifndef DA_DEFAULT_RETRY
1389 #define DA_DEFAULT_RETRY 4
1392 #ifndef DA_DEFAULT_SEND_ORDERED
1393 #define DA_DEFAULT_SEND_ORDERED 1
/* Run-time state seeded from the defaults above. */
1396 static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
1397 static int da_retry_count = DA_DEFAULT_RETRY;
1398 static int da_default_timeout = DA_DEFAULT_TIMEOUT;
1399 static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
1400 static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
/* kern.cam.da.* sysctl tree exposing the tunables above. */
1402 static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
1403 "CAM Direct Access Disk driver");
1404 SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
1405 &da_poll_period, 0, "Media polling period in seconds");
1406 SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
1407 &da_retry_count, 0, "Normal I/O retry count");
1408 SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
1409 &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
1410 SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
1411 &da_send_ordered, 0, "Send Ordered Tags");
/* softtimeout is an sbintime_t, so it goes through a PROC handler. */
1413 SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
1414 CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
1415 "Soft I/O timeout (ms)");
1416 TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
1419 * DA_ORDEREDTAG_INTERVAL determines how often, relative
1420 * to the default timeout, we check to see whether an ordered
1421 * tagged transaction is appropriate to prevent simple tag
1422 * starvation. Since we'd like to ensure that there is at least
1423 * 1/2 of the timeout length left for a starved transaction to
1424 * complete after we've sent an ordered tag, we must poll at least
1425 * four times in every timeout period. This takes care of the worst
1426 * case where a starved transaction starts during an interval that
1427 * meets the requirement "don't send an ordered tag" test so it takes
1428 * us two intervals to determine that a tag must be sent.
1430 #ifndef DA_ORDEREDTAG_INTERVAL
1431 #define DA_ORDEREDTAG_INTERVAL 4
/* Periph driver registration: unit list plus generation counter. */
1434 static struct periph_driver dadriver =
1437 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
1440 PERIPHDRIVER_DECLARE(da, dadriver);
1442 static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
/*
 * daopen() -- disk d_open entry point.  Acquires and holds the periph,
 * waits for the probe to publish the media size, then (for removable
 * media without the NO_PREVENT quirk) locks the media in the drive and
 * marks the pack open/valid.  NOTE(review): listing is elided here;
 * error-path braces/returns are not visible.
 */
1445 daopen(struct disk *dp)
1447 struct cam_periph *periph;
1448 struct da_softc *softc;
1451 periph = (struct cam_periph *)dp->d_drv1;
1452 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1456 cam_periph_lock(periph);
1457 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
1458 cam_periph_unlock(periph);
1459 cam_periph_release(periph);
1463 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
1466 softc = (struct da_softc *)periph->softc;
1469 /* Wait for the disk size update. */
1470 error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
1473 xpt_print(periph->path, "unable to retrieve capacity data\n");
/* A periph invalidated while we slept means the open must fail. */
1475 if (periph->flags & CAM_PERIPH_INVALID)
/* Prevent media removal while the pack is open, unless quirked off. */
1478 if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1479 (softc->quirks & DA_Q_NO_PREVENT) == 0)
1480 daprevent(periph, PR_PREVENT);
1483 softc->flags &= ~DA_FLAG_PACK_INVALID;
1484 softc->flags |= DA_FLAG_OPEN;
1487 cam_periph_unhold(periph);
1488 cam_periph_unlock(periph);
/* Drop the acquire reference taken on entry. */
1491 cam_periph_release(periph);
/*
 * daclose() -- disk d_close entry point.  Flushes the write cache if the
 * pack is dirty, re-allows media removal, marks the blocksize unavailable
 * for removable media, clears DA_FLAG_OPEN, and waits for outstanding
 * softc references to drain before releasing the periph.
 */
1497 daclose(struct disk *dp)
1499 struct cam_periph *periph;
1500 struct da_softc *softc;
1504 periph = (struct cam_periph *)dp->d_drv1;
1505 softc = (struct da_softc *)periph->softc;
1506 cam_periph_lock(periph);
1507 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
/* Best-effort cleanup: only if we can hold the periph without sleeping
 * being interrupted; failures here are not propagated to the caller. */
1510 if (cam_periph_hold(periph, PRIBIO) == 0) {
1512 /* Flush disk cache. */
1513 if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
1514 (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
1515 (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
1516 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1517 scsi_synchronize_cache(&ccb->csio, /*retries*/1,
1518 /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
1519 /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
1521 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
1522 /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
1523 softc->disk->d_devstat);
1524 softc->flags &= ~DA_FLAG_DIRTY;
1525 xpt_release_ccb(ccb);
1528 /* Allow medium removal. */
1529 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1530 (softc->quirks & DA_Q_NO_PREVENT) == 0)
1531 daprevent(periph, PR_ALLOW);
1533 cam_periph_unhold(periph);
1537 * If we've got removeable media, mark the blocksize as
1538 * unavailable, since it could change when new media is
1541 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
1542 softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
1544 softc->flags &= ~DA_FLAG_OPEN;
/* Wait until all other users of the softc are gone before releasing. */
1545 while (softc->refcount != 0)
1546 cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
1547 cam_periph_unlock(periph);
1548 cam_periph_release(periph);
/*
 * daschedule() -- kick the I/O scheduler for this periph, but only once
 * the probe state machine has reached DA_STATE_NORMAL.
 */
1553 daschedule(struct cam_periph *periph)
1555 struct da_softc *softc = (struct da_softc *)periph->softc;
1557 if (softc->state != DA_STATE_NORMAL)
1560 cam_iosched_schedule(softc->cam_iosched, periph);
1564 * Actually translate the requested transfer into one the physical driver
1565 * can understand. The transfer is described by a buf and will include
1566 * only one physical transfer.
/*
 * dastrategy() -- disk d_strategy entry point.  Rejects I/O to an
 * invalidated pack with ENXIO, forces ordering on BIO_ZONE requests,
 * and hands everything else to the CAM I/O scheduler queue.
 */
1569 dastrategy(struct bio *bp)
1571 struct cam_periph *periph;
1572 struct da_softc *softc;
1574 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1575 softc = (struct da_softc *)periph->softc;
1577 cam_periph_lock(periph);
1580 * If the device has been made invalid, error out
1582 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1583 cam_periph_unlock(periph);
1584 biofinish(bp, NULL, ENXIO);
1588 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1591 * Zone commands must be ordered, because they can depend on the
1592 * effects of previously issued commands, and they may affect
1593 * commands after them.
1595 if (bp->bio_cmd == BIO_ZONE)
1596 bp->bio_flags |= BIO_ORDERED;
1599 * Place it in the queue of disk activities for this disk
1601 cam_iosched_queue_work(softc->cam_iosched, bp);
1604 * Schedule ourselves for performing the work.
1607 cam_periph_unlock(periph);
/*
 * dadump() -- disk d_dump entry point for kernel crash dumps.  Issues a
 * polled WRITE for the supplied buffer; when called with zero length
 * (end of dump), issues a polled SYNCHRONIZE CACHE instead unless the
 * device carries the NO_SYNC_CACHE quirk.  Uses xpt_polled_action()
 * because interrupts may be unavailable at panic time.
 */
1613 dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
1615 struct cam_periph *periph;
1616 struct da_softc *softc;
1618 struct ccb_scsiio csio;
1623 periph = dp->d_drv1;
1624 softc = (struct da_softc *)periph->softc;
1625 cam_periph_lock(periph);
1626 secsize = softc->params.secsize;
/* Refuse to write to an invalidated pack. */
1628 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
1629 cam_periph_unlock(periph);
1634 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1635 csio.ccb_h.ccb_state = DA_CCB_DUMP;
1636 scsi_read_write(&csio,
1640 /*read*/SCSI_RW_WRITE,
1642 /*minimum_cmd_size*/ softc->minimum_cmd_size,
1645 /*data_ptr*/(u_int8_t *) virtual,
1646 /*dxfer_len*/length,
1647 /*sense_len*/SSD_FULL_SIZE,
1648 da_default_timeout * 1000);
1649 xpt_polled_action((union ccb *)&csio);
1651 error = cam_periph_error((union ccb *)&csio,
1652 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
/* Thaw the device queue if the failed command froze it. */
1653 if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
1654 cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
1655 /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
1657 printf("Aborting dump due to I/O error.\n");
1658 cam_periph_unlock(periph);
1663 * Sync the disk cache contents to the physical media.
1665 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
1667 xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1668 csio.ccb_h.ccb_state = DA_CCB_DUMP;
1669 scsi_synchronize_cache(&csio,
1673 /*begin_lba*/0,/* Cover the whole disk */
1677 xpt_polled_action((union ccb *)&csio);
1679 error = cam_periph_error((union ccb *)&csio,
1680 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
1681 if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
1682 cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
1683 /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
/* A cache sync failure at dump time is reported but not fatal. */
1685 xpt_print(periph->path, "Synchronize cache failed\n");
1687 cam_periph_unlock(periph);
/*
 * dagetattr() -- disk d_getattr entry point.  Forwards the attribute
 * lookup to the transport layer via xpt_getattr() under the periph lock
 * and, on success, marks the full bio length completed.
 */
1692 dagetattr(struct bio *bp)
1695 struct cam_periph *periph;
1697 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1698 cam_periph_lock(periph);
1699 ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1701 cam_periph_unlock(periph);
1703 bp->bio_completed = bp->bio_length;
/*
 * NOTE(review): fragment of the driver init routine -- its signature is
 * elided from this listing.  Registers the global AC_FOUND_DEVICE async
 * callback and, when ordered tags are enabled, a shutdown handler that
 * flushes caches at shutdown_post_sync time.
 */
1713 * Install a global async callback. This callback will
1714 * receive async callbacks like "new device found".
1716 status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
1718 if (status != CAM_REQ_CMP) {
1719 printf("da: Failed to attach master async callback "
1720 "due to status 0x%x!\n", status);
1721 } else if (da_send_ordered) {
1723 /* Register our shutdown event handler */
1724 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
1725 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1726 printf("dainit: shutdown event registration failed!\n");
1731 * Callback from GEOM, called when it has finished cleaning up its
/* Drops the reference taken before disk_create() in daregister(). */
1735 dadiskgonecb(struct disk *dp)
1737 struct cam_periph *periph;
1739 periph = (struct cam_periph *)dp->d_drv1;
1740 cam_periph_release(periph);
/*
 * daoninvalidate() -- called when the periph is being invalidated.
 * Deregisters async callbacks, flags the pack invalid, fails all queued
 * I/O with ENXIO, and tells GEOM to tear down the disk (completion is
 * signalled later via dadiskgonecb()).
 */
1744 daoninvalidate(struct cam_periph *periph)
1746 struct da_softc *softc;
1748 softc = (struct da_softc *)periph->softc;
1751 * De-register any async callbacks.
1753 xpt_register_async(0, daasync, periph, periph->path);
1755 softc->flags |= DA_FLAG_PACK_INVALID;
/* Counted for the kern.cam.da.N.stats sysctl tree. */
1757 softc->invalidations++;
1761 * Return all queued I/O with ENXIO.
1762 * XXX Handle any transactions queued to the card
1763 * with XPT_ABORT_CCB.
1765 cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
1768 * Tell GEOM that we've gone away, we'll get a callback when it is
1769 * done cleaning up its resources.
1771 disk_gone(softc->disk);
/*
 * dacleanup() -- final periph teardown.  Frees the I/O scheduler, tears
 * down the per-unit sysctl trees, drains the media-poll and ordered-tag
 * callouts, destroys the disk, and frees the softc.  Drops the periph
 * lock across operations that may sleep.
 */
1775 dacleanup(struct cam_periph *periph)
1777 struct da_softc *softc;
1779 softc = (struct da_softc *)periph->softc;
1781 cam_periph_unlock(periph);
1783 cam_iosched_fini(softc->cam_iosched);
1786 * If we can't free the sysctl tree, oh well...
1788 if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
1790 if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
1791 xpt_print(periph->path,
1792 "can't remove sysctl stats context\n");
1794 if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
1795 xpt_print(periph->path,
1796 "can't remove sysctl context\n");
/* Drain callouts before freeing the softc they reference. */
1799 callout_drain(&softc->mediapoll_c);
1800 disk_destroy(softc->disk);
1801 callout_drain(&softc->sendordered_c);
1802 free(softc, M_DEVBUF);
1803 cam_periph_lock(periph);
/*
 * daasync() -- async event callback.  Dispatches on the event code:
 * attaches new SCSI direct-access-class devices, propagates physical
 * path changes to GEOM, reacts to unit attentions (capacity / media /
 * inquiry changes) from other initiators, schedules TUR work, marks
 * pending CCBs for retry after a unit attention, and forces a reprobe
 * on inquiry changes.  NOTE(review): the switch header and several
 * case labels are elided from this listing.
 */
1807 daasync(void *callback_arg, u_int32_t code,
1808 struct cam_path *path, void *arg)
1810 struct cam_periph *periph;
1811 struct da_softc *softc;
1813 periph = (struct cam_periph *)callback_arg;
1815 case AC_FOUND_DEVICE:
1817 struct ccb_getdev *cgd;
1820 cgd = (struct ccb_getdev *)arg;
/* Only attach to connected SCSI LUs of a direct-access-like type. */
1824 if (cgd->protocol != PROTO_SCSI)
1826 if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
1828 if (SID_TYPE(&cgd->inq_data) != T_DIRECT
1829 && SID_TYPE(&cgd->inq_data) != T_RBC
1830 && SID_TYPE(&cgd->inq_data) != T_OPTICAL
1831 && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
1835 * Allocate a peripheral instance for
1836 * this device and start the probe
1839 status = cam_periph_alloc(daregister, daoninvalidate,
1841 "da", CAM_PERIPH_BIO,
1843 AC_FOUND_DEVICE, cgd);
1845 if (status != CAM_REQ_CMP
1846 && status != CAM_REQ_INPROG)
1847 printf("daasync: Unable to attach to new device "
1848 "due to status 0x%x\n", status);
1851 case AC_ADVINFO_CHANGED:
1855 buftype = (uintptr_t)arg;
1856 if (buftype == CDAI_TYPE_PHYS_PATH) {
1857 struct da_softc *softc;
1859 softc = periph->softc;
1860 disk_attr_changed(softc->disk, "GEOM::physpath",
1865 case AC_UNIT_ATTENTION:
1868 int error_code, sense_key, asc, ascq;
1870 softc = (struct da_softc *)periph->softc;
1871 ccb = (union ccb *)arg;
1874 * Handle all UNIT ATTENTIONs except our own,
1875 * as they will be handled by daerror().
1877 if (xpt_path_periph(ccb->ccb_h.path) != periph &&
1878 scsi_extract_sense_ccb(ccb,
1879 &error_code, &sense_key, &asc, &ascq)) {
/* ASC/ASCQ 2A/09: CAPACITY DATA HAS CHANGED. */
1880 if (asc == 0x2A && ascq == 0x09) {
1881 xpt_print(ccb->ccb_h.path,
1882 "Capacity data has changed\n");
1883 softc->flags &= ~DA_FLAG_PROBED;
/* ASC/ASCQ 28/00: NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED. */
1885 } else if (asc == 0x28 && ascq == 0x00) {
1886 softc->flags &= ~DA_FLAG_PROBED;
1887 disk_media_changed(softc->disk, M_NOWAIT);
/* ASC/ASCQ 3F/03: INQUIRY DATA HAS CHANGED. */
1888 } else if (asc == 0x3F && ascq == 0x03) {
1889 xpt_print(ccb->ccb_h.path,
1890 "INQUIRY data has changed\n");
1891 softc->flags &= ~DA_FLAG_PROBED;
1895 cam_periph_async(periph, code, path, arg);
1899 softc = (struct da_softc *)periph->softc;
/* Schedule a Test Unit Ready via the iosched if not already pending. */
1900 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
1901 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
1902 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
1910 struct ccb_hdr *ccbh;
1912 softc = (struct da_softc *)periph->softc;
1914 * Don't fail on the expected unit attention
1917 softc->flags |= DA_FLAG_RETRY_UA;
1918 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
1919 ccbh->ccb_state |= DA_CCB_RETRY_UA;
1922 case AC_INQ_CHANGED:
1923 softc = (struct da_softc *)periph->softc;
1924 softc->flags &= ~DA_FLAG_PROBED;
1930 cam_periph_async(periph, code, path, arg);
/*
 * dasysctlinit() -- taskqueue handler that builds the per-unit
 * kern.cam.da.N sysctl tree: delete-method controls, zone info, error
 * injection, transport (e.g. FC WWPN) attributes, and a stats subtree.
 * The periph was acquired when the task was enqueued and is released on
 * every exit path.
 */
1934 dasysctlinit(void *context, int pending)
1936 struct cam_periph *periph;
1937 struct da_softc *softc;
1938 char tmpstr[80], tmpstr2[80];
1939 struct ccb_trans_settings cts;
1941 periph = (struct cam_periph *)context;
1943 * periph was held for us when this task was enqueued
1945 if (periph->flags & CAM_PERIPH_INVALID) {
1946 cam_periph_release(periph);
1950 softc = (struct da_softc *)periph->softc;
1951 snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
1952 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
1954 sysctl_ctx_init(&softc->sysctl_ctx);
/* Flag tells dacleanup() the contexts need sysctl_ctx_free(). */
1955 softc->flags |= DA_FLAG_SCTX_INIT;
1956 softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
1957 SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
1958 CTLFLAG_RD, 0, tmpstr, "device_index");
1959 if (softc->sysctl_tree == NULL) {
1960 printf("dasysctlinit: unable to allocate sysctl tree\n");
1961 cam_periph_release(periph);
1966 * Now register the sysctl handler, so the user can change the value on
1969 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1970 OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
1971 softc, 0, dadeletemethodsysctl, "A",
1972 "BIO_DELETE execution method");
1973 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1974 OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
1975 softc, 0, dadeletemaxsysctl, "Q",
1976 "Maximum BIO_DELETE size");
1977 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1978 OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
1979 &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
1980 "Minimum CDB size");
/* Zoned-device (SMR) reporting nodes. */
1982 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1983 OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
1984 softc, 0, dazonemodesysctl, "A",
1986 SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1987 OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
1988 softc, 0, dazonesupsysctl, "A",
1990 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1991 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1992 "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
1993 "Optimal Number of Open Sequential Write Preferred Zones");
1994 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1995 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1996 "optimal_nonseq_zones", CTLFLAG_RD,
1997 &softc->optimal_nonseq_zones,
1998 "Optimal Number of Non-Sequentially Written Sequential Write "
2000 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2001 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2002 "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
2003 "Maximum Number of Open Sequential Write Required Zones");
2005 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2006 SYSCTL_CHILDREN(softc->sysctl_tree),
2010 &softc->error_inject,
2012 "error_inject leaf");
2014 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2015 SYSCTL_CHILDREN(softc->sysctl_tree),
2021 "Unmapped I/O leaf");
2023 SYSCTL_ADD_INT(&softc->sysctl_ctx,
2024 SYSCTL_CHILDREN(softc->sysctl_tree),
2033 * Add some addressing info.
2035 memset(&cts, 0, sizeof (cts));
2036 xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
2037 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2038 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2039 cam_periph_lock(periph);
2040 xpt_action((union ccb *)&cts);
2041 cam_periph_unlock(periph);
2042 if (cts.ccb_h.status != CAM_REQ_CMP) {
2043 cam_periph_release(periph);
/* Fibre Channel transports additionally export the WWPN. */
2046 if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
2047 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
2048 if (fc->valid & CTS_FC_VALID_WWPN) {
2049 softc->wwpn = fc->wwpn;
2050 SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2051 SYSCTL_CHILDREN(softc->sysctl_tree),
2052 OID_AUTO, "wwpn", CTLFLAG_RD,
2053 &softc->wwpn, "World Wide Port Name");
2059 * Now add some useful stats.
2060 * XXX These should live in cam_periph and be common to all periphs
2062 softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
2063 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
2064 CTLFLAG_RD, 0, "Statistics");
2065 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2066 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2072 "Transport errors reported by the SIM");
2073 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2074 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2080 "Device timeouts reported by the SIM");
2081 SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2082 SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2084 "pack_invalidations",
2086 &softc->invalidations,
2088 "Device pack invalidations");
/* Let the I/O scheduler hang its own knobs under our tree. */
2091 cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
2092 softc->sysctl_tree);
2094 cam_periph_release(periph);
/*
 * dadeletemaxsysctl() -- handler for kern.cam.da.N.delete_max.  Reads or
 * sets the disk's maximum BIO_DELETE size, clamping new values to the
 * limit computed by dadeletemaxsize() for the current delete method.
 */
2098 dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2102 struct da_softc *softc;
2104 softc = (struct da_softc *)arg1;
2106 value = softc->disk->d_delmaxsize;
2107 error = sysctl_handle_64(oidp, &value, 0, req);
2108 if ((error != 0) || (req->newptr == NULL))
2111 /* only accept values smaller than the calculated value */
2112 if (value > dadeletemaxsize(softc, softc->delete_method)) {
2115 softc->disk->d_delmaxsize = value;
/*
 * dacmdsizesysctl() -- handler for kern.cam.da.N.minimum_cmd_size.
 * Rounds any written value up to the nearest valid CDB size (6, 10, 12
 * or 16 bytes) before storing it.
 */
2121 dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2125 value = *(int *)arg1;
2127 error = sysctl_handle_int(oidp, &value, 0, req);
2130 || (req->newptr == NULL))
2134 * Acceptable values here are 6, 10, 12 or 16.
2138 else if ((value > 6)
2141 else if ((value > 10)
2144 else if (value > 12)
2147 *(int *)arg1 = value;
/*
 * dasysctlsofttimeout() -- handler for kern.cam.da.default_softtimeout.
 * Presents the sbintime_t value in milliseconds and rejects settings
 * larger than the hard da_default_timeout.
 */
2153 dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2158 value = da_default_softtimeout / SBT_1MS;
2160 error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2161 if ((error != 0) || (req->newptr == NULL))
2164 /* XXX Should clip this to a reasonable level */
2165 if (value > da_default_timeout * 1000)
2168 da_default_softtimeout = value * SBT_1MS;
/*
 * dadeletemethodset() -- switch the active BIO_DELETE method: record it,
 * recompute the disk's delete-size limit, install the matching delete
 * function, and toggle DISKFLAG_CANDELETE accordingly.
 */
2173 dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2176 softc->delete_method = delete_method;
2177 softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2178 softc->delete_func = da_delete_functions[delete_method];
2180 if (softc->delete_method > DA_DELETE_DISABLE)
2181 softc->disk->d_flags |= DISKFLAG_CANDELETE;
2183 softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
/*
 * dadeletemaxsize() -- return the largest BIO_DELETE, in bytes, that a
 * single command of the given delete method can cover, capped at the
 * size of the disk itself.
 */
2187 dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2191 switch(delete_method) {
2192 case DA_DELETE_UNMAP:
2193 sectors = (off_t)softc->unmap_max_lba;
2195 case DA_DELETE_ATA_TRIM:
2196 sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2198 case DA_DELETE_WS16:
2199 sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2201 case DA_DELETE_ZERO:
2202 case DA_DELETE_WS10:
2203 sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
/* Convert sectors to bytes, never exceeding the device capacity. */
2209 return (off_t)softc->params.secsize *
2210 omin(sectors, softc->params.sectors);
/*
 * daprobedone() -- finish the probe state machine: pick the delete
 * method, optionally announce the supported methods, transition to
 * DA_STATE_NORMAL, wake any daopen() waiting on the media size, and
 * drop the probe hold/reference.
 */
2214 daprobedone(struct cam_periph *periph, union ccb *ccb)
2216 struct da_softc *softc;
2218 softc = (struct da_softc *)periph->softc;
2220 dadeletemethodchoose(softc, DA_DELETE_NONE);
2222 if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2226 snprintf(buf, sizeof(buf), "Delete methods: <");
/* List every available method, marking the active one with "(*)". */
2228 for (i = 0; i <= DA_DELETE_MAX; i++) {
2229 if ((softc->delete_available & (1 << i)) == 0 &&
2230 i != softc->delete_method)
2233 strlcat(buf, ",", sizeof(buf));
2234 strlcat(buf, da_delete_method_names[i],
2236 if (i == softc->delete_method)
2237 strlcat(buf, "(*)", sizeof(buf));
2240 strlcat(buf, ">", sizeof(buf));
2241 printf("%s%d: %s\n", periph->periph_name,
2242 periph->unit_number, buf);
2246 * Since our peripheral may be invalidated by an error
2247 * above or an external event, we must release our CCB
2248 * before releasing the probe lock on the peripheral.
2249 * The peripheral will only go away once the last lock
2250 * is removed, and we need it around for the CCB release
2253 xpt_release_ccb(ccb);
2254 softc->state = DA_STATE_NORMAL;
2255 softc->flags |= DA_FLAG_PROBED;
/* Unblock daopen()'s cam_periph_sleep() on the media size. */
2257 wakeup(&softc->disk->d_mediasize);
2258 if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2259 softc->flags |= DA_FLAG_ANNOUNCED;
2260 cam_periph_unhold(periph);
2262 cam_periph_release_locked(periph);
/*
 * dadeletemethodchoose() -- select the BIO_DELETE method: honor the
 * user's sysctl preference if the device supports it, otherwise walk
 * the predefined preference order (skipping ZERO), finally falling back
 * to the supplied default.
 */
2266 dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2270 /* If available, prefer the method requested by user. */
2271 i = softc->delete_method_pref;
2272 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2273 if (methods & (1 << i)) {
2274 dadeletemethodset(softc, i);
2278 /* Use the pre-defined order to choose the best performing delete. */
2279 for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2280 if (i == DA_DELETE_ZERO)
2282 if (softc->delete_available & (1 << i)) {
2283 dadeletemethodset(softc, i);
2288 /* Fallback to default. */
2289 dadeletemethodset(softc, default_method);
/*
 * dadeletemethodsysctl() -- handler for kern.cam.da.N.delete_method.
 * Reports the current method by name; on write, matches the string
 * against the method-name table, records it as the user preference, and
 * re-runs the method selection.
 */
2293 dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2297 struct da_softc *softc;
2298 int i, error, methods, value;
2300 softc = (struct da_softc *)arg1;
2302 value = softc->delete_method;
2303 if (value < 0 || value > DA_DELETE_MAX)
2306 p = da_delete_method_names[value];
2307 strncpy(buf, p, sizeof(buf));
2308 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2309 if (error != 0 || req->newptr == NULL)
2311 methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2312 for (i = 0; i <= DA_DELETE_MAX; i++) {
2313 if (strcmp(buf, da_delete_method_names[i]) == 0)
/* i past DA_DELETE_MAX means no name matched the input. */
2316 if (i > DA_DELETE_MAX)
2318 softc->delete_method_pref = i;
2319 dadeletemethodchoose(softc, DA_DELETE_NONE);
/*
 * dazonemodesysctl() -- read-only handler for kern.cam.da.N.zone_mode.
 * Renders the softc's zone mode as a human-readable string.
 */
2324 dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2327 struct da_softc *softc;
2330 softc = (struct da_softc *)arg1;
2332 switch (softc->zone_mode) {
2333 case DA_ZONE_DRIVE_MANAGED:
2334 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2336 case DA_ZONE_HOST_AWARE:
2337 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2339 case DA_ZONE_HOST_MANAGED:
2340 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2344 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2348 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
/*
 * dazonesupsysctl() -- read-only handler for kern.cam.da.N.zone_support.
 * Builds a comma-separated list of the zone capability flags set in the
 * softc, or "None" when no flags are set.
 */
2354 dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2357 struct da_softc *softc;
2362 softc = (struct da_softc *)arg1;
/* Fixed-buffer sbuf; no allocation needed for this short string. */
2366 sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2368 for (i = 0; i < sizeof(da_zone_desc_table) /
2369 sizeof(da_zone_desc_table[0]); i++) {
2370 if (softc->zone_flags & da_zone_desc_table[i].value) {
2372 sbuf_printf(&sb, ", ");
2375 sbuf_cat(&sb, da_zone_desc_table[i].desc);
2380 sbuf_printf(&sb, "None");
2384 error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
/*
 * daregister() -- periph registration callback invoked from
 * cam_periph_alloc().  Allocates and initializes the softc and its I/O
 * scheduler, applies quirks, determines zone mode/interface, sets up
 * callouts and the GEOM disk, registers async callbacks, and kicks off
 * the probe state machine.  Returns CAM_REQ_CMP on success or
 * CAM_REQ_CMP_ERR on any allocation/registration failure.
 */
2390 daregister(struct cam_periph *periph, void *arg)
2392 struct da_softc *softc;
2393 struct ccb_pathinq cpi;
2394 struct ccb_getdev *cgd;
2398 cgd = (struct ccb_getdev *)arg;
2400 printf("daregister: no getdev CCB, can't register device\n");
2401 return(CAM_REQ_CMP_ERR);
2404 softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
2407 if (softc == NULL) {
2408 printf("daregister: Unable to probe new device. "
2409 "Unable to allocate softc\n");
2410 return(CAM_REQ_CMP_ERR);
2413 if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
2414 printf("daregister: Unable to probe new device. "
2415 "Unable to allocate iosched memory\n");
2416 free(softc, M_DEVBUF);
2417 return(CAM_REQ_CMP_ERR);
/* Default limits; refined later by the probe's mode/VPD results. */
2420 LIST_INIT(&softc->pending_ccbs);
2421 softc->state = DA_STATE_PROBE_RC;
2422 bioq_init(&softc->delete_run_queue);
2423 if (SID_IS_REMOVABLE(&cgd->inq_data))
2424 softc->flags |= DA_FLAG_PACK_REMOVABLE;
2425 softc->unmap_max_ranges = UNMAP_MAX_RANGES;
2426 softc->unmap_max_lba = UNMAP_RANGE_MAX;
2427 softc->unmap_gran = 0;
2428 softc->unmap_gran_align = 0;
2429 softc->ws_max_blks = WS16_MAX_BLKS;
2430 softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
2431 softc->rotating = 1;
2433 periph->softc = softc;
2436 * See if this device has any quirks.
2438 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
2439 (caddr_t)da_quirk_table,
2440 nitems(da_quirk_table),
2441 sizeof(*da_quirk_table), scsi_inquiry_match);
2444 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
2446 softc->quirks = DA_Q_NONE;
2448 /* Check if the SIM does not want 6 byte commands */
2449 bzero(&cpi, sizeof(cpi));
2450 xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
2451 cpi.ccb_h.func_code = XPT_PATH_INQ;
2452 xpt_action((union ccb *)&cpi);
2453 if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
2454 softc->quirks |= DA_Q_NO_6_BYTE;
/* Zone mode: host-managed by device type, drive-managed by quirk. */
2456 if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
2457 softc->zone_mode = DA_ZONE_HOST_MANAGED;
2458 else if (softc->quirks & DA_Q_SMR_DM)
2459 softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
2461 softc->zone_mode = DA_ZONE_NONE;
2463 if (softc->zone_mode != DA_ZONE_NONE) {
/* ATA-behind-SAT devices may speak ZBC via translation or passthrough. */
2464 if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
2465 if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
2466 softc->zone_interface = DA_ZONE_IF_ATA_SAT;
2468 softc->zone_interface = DA_ZONE_IF_ATA_PASS;
2470 softc->zone_interface = DA_ZONE_IF_SCSI;
2473 TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
2476 * Take an exclusive refcount on the periph while dastart is called
2477 * to finish the probe. The reference will be dropped in dadone at
2480 (void)cam_periph_hold(periph, PRIBIO);
2483 * Schedule a periodic event to occasionally send an
2484 * ordered tag to a device.
2486 callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
2487 callout_reset(&softc->sendordered_c,
2488 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
2489 dasendorderedtag, softc);
2491 cam_periph_unlock(periph);
2493 * RBC devices don't have to support READ(6), only READ(10).
2495 if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
2496 softc->minimum_cmd_size = 10;
2498 softc->minimum_cmd_size = 6;
2501 * Load the user's default, if any.
2503 snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
2504 periph->unit_number);
2505 TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
2508 * 6, 10, 12 and 16 are the currently permissible values.
2510 if (softc->minimum_cmd_size > 12)
2511 softc->minimum_cmd_size = 16;
2512 else if (softc->minimum_cmd_size > 10)
2513 softc->minimum_cmd_size = 12;
2514 else if (softc->minimum_cmd_size > 6)
2515 softc->minimum_cmd_size = 10;
2517 softc->minimum_cmd_size = 6;
2519 /* Predict whether device may support READ CAPACITY(16). */
2520 if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
2521 (softc->quirks & DA_Q_NO_RC16) == 0) {
2522 softc->flags |= DA_FLAG_CAN_RC16;
2523 softc->state = DA_STATE_PROBE_RC16;
2527 * Register this media as a disk.
2529 softc->disk = disk_alloc();
2530 softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
2531 periph->unit_number, 0,
2532 DEVSTAT_BS_UNAVAILABLE,
2533 SID_TYPE(&cgd->inq_data) |
2534 XPORT_DEVSTAT_TYPE(cpi.transport),
2535 DEVSTAT_PRIORITY_DISK);
2536 softc->disk->d_open = daopen;
2537 softc->disk->d_close = daclose;
2538 softc->disk->d_strategy = dastrategy;
2539 softc->disk->d_dump = dadump;
2540 softc->disk->d_getattr = dagetattr;
2541 softc->disk->d_gone = dadiskgonecb;
2542 softc->disk->d_name = "da";
2543 softc->disk->d_drv1 = periph;
/* Clamp the maximum I/O size the SIM reported to MAXPHYS. */
2545 softc->maxio = DFLTPHYS; /* traditional default */
2546 else if (cpi.maxio > MAXPHYS)
2547 softc->maxio = MAXPHYS; /* for safety */
2549 softc->maxio = cpi.maxio;
2550 softc->disk->d_maxsize = softc->maxio;
2551 softc->disk->d_unit = periph->unit_number;
2552 softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
2553 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
2554 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
2555 if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
2556 softc->unmappedio = 1;
2557 softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
/* Build "vendor product" description from sanitized INQUIRY strings. */
2559 cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
2560 sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
2561 strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
2562 cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
2563 cgd->inq_data.product, sizeof(cgd->inq_data.product),
2564 sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
2565 softc->disk->d_hba_vendor = cpi.hba_vendor;
2566 softc->disk->d_hba_device = cpi.hba_device;
2567 softc->disk->d_hba_subvendor = cpi.hba_subvendor;
2568 softc->disk->d_hba_subdevice = cpi.hba_subdevice;
2571 * Acquire a reference to the periph before we register with GEOM.
2572 * We'll release this reference once GEOM calls us back (via
2573 * dadiskgonecb()) telling us that our provider has been freed.
2575 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
2576 xpt_print(periph->path, "%s: lost periph during "
2577 "registration!\n", __func__);
2578 cam_periph_lock(periph);
2579 return (CAM_REQ_CMP_ERR);
2582 disk_create(softc->disk, DISK_VERSION);
2583 cam_periph_lock(periph);
2586 * Add async callbacks for events of interest.
2587 * I don't bother checking if this fails as,
2588 * in most cases, the system will function just
2589 * fine without them and the only alternative
2590 * would be to not attach the device on failure.
2592 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
2593 AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
2594 AC_INQ_CHANGED, daasync, periph, periph->path);
2597 * Emit an attribute changed notification just in case
2598 * physical path information arrived before our async
2599 * event handler was registered, but after anyone attaching
2600 * to our disk device polled it.
2602 disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);
2605 * Schedule a periodic media polling events.
2607 callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
/* Poll only removable media on devices that can't send AENs themselves. */
2608 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
2609 (cgd->inq_flags & SID_AEN) == 0 &&
2610 da_poll_period != 0)
2611 callout_reset(&softc->mediapoll_c, da_poll_period * hz,
2612 damediapoll, periph);
/* Start the probe state machine via dastart(). */
2614 xpt_schedule(periph, CAM_PRIORITY_DEV);
2616 return(CAM_REQ_CMP);
/*
 * da_zone_bio_to_scsi() -- map a DISK_ZONE_* bio command to the
 * corresponding ZBC OUT service action.  NOTE(review): the final case
 * label and default branch are elided from this listing.
 */
2620 da_zone_bio_to_scsi(int disk_zone_cmd)
2622 switch (disk_zone_cmd) {
2623 case DISK_ZONE_OPEN:
2624 return ZBC_OUT_SA_OPEN;
2625 case DISK_ZONE_CLOSE:
2626 return ZBC_OUT_SA_CLOSE;
2627 case DISK_ZONE_FINISH:
2628 return ZBC_OUT_SA_FINISH;
2630 return ZBC_OUT_SA_RWP;
2637 da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
2640 struct da_softc *softc;
2645 if (bp->bio_cmd != BIO_ZONE) {
2650 softc = periph->softc;
2652 switch (bp->bio_zone.zone_cmd) {
2653 case DISK_ZONE_OPEN:
2654 case DISK_ZONE_CLOSE:
2655 case DISK_ZONE_FINISH:
2656 case DISK_ZONE_RWP: {
2661 zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
2662 if (zone_sa == -1) {
2663 xpt_print(periph->path, "Cannot translate zone "
2664 "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
2670 lba = bp->bio_zone.zone_params.rwp.id;
2672 if (bp->bio_zone.zone_params.rwp.flags &
2673 DISK_ZONE_RWP_FLAG_ALL)
2674 zone_flags |= ZBC_OUT_ALL;
2676 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2677 scsi_zbc_out(&ccb->csio,
2678 /*retries*/ da_retry_count,
2680 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2681 /*service_action*/ zone_sa,
2683 /*zone_flags*/ zone_flags,
2686 /*sense_len*/ SSD_FULL_SIZE,
2687 /*timeout*/ da_default_timeout * 1000);
2690 * Note that in this case, even though we can
2691 * technically use NCQ, we don't bother for several
2693 * 1. It hasn't been tested on a SAT layer that
2694 * supports it. This is new as of SAT-4.
2695 * 2. Even when there is a SAT layer that supports
2696 * it, that SAT layer will also probably support
2697 * ZBC -> ZAC translation, since they are both
2698 * in the SAT-4 spec.
2699 * 3. Translation will likely be preferable to ATA
2700 * passthrough. LSI / Avago at least single
2701 * steps ATA passthrough commands in the HBA,
2702 * regardless of protocol, so unless that
2703 * changes, there is a performance penalty for
2704 * doing ATA passthrough no matter whether
2705 * you're using NCQ/FPDMA, DMA or PIO.
2706 * 4. It requires a 32-byte CDB, which at least at
2707 * this point in CAM requires a CDB pointer, which
2708 * would require us to allocate an additional bit
2709 * of storage separate from the CCB.
2711 error = scsi_ata_zac_mgmt_out(&ccb->csio,
2712 /*retries*/ da_retry_count,
2714 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2716 /*zm_action*/ zone_sa,
2718 /*zone_flags*/ zone_flags,
2721 /*cdb_storage*/ NULL,
2722 /*cdb_storage_len*/ 0,
2723 /*sense_len*/ SSD_FULL_SIZE,
2724 /*timeout*/ da_default_timeout * 1000);
2727 xpt_print(periph->path,
2728 "scsi_ata_zac_mgmt_out() returned an "
2737 case DISK_ZONE_REPORT_ZONES: {
2739 uint32_t num_entries, alloc_size;
2740 struct disk_zone_report *rep;
2742 rep = &bp->bio_zone.zone_params.report;
2744 num_entries = rep->entries_allocated;
2745 if (num_entries == 0) {
2746 xpt_print(periph->path, "No entries allocated for "
2747 "Report Zones request\n");
2751 alloc_size = sizeof(struct scsi_report_zones_hdr) +
2752 (sizeof(struct scsi_report_zones_desc) * num_entries);
2753 alloc_size = min(alloc_size, softc->disk->d_maxsize);
2754 rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
2755 if (rz_ptr == NULL) {
2756 xpt_print(periph->path, "Unable to allocate memory "
2757 "for Report Zones request\n");
2762 if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2763 scsi_zbc_in(&ccb->csio,
2764 /*retries*/ da_retry_count,
2766 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2767 /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
2768 /*zone_start_lba*/ rep->starting_id,
2769 /*zone_options*/ rep->rep_options,
2770 /*data_ptr*/ rz_ptr,
2771 /*dxfer_len*/ alloc_size,
2772 /*sense_len*/ SSD_FULL_SIZE,
2773 /*timeout*/ da_default_timeout * 1000);
2776 * Note that in this case, even though we can
2777 * technically use NCQ, we don't bother for several
2779 * 1. It hasn't been tested on a SAT layer that
2780 * supports it. This is new as of SAT-4.
2781 * 2. Even when there is a SAT layer that supports
2782 * it, that SAT layer will also probably support
2783 * ZBC -> ZAC translation, since they are both
2784 * in the SAT-4 spec.
2785 * 3. Translation will likely be preferable to ATA
2786 * passthrough. LSI / Avago at least single
2787 * steps ATA passthrough commands in the HBA,
2788 * regardless of protocol, so unless that
2789 * changes, there is a performance penalty for
2790 * doing ATA passthrough no matter whether
2791 * you're using NCQ/FPDMA, DMA or PIO.
2792 * 4. It requires a 32-byte CDB, which at least at
2793 * this point in CAM requires a CDB pointer, which
2794 * would require us to allocate an additional bit
2795 * of storage separate from the CCB.
2797 error = scsi_ata_zac_mgmt_in(&ccb->csio,
2798 /*retries*/ da_retry_count,
2800 /*tag_action*/ MSG_SIMPLE_Q_TAG,
2802 /*zm_action*/ ATA_ZM_REPORT_ZONES,
2803 /*zone_id*/ rep->starting_id,
2804 /*zone_flags*/ rep->rep_options,
2805 /*data_ptr*/ rz_ptr,
2806 /*dxfer_len*/ alloc_size,
2807 /*cdb_storage*/ NULL,
2808 /*cdb_storage_len*/ 0,
2809 /*sense_len*/ SSD_FULL_SIZE,
2810 /*timeout*/ da_default_timeout * 1000);
2813 xpt_print(periph->path,
2814 "scsi_ata_zac_mgmt_in() returned an "
2821 * For BIO_ZONE, this isn't normally needed. However, it
2822 * is used by devstat_end_transaction_bio() to determine
2823 * how much data was transferred.
2826 * XXX KDM we have a problem. But I'm not sure how to fix
2827 * it. devstat uses bio_bcount - bio_resid to calculate
2828 * the amount of data transferred. The GEOM disk code
2829 * uses bio_length - bio_resid to calculate the amount of
2830 * data in bio_completed. We have different structure
2831 * sizes above and below the ada(4) driver. So, if we
2832 * use the sizes above, the amount transferred won't be
2833 * quite accurate for devstat. If we use different sizes
2834 * for bio_bcount and bio_length (above and below
2835 * respectively), then the residual needs to match one or
2836 * the other. Everything is calculated after the bio
2837 * leaves the driver, so changing the values around isn't
2838 * really an option. For now, just set the count to the
2839 * passed in length. This means that the calculations
2840 * above (e.g. bio_completed) will be correct, but the
2841 * amount of data reported to devstat will be slightly
2842 * under or overstated.
2844 bp->bio_bcount = bp->bio_length;
2850 case DISK_ZONE_GET_PARAMS: {
2851 struct disk_zone_disk_params *params;
2853 params = &bp->bio_zone.zone_params.disk_params;
2854 bzero(params, sizeof(*params));
2856 switch (softc->zone_mode) {
2857 case DA_ZONE_DRIVE_MANAGED:
2858 params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
2860 case DA_ZONE_HOST_AWARE:
2861 params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
2863 case DA_ZONE_HOST_MANAGED:
2864 params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
2868 params->zone_mode = DISK_ZONE_MODE_NONE;
2872 if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
2873 params->flags |= DISK_ZONE_DISK_URSWRZ;
2875 if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
2876 params->optimal_seq_zones = softc->optimal_seq_zones;
2877 params->flags |= DISK_ZONE_OPT_SEQ_SET;
2880 if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
2881 params->optimal_nonseq_zones =
2882 softc->optimal_nonseq_zones;
2883 params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
2886 if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
2887 params->max_seq_zones = softc->max_seq_zones;
2888 params->flags |= DISK_ZONE_MAX_SEQ_SET;
2890 if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
2891 params->flags |= DISK_ZONE_RZ_SUP;
2893 if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
2894 params->flags |= DISK_ZONE_OPEN_SUP;
2896 if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
2897 params->flags |= DISK_ZONE_CLOSE_SUP;
2899 if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
2900 params->flags |= DISK_ZONE_FINISH_SUP;
2902 if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
2903 params->flags |= DISK_ZONE_RWP_SUP;
/*
 * dastart() -- the da(4) periph "start" action routine.  Behavior is
 * driven by softc->state: in DA_STATE_NORMAL it pulls the next bio from
 * the CAM I/O scheduler and translates it into a SCSI CCB (TEST UNIT
 * READY, READ/WRITE, SYNCHRONIZE CACHE, delete, or zone command); each
 * DA_STATE_PROBE_* state issues one discovery command (READ CAPACITY,
 * VPD inquiries, ATA IDENTIFY, ATA log reads) used to probe device
 * capabilities.
 *
 * NOTE(review): this excerpt is an elided listing -- the leading
 * numbers are the original file's line numbers and some intermediate
 * lines are not shown here.
 */
2914 dastart(struct cam_periph *periph, union ccb *start_ccb)
2916 	struct da_softc *softc;
2918 	softc = (struct da_softc *)periph->softc;
2920 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
2923 	switch (softc->state) {
2924 	case DA_STATE_NORMAL:
2930 		bp = cam_iosched_next_bio(softc->cam_iosched);
/* A queued DA_WORK_TUR request takes priority: issue TEST UNIT READY. */
2932 		if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2933 			cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2934 			scsi_test_unit_ready(&start_ccb->csio,
2935 			     /*retries*/ da_retry_count,
2939 			     da_default_timeout * 1000);
2940 			start_ccb->ccb_h.ccb_bp = NULL;
2941 			start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
2942 			xpt_action(start_ccb);
2944 			xpt_release_ccb(start_ccb);
/* Dispatch BIO_DELETE through the currently selected delete method. */
2948 		if (bp->bio_cmd == BIO_DELETE) {
2949 			if (softc->delete_func != NULL) {
2950 				softc->delete_func(periph, start_ccb, bp);
2953 				/* Not sure this is possible, but failsafe by lying and saying "sure, done." */
2954 				biofinish(bp, NULL, 0);
2959 		if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
2960 			cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
2961 			cam_periph_release_locked(periph);	/* XXX is this still valid? I think so but unverified */
/* Use an ordered tag when the bio demands it or DA_FLAG_NEED_OTAG is set. */
2964 		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
2965 		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
2966 			softc->flags &= ~DA_FLAG_NEED_OTAG;
2967 			softc->flags |= DA_FLAG_WAS_OTAG;
2968 			tag_code = MSG_ORDERED_Q_TAG;
2970 			tag_code = MSG_SIMPLE_Q_TAG;
/* Build a CCB appropriate to the bio's command type. */
2973 		switch (bp->bio_cmd) {
2980 			biotrack(bp, __func__);
2982 			if (bp->bio_cmd == BIO_WRITE) {
2983 				softc->flags |= DA_FLAG_DIRTY;
2984 				rw_op = SCSI_RW_WRITE;
2986 				rw_op = SCSI_RW_READ;
2989 			data_ptr = bp->bio_data;
2990 			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
2991 				rw_op |= SCSI_RW_BIO;
2995 			scsi_read_write(&start_ccb->csio,
2996 					/*retries*/da_retry_count,
2998 					/*tag_action*/tag_code,
3001 					softc->minimum_cmd_size,
3002 					/*lba*/bp->bio_pblkno,
3003 					/*block_count*/bp->bio_bcount /
3004 					softc->params.secsize,
3006 					/*dxfer_len*/ bp->bio_bcount,
3007 					/*sense_len*/SSD_FULL_SIZE,
3008 					da_default_timeout * 1000);
3009 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3010 			start_ccb->csio.bio = bp;
3016 			 * BIO_FLUSH doesn't currently communicate
3017 			 * range data, so we synchronize the cache
3018 			 * over the whole disk.  We also force
3019 			 * ordered tag semantics so the flush applies
3020 			 * to all previously queued I/O.
3022 			scsi_synchronize_cache(&start_ccb->csio,
3029 					       da_default_timeout*1000);
3032 			int error, queue_ccb;
3036 			error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
3038 			 || (queue_ccb == 0)) {
3039 				biofinish(bp, NULL, error);
3040 				xpt_release_ccb(start_ccb);
/* Common queued-I/O setup: link the CCB onto softc->pending_ccbs. */
3046 		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3047 		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3048 		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3051 		LIST_INSERT_HEAD(&softc->pending_ccbs,
3052 				 &start_ccb->ccb_h, periph_links.le);
3054 		/* We expect a unit attention from this device */
3055 		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3056 			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3057 			softc->flags &= ~DA_FLAG_RETRY_UA;
3060 		start_ccb->ccb_h.ccb_bp = bp;
3062 		cam_periph_unlock(periph);
3063 		xpt_action(start_ccb);
3064 		cam_periph_lock(periph);
3067 		/* May have more work to do, so ensure we stay scheduled */
/* The remaining states each issue a single probe/discovery command. */
3071 	case DA_STATE_PROBE_RC:
3073 		struct scsi_read_capacity_data *rcap;
3075 		rcap = (struct scsi_read_capacity_data *)
3076 		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3078 			printf("dastart: Couldn't malloc read_capacity data\n");
3079 			/* da_free_periph??? */
3082 		scsi_read_capacity(&start_ccb->csio,
3083 				   /*retries*/da_retry_count,
3089 		start_ccb->ccb_h.ccb_bp = NULL;
3090 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3091 		xpt_action(start_ccb);
3094 	case DA_STATE_PROBE_RC16:
3096 		struct scsi_read_capacity_data_long *rcaplong;
3098 		rcaplong = (struct scsi_read_capacity_data_long *)
3099 		    malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3100 		if (rcaplong == NULL) {
3101 			printf("dastart: Couldn't malloc read_capacity data\n");
3102 			/* da_free_periph??? */
3105 		scsi_read_capacity_16(&start_ccb->csio,
3106 				      /*retries*/ da_retry_count,
3108 				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
3112 				      /*rcap_buf*/ (uint8_t *)rcaplong,
3113 				      /*rcap_buf_len*/ sizeof(*rcaplong),
3114 				      /*sense_len*/ SSD_FULL_SIZE,
3115 				      /*timeout*/ da_default_timeout * 1000);
3116 		start_ccb->ccb_h.ccb_bp = NULL;
3117 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3118 		xpt_action(start_ccb);
3121 	case DA_STATE_PROBE_LBP:
3123 		struct scsi_vpd_logical_block_prov *lbp;
3125 		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3127 			 * If we get here we don't support any SBC-3 delete
3128 			 * methods with UNMAP as the Logical Block Provisioning
3129 			 * VPD page support is required for devices which
3130 			 * support it according to T10/1799-D Revision 31
3131 			 * however older revisions of the spec don't mandate
3132 			 * this so we currently don't remove these methods
3133 			 * from the available set.
3135 			softc->state = DA_STATE_PROBE_BLK_LIMITS;
3139 		lbp = (struct scsi_vpd_logical_block_prov *)
3140 		    malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3143 			printf("dastart: Couldn't malloc lbp data\n");
3144 			/* da_free_periph??? */
3148 		scsi_inquiry(&start_ccb->csio,
3149 			     /*retries*/da_retry_count,
3151 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3152 			     /*inq_buf*/(u_int8_t *)lbp,
3153 			     /*inq_len*/sizeof(*lbp),
3155 			     /*page_code*/SVPD_LBP,
3156 			     /*sense_len*/SSD_MIN_SIZE,
3157 			     /*timeout*/da_default_timeout * 1000);
3158 		start_ccb->ccb_h.ccb_bp = NULL;
3159 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3160 		xpt_action(start_ccb);
3163 	case DA_STATE_PROBE_BLK_LIMITS:
3165 		struct scsi_vpd_block_limits *block_limits;
3167 		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3168 			/* Not supported skip to next probe */
3169 			softc->state = DA_STATE_PROBE_BDC;
3173 		block_limits = (struct scsi_vpd_block_limits *)
3174 		    malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3176 		if (block_limits == NULL) {
3177 			printf("dastart: Couldn't malloc block_limits data\n");
3178 			/* da_free_periph??? */
3182 		scsi_inquiry(&start_ccb->csio,
3183 			     /*retries*/da_retry_count,
3185 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3186 			     /*inq_buf*/(u_int8_t *)block_limits,
3187 			     /*inq_len*/sizeof(*block_limits),
3189 			     /*page_code*/SVPD_BLOCK_LIMITS,
3190 			     /*sense_len*/SSD_MIN_SIZE,
3191 			     /*timeout*/da_default_timeout * 1000);
3192 		start_ccb->ccb_h.ccb_bp = NULL;
3193 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3194 		xpt_action(start_ccb);
3197 	case DA_STATE_PROBE_BDC:
3199 		struct scsi_vpd_block_characteristics *bdc;
3201 		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3202 			softc->state = DA_STATE_PROBE_ATA;
3206 		bdc = (struct scsi_vpd_block_characteristics *)
3207 		    malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3210 			printf("dastart: Couldn't malloc bdc data\n");
3211 			/* da_free_periph??? */
3215 		scsi_inquiry(&start_ccb->csio,
3216 			     /*retries*/da_retry_count,
3218 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3219 			     /*inq_buf*/(u_int8_t *)bdc,
3220 			     /*inq_len*/sizeof(*bdc),
3222 			     /*page_code*/SVPD_BDC,
3223 			     /*sense_len*/SSD_MIN_SIZE,
3224 			     /*timeout*/da_default_timeout * 1000);
3225 		start_ccb->ccb_h.ccb_bp = NULL;
3226 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3227 		xpt_action(start_ccb);
3230 	case DA_STATE_PROBE_ATA:
3232 		struct ata_params *ata_params;
3234 		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3235 			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3236 			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3238 				 * Note that if the ATA VPD page isn't
3239 				 * supported, we aren't talking to an ATA
3240 				 * device anyway.  Support for that VPD
3241 				 * page is mandatory for SCSI to ATA (SAT)
3242 				 * translation layers.
3244 				softc->state = DA_STATE_PROBE_ZONE;
3247 			daprobedone(periph, start_ccb);
3251 		ata_params = (struct ata_params*)
3252 			malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO);
3254 		if (ata_params == NULL) {
3255 			xpt_print(periph->path, "Couldn't malloc ata_params "
3257 			/* da_free_periph??? */
3261 		scsi_ata_identify(&start_ccb->csio,
3262 				  /*retries*/da_retry_count,
3264 				  /*tag_action*/MSG_SIMPLE_Q_TAG,
3265 				  /*data_ptr*/(u_int8_t *)ata_params,
3266 				  /*dxfer_len*/sizeof(*ata_params),
3267 				  /*sense_len*/SSD_FULL_SIZE,
3268 				  /*timeout*/da_default_timeout * 1000);
3269 		start_ccb->ccb_h.ccb_bp = NULL;
3270 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3271 		xpt_action(start_ccb);
3274 	case DA_STATE_PROBE_ATA_LOGDIR:
3276 		struct ata_gp_log_dir *log_dir;
3281 		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3283 			 * If we don't have log support, not much point in
3284 			 * trying to probe zone support.
3286 			daprobedone(periph, start_ccb);
3291 		 * If we have an ATA device (the SCSI ATA Information VPD
3292 		 * page should be present and the ATA identify should have
3293 		 * succeeded) and it supports logs, ask for the log directory.
3296 		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3297 		if (log_dir == NULL) {
3298 			xpt_print(periph->path, "Couldn't malloc log_dir "
3300 			daprobedone(periph, start_ccb);
3304 		retval = scsi_ata_read_log(&start_ccb->csio,
3305 		    /*retries*/ da_retry_count,
3307 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3308 		    /*log_address*/ ATA_LOG_DIRECTORY,
3311 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3312 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3313 		    /*data_ptr*/ (uint8_t *)log_dir,
3314 		    /*dxfer_len*/ sizeof(*log_dir),
3315 		    /*sense_len*/ SSD_FULL_SIZE,
3316 		    /*timeout*/ da_default_timeout * 1000);
3319 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3320 			free(log_dir, M_SCSIDA);
3321 			daprobedone(periph, start_ccb);
3324 		start_ccb->ccb_h.ccb_bp = NULL;
3325 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3326 		xpt_action(start_ccb);
3329 	case DA_STATE_PROBE_ATA_IDDIR:
3331 		struct ata_identify_log_pages *id_dir;
3337 		 * Check here to see whether the Identify Device log is
3338 		 * supported in the directory of logs.  If so, continue
3339 		 * with requesting the log of identify device pages.
3341 		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3342 			daprobedone(periph, start_ccb);
3346 		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3347 		if (id_dir == NULL) {
3348 			xpt_print(periph->path, "Couldn't malloc id_dir "
3350 			daprobedone(periph, start_ccb);
3354 		retval = scsi_ata_read_log(&start_ccb->csio,
3355 		    /*retries*/ da_retry_count,
3357 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3358 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3359 		    /*page_number*/ ATA_IDL_PAGE_LIST,
3361 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3362 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3363 		    /*data_ptr*/ (uint8_t *)id_dir,
3364 		    /*dxfer_len*/ sizeof(*id_dir),
3365 		    /*sense_len*/ SSD_FULL_SIZE,
3366 		    /*timeout*/ da_default_timeout * 1000);
3369 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3370 			free(id_dir, M_SCSIDA);
3371 			daprobedone(periph, start_ccb);
3374 		start_ccb->ccb_h.ccb_bp = NULL;
3375 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3376 		xpt_action(start_ccb);
3379 	case DA_STATE_PROBE_ATA_SUP:
3381 		struct ata_identify_log_sup_cap *sup_cap;
3387 		 * Check here to see whether the Supported Capabilities log
3388 		 * is in the list of Identify Device logs.
3390 		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3391 			daprobedone(periph, start_ccb);
3395 		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3396 		if (sup_cap == NULL) {
3397 			xpt_print(periph->path, "Couldn't malloc sup_cap "
3399 			daprobedone(periph, start_ccb);
3403 		retval = scsi_ata_read_log(&start_ccb->csio,
3404 		    /*retries*/ da_retry_count,
3406 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3407 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3408 		    /*page_number*/ ATA_IDL_SUP_CAP,
3410 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3411 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3412 		    /*data_ptr*/ (uint8_t *)sup_cap,
3413 		    /*dxfer_len*/ sizeof(*sup_cap),
3414 		    /*sense_len*/ SSD_FULL_SIZE,
3415 		    /*timeout*/ da_default_timeout * 1000);
3418 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3419 			free(sup_cap, M_SCSIDA);
3420 			daprobedone(periph, start_ccb);
3425 		start_ccb->ccb_h.ccb_bp = NULL;
3426 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3427 		xpt_action(start_ccb);
3430 	case DA_STATE_PROBE_ATA_ZONE:
3432 		struct ata_zoned_info_log *ata_zone;
3438 		 * Check here to see whether the zoned device information
3439 		 * page is supported.  If so, continue on to request it.
3440 		 * If not, skip to DA_STATE_PROBE_LOG or done.
3442 		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3443 			daprobedone(periph, start_ccb);
3446 		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3448 		if (ata_zone == NULL) {
3449 			xpt_print(periph->path, "Couldn't malloc ata_zone "
3451 			daprobedone(periph, start_ccb);
3455 		retval = scsi_ata_read_log(&start_ccb->csio,
3456 		    /*retries*/ da_retry_count,
3458 		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3459 		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3460 		    /*page_number*/ ATA_IDL_ZDI,
3462 		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3463 				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3464 		    /*data_ptr*/ (uint8_t *)ata_zone,
3465 		    /*dxfer_len*/ sizeof(*ata_zone),
3466 		    /*sense_len*/ SSD_FULL_SIZE,
3467 		    /*timeout*/ da_default_timeout * 1000);
3470 			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3471 			free(ata_zone, M_SCSIDA);
3472 			daprobedone(periph, start_ccb);
3475 		start_ccb->ccb_h.ccb_bp = NULL;
3476 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3477 		xpt_action(start_ccb);
3481 	case DA_STATE_PROBE_ZONE:
3483 		struct scsi_vpd_zoned_bdc *bdc;
3486 		 * Note that this page will be supported for SCSI protocol
3487 		 * devices that support ZBC (SMR devices), as well as ATA
3488 		 * protocol devices that are behind a SAT (SCSI to ATA
3489 		 * Translation) layer that supports converting ZBC commands
3490 		 * to their ZAC equivalents.
3492 		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3493 			daprobedone(periph, start_ccb);
3496 		bdc = (struct scsi_vpd_zoned_bdc *)
3497 		    malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3500 			xpt_release_ccb(start_ccb);
3501 			xpt_print(periph->path, "Couldn't malloc zone VPD "
3505 		scsi_inquiry(&start_ccb->csio,
3506 			     /*retries*/da_retry_count,
3508 			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3509 			     /*inq_buf*/(u_int8_t *)bdc,
3510 			     /*inq_len*/sizeof(*bdc),
3512 			     /*page_code*/SVPD_ZONED_BDC,
3513 			     /*sense_len*/SSD_FULL_SIZE,
3514 			     /*timeout*/da_default_timeout * 1000);
3515 		start_ccb->ccb_h.ccb_bp = NULL;
3516 		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3517 		xpt_action(start_ccb);
3524  * In each of the methods below, while it's the caller's
3525  * responsibility to ensure the request will fit into a
3526  * single device request, we might have changed the delete
3527  * method due to the device incorrectly advertising either
3528  * its supported methods or limits.
3530  * To prevent this causing further issues, we validate the
3531  * request against the method's limits, and warn where this
3532  * would otherwise be unnecessary.
/*
 * BIO_DELETE via SCSI UNMAP: coalesce the current bio (bp) plus any
 * further trims queued in the I/O scheduler into the descriptor list in
 * softc->unmap_buf, honoring the device's unmap_max_lba and
 * unmap_max_ranges limits (and, under DA_Q_STRICT_UNMAP, the unmap
 * granularity/alignment), then issue a single UNMAP CCB.
 */
3535 da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3537 	struct da_softc *softc = (struct da_softc *)periph->softc;;
/* NOTE(review): stray second ';' above -- harmless empty statement, but should be removed. */
3539 	uint8_t *buf = softc->unmap_buf;
3540 	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3541 	uint64_t lba, lastlba = (uint64_t)-1;
3542 	uint64_t totalcount = 0;
3544 	uint32_t c, lastcount = 0, ranges = 0;
3547 	 * Currently this doesn't take the UNMAP
3548 	 * Granularity and Granularity Alignment
3549 	 * fields into account.
3551 	 * This could result in suboptimal unmap
3552 	 * requests as well as UNMAP calls unmapping
3553 	 * fewer LBAs than requested.
3556 	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3560 	 * Note: ada and da are different in how they store the
3561 	 * pending bp's in a trim. ada stores all of them in the
3562 	 * trim_req.bps. da stores all but the first one in the
3563 	 * delete_run_queue. ada then completes all the bps in
3564 	 * its adadone() loop. da completes all the bps in the
3565 	 * delete_run_queue in dadone, and relies on the biodone
3566 	 * after to complete. This should be reconciled since there's
3567 	 * no real reason to do it differently. XXX
3570 		bioq_insert_tail(&softc->delete_run_queue, bp1);
3571 		lba = bp1->bio_pblkno;
3572 		count = bp1->bio_bcount / softc->params.secsize;
3574 		/* Try to extend the previous range. */
3575 		if (lba == lastlba) {
3576 			c = omin(count, UNMAP_RANGE_MAX - lastcount);
3579 				scsi_ulto4b(lastcount, d[ranges - 1].length);
3583 		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
3584 		    softc->unmap_gran != 0) {
3585 			/* Align length of the previous range. */
3586 			if ((c = lastcount % softc->unmap_gran) != 0) {
3587 				if (lastcount <= c) {
3588 					totalcount -= lastcount;
3589 					lastlba = (uint64_t)-1;
3596 					scsi_ulto4b(lastcount, d[ranges - 1].length);
3599 			/* Align beginning of the new range. */
3600 			c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
3602 				c = softc->unmap_gran - c;
/* Emit a new descriptor; warn and stop if device limits would be exceeded. */
3613 			c = omin(count, UNMAP_RANGE_MAX);
3614 			if (totalcount + c > softc->unmap_max_lba ||
3615 			    ranges >= softc->unmap_max_ranges) {
3616 				xpt_print(periph->path,
3617 				    "%s issuing short delete %ld > %ld"
3619 				    da_delete_method_desc[softc->delete_method],
3620 				    totalcount + c, softc->unmap_max_lba,
3621 				    ranges, softc->unmap_max_ranges);
3624 			scsi_u64to8b(lba, d[ranges].lba);
3625 			scsi_ulto4b(c, d[ranges].length);
/* Pull the next queued trim; put it back if it won't fit in this UNMAP. */
3633 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3636 		if (ranges >= softc->unmap_max_ranges ||
3637 		    totalcount + bp1->bio_bcount /
3638 		    softc->params.secsize > softc->unmap_max_lba) {
3639 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3644 	/* Align length of the last range. */
3645 	if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
3646 	    (c = lastcount % softc->unmap_gran) != 0) {
3650 			scsi_ulto4b(lastcount - c, d[ranges - 1].length);
/* Fill in the two UNMAP parameter-list header length fields. */
3653 	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
3654 	scsi_ulto2b(ranges * 16, &buf[2]);
3656 	scsi_unmap(&ccb->csio,
3657 		   /*retries*/da_retry_count,
3659 		   /*tag_action*/MSG_SIMPLE_Q_TAG,
3662 		   /*dxfer_len*/ ranges * 16 + 8,
3663 		   /*sense_len*/SSD_FULL_SIZE,
3664 		   da_default_timeout * 1000);
3665 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3666 	ccb->ccb_h.flags |= CAM_UNLOCKED;
3667 	cam_iosched_submit_trim(softc->cam_iosched);
/*
 * BIO_DELETE via ATA DATA SET MANAGEMENT TRIM (sent through the SAT
 * layer with scsi_ata_trim()): pack little-endian 8-byte LBA/count
 * range entries into softc->unmap_buf, merging adjacent requests and
 * limited by softc->trim_max_ranges, then issue one TRIM CCB.
 */
3671 da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3673 	struct da_softc *softc = (struct da_softc *)periph->softc;
3675 	uint8_t *buf = softc->unmap_buf;
3676 	uint64_t lastlba = (uint64_t)-1;
3679 	uint32_t lastcount = 0, c, requestcount;
3680 	int ranges = 0, off, block_count;
3682 	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
/* See the note in da_delete_unmap() about how pending bps are tracked. */
3685 		if (bp1 != bp)//XXX imp XXX
3686 			bioq_insert_tail(&softc->delete_run_queue, bp1);
3687 		lba = bp1->bio_pblkno;
3688 		count = bp1->bio_bcount / softc->params.secsize;
3689 		requestcount = count;
3691 		/* Try to extend the previous range. */
3692 		if (lba == lastlba) {
3693 			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
3695 				off = (ranges - 1) * 8;
3696 				buf[off + 6] = lastcount & 0xff;
3697 				buf[off + 7] = (lastcount >> 8) & 0xff;
/* Emit a new 8-byte DSM range entry: 48-bit LBA, 16-bit sector count. */
3703 			c = omin(count, ATA_DSM_RANGE_MAX);
3706 			buf[off + 0] = lba & 0xff;
3707 			buf[off + 1] = (lba >> 8) & 0xff;
3708 			buf[off + 2] = (lba >> 16) & 0xff;
3709 			buf[off + 3] = (lba >> 24) & 0xff;
3710 			buf[off + 4] = (lba >> 32) & 0xff;
3711 			buf[off + 5] = (lba >> 40) & 0xff;
3712 			buf[off + 6] = c & 0xff;
3713 			buf[off + 7] = (c >> 8) & 0xff;
/* Ran out of range slots before consuming the whole bio: short delete. */
3718 		if (count != 0 && ranges == softc->trim_max_ranges) {
3719 			xpt_print(periph->path,
3720 			    "%s issuing short delete %ld > %ld\n",
3721 			    da_delete_method_desc[softc->delete_method],
3723 			    (softc->trim_max_ranges - ranges) *
/* Pull the next queued trim; put it back if it can't fit in this TRIM. */
3729 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3732 		if (bp1->bio_bcount / softc->params.secsize >
3733 		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
3734 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3739 	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
3740 	scsi_ata_trim(&ccb->csio,
3741 		      /*retries*/da_retry_count,
3743 		      /*tag_action*/MSG_SIMPLE_Q_TAG,
3746 		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
3747 		      /*sense_len*/SSD_FULL_SIZE,
3748 		      da_default_timeout * 1000);
3749 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3750 	ccb->ccb_h.flags |= CAM_UNLOCKED;
3751 	cam_iosched_submit_trim(softc->cam_iosched);
3755  * We calculate ws_max_blks here based on d_delmaxsize instead
3756  * of using softc->ws_max_blks, as that is the absolute max for the
3757  * device, not the protocol max, which may well be lower.
/*
 * BIO_DELETE via WRITE SAME(10/16): with the UNMAP bit set for
 * DA_DELETE_WS10/WS16, or zero-filling for DA_DELETE_ZERO (the data
 * buffer is the kernel's zero_region either way).  Only physically
 * contiguous queued trims are merged, up to ws_max_blks.
 */
3760 da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3762 	struct da_softc *softc;
3764 	uint64_t ws_max_blks;
3766 	uint64_t count; /* forward compat with WS32 */
3768 	softc = (struct da_softc *)periph->softc;
3769 	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
3770 	lba = bp->bio_pblkno;
/* See the note in da_delete_unmap() about how pending bps are tracked. */
3774 		if (bp1 != bp)//XXX imp XXX
3775 			bioq_insert_tail(&softc->delete_run_queue, bp1);
3776 		count += bp1->bio_bcount / softc->params.secsize;
3777 		if (count > ws_max_blks) {
3778 			xpt_print(periph->path,
3779 			    "%s issuing short delete %ld > %ld\n",
3780 			    da_delete_method_desc[softc->delete_method],
3781 			    count, ws_max_blks);
3782 			count = omin(count, ws_max_blks);
/* Pull the next queued trim; only merge if contiguous and within limit. */
3785 		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3788 		if (lba + count != bp1->bio_pblkno ||
3789 		    count + bp1->bio_bcount /
3790 		    softc->params.secsize > ws_max_blks) {
3791 			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3796 	scsi_write_same(&ccb->csio,
3797 			/*retries*/da_retry_count,
3799 			/*tag_action*/MSG_SIMPLE_Q_TAG,
3800 			/*byte2*/softc->delete_method ==
3801 			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
3802 			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
3804 			/*block_count*/count,
3805 			/*data_ptr*/ __DECONST(void *, zero_region),
3806 			/*dxfer_len*/ softc->params.secsize,
3807 			/*sense_len*/SSD_FULL_SIZE,
3808 			da_default_timeout * 1000);
3809 	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3810 	ccb->ccb_h.flags |= CAM_UNLOCKED;
3811 	cam_iosched_submit_trim(softc->cam_iosched);
/*
 * Error-recovery workaround invoked for a failed CCB.  Handles the
 * cases visible below: a failed delete (demote the delete method and
 * requeue the bios), unsupported PREVENT ALLOW MEDIUM REMOVAL or
 * SYNCHRONIZE CACHE(10) (record the quirk), and a rejected
 * READ(6)/WRITE(6) (rewrite the CDB in place as the 10-byte form,
 * raise minimum_cmd_size, and requeue the request).
 */
3815 cmd6workaround(union ccb *ccb)
3817 	struct scsi_rw_6 cmd6;
3818 	struct scsi_rw_10 *cmd10;
3819 	struct da_softc *softc;
3824 	cdb = ccb->csio.cdb_io.cdb_bytes;
3825 	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
3827 	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
3828 		da_delete_methods old_method = softc->delete_method;
3831 		 * Typically there are two reasons for failure here
3832 		 * 1. Delete method was detected as supported but isn't
3833 		 * 2. Delete failed due to invalid params e.g. too big
3835 		 * While we will attempt to choose an alternative delete method
3836 		 * this may result in short deletes if the existing delete
3837 		 * requests from geom are too big for the new method chosen.
3839 		 * This method assumes that the error which triggered this
3840 		 * will not retry the I/O; otherwise a panic will occur
3842 		dadeleteflag(softc, old_method, 0);
3843 		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
3844 		if (softc->delete_method == DA_DELETE_DISABLE)
3845 			xpt_print(ccb->ccb_h.path,
3846 				  "%s failed, disabling BIO_DELETE\n",
3847 				  da_delete_method_desc[old_method]);
3849 			xpt_print(ccb->ccb_h.path,
3850 				  "%s failed, switching to %s BIO_DELETE\n",
3851 				  da_delete_method_desc[old_method],
3852 				  da_delete_method_desc[softc->delete_method]);
/* Requeue every bio from the failed delete so it is retried with the new method. */
3854 		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
3855 			cam_iosched_queue_work(softc->cam_iosched, bp);
3856 		cam_iosched_queue_work(softc->cam_iosched,
3857 		    (struct bio *)ccb->ccb_h.ccb_bp);
3858 		ccb->ccb_h.ccb_bp = NULL;
3862 	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
3863 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3864 	    (*cdb == PREVENT_ALLOW) &&
3865 	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
3867 			xpt_print(ccb->ccb_h.path,
3868 			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
3869 		softc->quirks |= DA_Q_NO_PREVENT;
3873 	/* Detect unsupported SYNCHRONIZE CACHE(10). */
3874 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3875 	    (*cdb == SYNCHRONIZE_CACHE) &&
3876 	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
3878 			xpt_print(ccb->ccb_h.path,
3879 			    "SYNCHRONIZE CACHE(10) not supported.\n");
3880 		softc->quirks |= DA_Q_NO_SYNC_CACHE;
3881 		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
3885 	/* Translation only possible if CDB is an array and cmd is R/W6 */
3886 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
3887 	    (*cdb != READ_6 && *cdb != WRITE_6))
3890 	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
3891 	    "increasing minimum_cmd_size to 10.\n");
3892 	softc->minimum_cmd_size = 10;
/* Rewrite the 6-byte CDB as the equivalent 10-byte CDB in place. */
3894 	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
3895 	cmd10 = (struct scsi_rw_10 *)cdb;
3896 	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
3898 	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
3899 	cmd10->reserved = 0;
3900 	scsi_ulto2b(cmd6.length, cmd10->length);
3901 	cmd10->control = cmd6.control;
3902 	ccb->csio.cdb_len = sizeof(*cmd10);
3904 	/* Requeue request, unfreezing queue if necessary */
3905 	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
3906 	ccb->ccb_h.status = CAM_REQUEUE_REQ;
3909 		cam_release_devq(ccb->ccb_h.path,
3913 				 /*getcount_only*/0);
3919 dazonedone(struct cam_periph *periph, union ccb *ccb)
3921 struct da_softc *softc;
3924 softc = periph->softc;
3925 bp = (struct bio *)ccb->ccb_h.ccb_bp;
3927 switch (bp->bio_zone.zone_cmd) {
3928 case DISK_ZONE_OPEN:
3929 case DISK_ZONE_CLOSE:
3930 case DISK_ZONE_FINISH:
3933 case DISK_ZONE_REPORT_ZONES: {
3935 struct disk_zone_report *rep;
3936 struct scsi_report_zones_hdr *hdr;
3937 struct scsi_report_zones_desc *desc;
3938 struct disk_zone_rep_entry *entry;
3939 uint32_t num_alloced, hdr_len, num_avail;
3940 uint32_t num_to_fill, i;
3943 rep = &bp->bio_zone.zone_params.report;
3944 avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
3946 * Note that bio_resid isn't normally used for zone
3947 * commands, but it is used by devstat_end_transaction_bio()
3948 * to determine how much data was transferred. Because
3949 * the size of the SCSI/ATA data structures is different
3950 * than the size of the BIO interface structures, the
3951 * amount of data actually transferred from the drive will
3952 * be different than the amount of data transferred to
3955 bp->bio_resid = ccb->csio.resid;
3956 num_alloced = rep->entries_allocated;
3957 hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
3958 if (avail_len < sizeof(*hdr)) {
3960 * Is there a better error than EIO here? We asked
3961 * for at least the header, and we got less than
3964 bp->bio_error = EIO;
3965 bp->bio_flags |= BIO_ERROR;
3966 bp->bio_resid = bp->bio_bcount;
3970 if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
3975 hdr_len = ata ? le32dec(hdr->length) :
3976 scsi_4btoul(hdr->length);
3978 rep->entries_available = hdr_len / sizeof(*desc);
3980 rep->entries_available = 0;
3982 * NOTE: using the same values for the BIO version of the
3983 * same field as the SCSI/ATA values. This means we could
3984 * get some additional values that aren't defined in bio.h
3985 * if more values of the same field are defined later.
3987 rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
3988 rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
3989 scsi_8btou64(hdr->maximum_lba);
3991 * If the drive reports no entries that match the query,
3995 rep->entries_filled = 0;
3999 num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
4000 hdr_len / sizeof(*desc));
4002 * If the drive didn't return any data, then we're done.
4004 if (num_avail == 0) {
4005 rep->entries_filled = 0;
4009 num_to_fill = min(num_avail, rep->entries_allocated);
4011 * If the user didn't allocate any entries for us to fill,
4014 if (num_to_fill == 0) {
4015 rep->entries_filled = 0;
4019 for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4020 i < num_to_fill; i++, desc++, entry++) {
4022 * NOTE: we're mapping the values here directly
4023 * from the SCSI/ATA bit definitions to the bio.h
4024 * definitions. There is also a warning in
4025 * disk_zone.h, but the impact is that if
4026 * additional values are added in the SCSI/ATA
4027 * specs these will be visible to consumers of
4030 entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4031 entry->zone_condition =
4032 (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4033 SRZ_ZONE_COND_SHIFT;
4034 entry->zone_flags |= desc->zone_flags &
4035 (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
4036 entry->zone_length =
4037 ata ? le64dec(desc->zone_length) :
4038 scsi_8btou64(desc->zone_length);
4039 entry->zone_start_lba =
4040 ata ? le64dec(desc->zone_start_lba) :
4041 scsi_8btou64(desc->zone_start_lba);
4042 entry->write_pointer_lba =
4043 ata ? le64dec(desc->write_pointer_lba) :
4044 scsi_8btou64(desc->write_pointer_lba);
4046 rep->entries_filled = num_to_fill;
4049 case DISK_ZONE_GET_PARAMS:
4052 * In theory we should not get a GET_PARAMS bio, since it
4053 * should be handled without queueing the command to the
4056 panic("%s: Invalid zone command %d", __func__,
4057 bp->bio_zone.zone_cmd);
4061 if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4062 free(ccb->csio.data_ptr, M_SCSIDA);
/*
 * dadone() -- completion handler for all CCBs issued by the da(4)
 * peripheral driver.  Dispatches on the DA_CCB_TYPE bits of the CCB
 * state: ordinary buffer I/O completions finish the associated
 * struct bio, while the DA_CCB_PROBE_* cases advance the multi-step
 * probe state machine (read capacity, VPD pages, ATA logs, zone
 * information) and reschedule the periph for the next probe step.
 */
4066 dadone(struct cam_periph *periph, union ccb *done_ccb)
4068 struct da_softc *softc;
4069 struct ccb_scsiio *csio;
4073 softc = (struct da_softc *)periph->softc;
4074 priority = done_ccb->ccb_h.pinfo.priority;
4076 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4078 csio = &done_ccb->csio;
4079 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4080 if (csio->bio != NULL)
4081 biotrack(csio->bio, __func__);
4083 state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
/*
 * Normal I/O (read/write/delete/zone bio) completion: propagate
 * status/residual into the bio and hand it back to the scheduler.
 */
4085 case DA_CCB_BUFFER_IO:
4088 struct bio *bp, *bp1;
4090 cam_periph_lock(periph);
4091 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4092 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4096 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4101 error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4102 if (error == ERESTART) {
4104 * A retry was scheduled, so
4107 cam_periph_unlock(periph);
4110 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4115 * return all queued I/O with EIO, so that
4116 * the client can retry these I/Os in the
4117 * proper order should it attempt to recover.
4122 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4124 * Catastrophic error. Mark our pack as
4128 * XXX See if this is really a media
4131 xpt_print(periph->path,
4132 "Invalidating pack\n");
4133 softc->flags |= DA_FLAG_PACK_INVALID;
4135 softc->invalidations++;
4137 queued_error = ENXIO;
4139 cam_iosched_flush(softc->cam_iosched, NULL,
4142 bp->bio_error = error;
4143 bp->bio_resid = bp->bio_bcount;
4144 bp->bio_flags |= BIO_ERROR;
4146 } else if (bp != NULL) {
4147 if (state == DA_CCB_DELETE)
4150 bp->bio_resid = csio->resid;
4152 if (bp->bio_resid != 0)
4153 bp->bio_flags |= BIO_ERROR;
4155 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4156 cam_release_devq(done_ccb->ccb_h.path,
4160 /*getcount_only*/0);
4161 } else if (bp != NULL) {
/* Successful completion path; QFRZN should never be set here. */
4162 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4163 panic("REQ_CMP with QFRZN");
4164 if (bp->bio_cmd == BIO_ZONE)
4165 dazonedone(periph, done_ccb);
4166 else if (state == DA_CCB_DELETE)
4169 bp->bio_resid = csio->resid;
4170 if ((csio->resid > 0)
4171 && (bp->bio_cmd != BIO_ZONE))
4172 bp->bio_flags |= BIO_ERROR;
/* Fault injection hook: error_inject forces one bio to fail. */
4173 if (softc->error_inject != 0) {
4174 bp->bio_error = softc->error_inject;
4175 bp->bio_resid = bp->bio_bcount;
4176 bp->bio_flags |= BIO_ERROR;
4177 softc->error_inject = 0;
4182 biotrack(bp, __func__);
4183 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4184 if (LIST_EMPTY(&softc->pending_ccbs))
4185 softc->flags |= DA_FLAG_WAS_OTAG;
4187 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4188 xpt_release_ccb(done_ccb);
/*
 * A completed BIO_DELETE may have represented several collapsed
 * bios; finish every bio on the delete run queue with the same
 * status as the lead bio.
 */
4189 if (state == DA_CCB_DELETE) {
4190 TAILQ_HEAD(, bio) queue;
4193 TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4194 softc->delete_run_queue.insert_point = NULL;
4196 * Normally, the xpt_release_ccb() above would make sure
4197 * that when we have more work to do, that work would
4198 * get kicked off. However, we specifically keep
4199 * delete_running set to 0 before the call above to
4200 * allow other I/O to progress when many BIO_DELETE
4201 * requests are pushed down. We set delete_running to 0
4202 * and call daschedule again so that we don't stall if
4203 * there are no other I/Os pending apart from BIO_DELETEs.
4205 cam_iosched_trim_done(softc->cam_iosched);
4207 cam_periph_unlock(periph);
4208 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4209 TAILQ_REMOVE(&queue, bp1, bio_queue);
4210 bp1->bio_error = bp->bio_error;
4211 if (bp->bio_flags & BIO_ERROR) {
4212 bp1->bio_flags |= BIO_ERROR;
4213 bp1->bio_resid = bp1->bio_bcount;
4220 cam_periph_unlock(periph);
/*
 * READ CAPACITY(10)/(16) completion: establish geometry, announce
 * the disk, and schedule the next probe step (LBP or BDC VPD page).
 */
4226 case DA_CCB_PROBE_RC:
4227 case DA_CCB_PROBE_RC16:
4229 struct scsi_read_capacity_data *rdcap;
4230 struct scsi_read_capacity_data_long *rcaplong;
4237 /* XXX TODO: can this be a malloc? */
4238 announce_buf = softc->announce_temp;
4239 bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4241 if (state == DA_CCB_PROBE_RC)
4242 rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4244 rcaplong = (struct scsi_read_capacity_data_long *)
4247 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4248 struct disk_params *dp;
4249 uint32_t block_size;
4251 u_int lalba; /* Lowest aligned LBA. */
4253 if (state == DA_CCB_PROBE_RC) {
4254 block_size = scsi_4btoul(rdcap->length);
4255 maxsector = scsi_4btoul(rdcap->addr);
4259 * According to SBC-2, if the standard 10
4260 * byte READ CAPACITY command returns 2^32,
4261 * we should issue the 16 byte version of
4262 * the command, since the device in question
4263 * has more sectors than can be represented
4264 * with the short version of the command.
4266 if (maxsector == 0xffffffff) {
4267 free(rdcap, M_SCSIDA);
4268 xpt_release_ccb(done_ccb);
4269 softc->state = DA_STATE_PROBE_RC16;
4270 xpt_schedule(periph, priority);
4274 block_size = scsi_4btoul(rcaplong->length);
4275 maxsector = scsi_8btou64(rcaplong->addr);
4276 lalba = scsi_2btoul(rcaplong->lalba_lbp);
4280 * Because GEOM code just will panic us if we
4281 * give them an 'illegal' value we'll avoid that
4284 if (block_size == 0) {
4289 if (block_size >= MAXPHYS) {
4290 xpt_print(periph->path,
4291 "unsupportable block size %ju\n",
4292 (uintmax_t) block_size);
4293 announce_buf = NULL;
4294 cam_periph_invalidate(periph);
4297 * We pass rcaplong into dasetgeom(),
4298 * because it will only use it if it is
4301 dasetgeom(periph, block_size, maxsector,
4302 rcaplong, sizeof(*rcaplong));
4303 lbp = (lalba & SRC16_LBPME_A);
4304 dp = &softc->params;
4305 snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4306 "%juMB (%ju %u byte sectors)",
4307 ((uintmax_t)dp->secsize * dp->sectors) /
4309 (uintmax_t)dp->sectors, dp->secsize);
4315 * Retry any UNIT ATTENTION type errors. They
4316 * are expected at boot.
4318 error = daerror(done_ccb, CAM_RETRY_SELTO,
4319 SF_RETRY_UA|SF_NO_PRINT);
4320 if (error == ERESTART) {
4322 * A retry was scheduled, so
4326 } else if (error != 0) {
4328 int sense_key, error_code;
4331 struct ccb_getdev cgd;
4333 /* Don't wedge this device's queue */
4334 status = done_ccb->ccb_h.status;
4335 if ((status & CAM_DEV_QFRZN) != 0)
4336 cam_release_devq(done_ccb->ccb_h.path,
4340 /*getcount_only*/0);
4343 xpt_setup_ccb(&cgd.ccb_h,
4344 done_ccb->ccb_h.path,
4345 CAM_PRIORITY_NORMAL);
4346 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4347 xpt_action((union ccb *)&cgd);
4349 if (scsi_extract_sense_ccb(done_ccb,
4350 &error_code, &sense_key, &asc, &ascq))
4356 * If we tried READ CAPACITY(16) and failed,
4357 * fallback to READ CAPACITY(10).
4359 if ((state == DA_CCB_PROBE_RC16) &&
4360 (softc->flags & DA_FLAG_CAN_RC16) &&
4361 (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4364 (error_code == SSD_CURRENT_ERROR) &&
4365 (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4366 softc->flags &= ~DA_FLAG_CAN_RC16;
4367 free(rdcap, M_SCSIDA);
4368 xpt_release_ccb(done_ccb);
4369 softc->state = DA_STATE_PROBE_RC;
4370 xpt_schedule(periph, priority);
4375 * Attach to anything that claims to be a
4376 * direct access or optical disk device,
4377 * as long as it doesn't return a "Logical
4378 * unit not supported" (0x25) error.
4379 * "Internal Target Failure" (0x44) is also
4380 * special and typically means that the
4381 * device is a SATA drive behind a SATL
4382 * translation that's fallen into a
4383 * terminally fatal state.
4386 && (asc != 0x25) && (asc != 0x44)
4387 && (error_code == SSD_CURRENT_ERROR)) {
4388 const char *sense_key_desc;
4389 const char *asc_desc;
4391 dasetgeom(periph, 512, -1, NULL, 0);
4392 scsi_sense_desc(sense_key, asc, ascq,
4396 snprintf(announce_buf,
4398 "Attempt to query device "
4399 "size failed: %s, %s",
4400 sense_key_desc, asc_desc);
4406 xpt_print(periph->path,
4407 "got CAM status %#x\n",
4408 done_ccb->ccb_h.status);
4411 xpt_print(periph->path, "fatal error, "
4412 "failed to attach to device\n");
4414 announce_buf = NULL;
4417 * Free up resources.
4419 cam_periph_invalidate(periph);
4423 free(csio->data_ptr, M_SCSIDA);
4424 if (announce_buf != NULL &&
4425 ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4428 sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
4430 xpt_announce_periph_sbuf(periph, &sb, announce_buf);
4431 xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
4437 * Create our sysctl variables, now that we know
4438 * we have successfully attached.
4440 /* increase the refcount */
4441 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
4443 taskqueue_enqueue(taskqueue_thread,
4444 &softc->sysctl_task);
4446 /* XXX This message is useless! */
4447 xpt_print(periph->path, "fatal error, "
4448 "could not acquire reference count\n");
4452 /* We already probed the device. */
4453 if (softc->flags & DA_FLAG_PROBED) {
4454 daprobedone(periph, done_ccb);
4458 /* Ensure re-probe doesn't see old delete. */
4459 softc->delete_available = 0;
4460 dadeleteflag(softc, DA_DELETE_ZERO, 1);
4461 if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4463 * Based on older SBC-3 spec revisions
4464 * any of the UNMAP methods "may" be
4465 * available via LBP given this flag so
4466 * we flag all of them as available and
4467 * then remove those which further
4468 * probes confirm aren't available
4471 * We could also check readcap(16) p_type
4472 * flag to exclude one or more invalid
4473 * write same (X) types here
4475 dadeleteflag(softc, DA_DELETE_WS16, 1);
4476 dadeleteflag(softc, DA_DELETE_WS10, 1);
4477 dadeleteflag(softc, DA_DELETE_UNMAP, 1);
4479 xpt_release_ccb(done_ccb);
4480 softc->state = DA_STATE_PROBE_LBP;
4481 xpt_schedule(periph, priority);
4485 xpt_release_ccb(done_ccb);
4486 softc->state = DA_STATE_PROBE_BDC;
4487 xpt_schedule(periph, priority);
/*
 * Logical Block Provisioning VPD page completion: refine which
 * delete (UNMAP/WRITE SAME) methods the device supports.
 */
4490 case DA_CCB_PROBE_LBP:
4492 struct scsi_vpd_logical_block_prov *lbp;
4494 lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
4496 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4498 * T10/1799-D Revision 31 states at least one of these
4499 * must be supported but we don't currently enforce this.
4501 dadeleteflag(softc, DA_DELETE_WS16,
4502 (lbp->flags & SVPD_LBP_WS16));
4503 dadeleteflag(softc, DA_DELETE_WS10,
4504 (lbp->flags & SVPD_LBP_WS10));
4505 dadeleteflag(softc, DA_DELETE_UNMAP,
4506 (lbp->flags & SVPD_LBP_UNMAP));
4509 error = daerror(done_ccb, CAM_RETRY_SELTO,
4510 SF_RETRY_UA|SF_NO_PRINT);
4511 if (error == ERESTART)
4513 else if (error != 0) {
4514 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4515 /* Don't wedge this device's queue */
4516 cam_release_devq(done_ccb->ccb_h.path,
4520 /*getcount_only*/0);
4524 * Failure indicates we don't support any SBC-3
4525 * delete methods with UNMAP
4530 free(lbp, M_SCSIDA);
4531 xpt_release_ccb(done_ccb);
4532 softc->state = DA_STATE_PROBE_BLK_LIMITS;
4533 xpt_schedule(periph, priority);
/*
 * Block Limits VPD page completion: pick up transfer size, UNMAP
 * limits/granularity, and WRITE SAME maximum length.
 */
4536 case DA_CCB_PROBE_BLK_LIMITS:
4538 struct scsi_vpd_block_limits *block_limits;
4540 block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
4542 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4543 uint32_t max_txfer_len = scsi_4btoul(
4544 block_limits->max_txfer_len);
4545 uint32_t max_unmap_lba_cnt = scsi_4btoul(
4546 block_limits->max_unmap_lba_cnt);
4547 uint32_t max_unmap_blk_cnt = scsi_4btoul(
4548 block_limits->max_unmap_blk_cnt);
4549 uint32_t unmap_gran = scsi_4btoul(
4550 block_limits->opt_unmap_grain);
4551 uint32_t unmap_gran_align = scsi_4btoul(
4552 block_limits->unmap_grain_align);
4553 uint64_t ws_max_blks = scsi_8btou64(
4554 block_limits->max_write_same_length);
4556 if (max_txfer_len != 0) {
4557 softc->disk->d_maxsize = MIN(softc->maxio,
4558 (off_t)max_txfer_len * softc->params.secsize);
4562 * We should already support UNMAP but we check lba
4563 * and block count to be sure
4565 if (max_unmap_lba_cnt != 0x00L &&
4566 max_unmap_blk_cnt != 0x00L) {
4567 softc->unmap_max_lba = max_unmap_lba_cnt;
4568 softc->unmap_max_ranges = min(max_unmap_blk_cnt,
4570 if (unmap_gran > 1) {
4571 softc->unmap_gran = unmap_gran;
4572 if (unmap_gran_align & 0x80000000) {
4573 softc->unmap_gran_align =
4580 * Unexpected UNMAP limits which means the
4581 * device doesn't actually support UNMAP
4583 dadeleteflag(softc, DA_DELETE_UNMAP, 0);
4586 if (ws_max_blks != 0x00L)
4587 softc->ws_max_blks = ws_max_blks;
4590 error = daerror(done_ccb, CAM_RETRY_SELTO,
4591 SF_RETRY_UA|SF_NO_PRINT);
4592 if (error == ERESTART)
4594 else if (error != 0) {
4595 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4596 /* Don't wedge this device's queue */
4597 cam_release_devq(done_ccb->ccb_h.path,
4601 /*getcount_only*/0);
4605 * Failure here doesn't mean UNMAP is not
4606 * supported as this is an optional page.
4608 softc->unmap_max_lba = 1;
4609 softc->unmap_max_ranges = 1;
4613 free(block_limits, M_SCSIDA);
4614 xpt_release_ccb(done_ccb);
4615 softc->state = DA_STATE_PROBE_BDC;
4616 xpt_schedule(periph, priority);
/*
 * Block Device Characteristics VPD page completion: rotation rate
 * (disables queue sorting for SSDs) and zoned-device (ZBC) mode.
 */
4619 case DA_CCB_PROBE_BDC:
4621 struct scsi_vpd_block_device_characteristics *bdc;
4623 bdc = (struct scsi_vpd_block_device_characteristics *)
4626 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4630 * Disable queue sorting for non-rotational media
4633 u_int16_t old_rate = softc->disk->d_rotation_rate;
4635 valid_len = csio->dxfer_len - csio->resid;
4636 if (SBDC_IS_PRESENT(bdc, valid_len,
4637 medium_rotation_rate)) {
4638 softc->disk->d_rotation_rate =
4639 scsi_2btoul(bdc->medium_rotation_rate);
4640 if (softc->disk->d_rotation_rate ==
4641 SVPD_BDC_RATE_NON_ROTATING) {
4642 cam_iosched_set_sort_queue(
4643 softc->cam_iosched, 0);
4644 softc->rotating = 0;
4646 if (softc->disk->d_rotation_rate != old_rate) {
4647 disk_attr_changed(softc->disk,
4648 "GEOM::rotation_rate", M_NOWAIT);
4651 if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
4652 && (softc->zone_mode == DA_ZONE_NONE)) {
4655 if (scsi_vpd_supported_page(periph,
4656 SVPD_ATA_INFORMATION))
4662 * The Zoned field will only be set for
4663 * Drive Managed and Host Aware drives. If
4664 * they are Host Managed, the device type
4665 * in the standard INQUIRY data should be
4666 * set to T_ZBC_HM (0x14).
4668 if ((bdc->flags & SVPD_ZBC_MASK) ==
4670 softc->zone_mode = DA_ZONE_HOST_AWARE;
4671 softc->zone_interface = (ata_proto) ?
4672 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4673 } else if ((bdc->flags & SVPD_ZBC_MASK) ==
4675 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4676 softc->zone_interface = (ata_proto) ?
4677 DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4678 } else if ((bdc->flags & SVPD_ZBC_MASK) !=
4680 xpt_print(periph->path, "Unknown zoned "
4682 bdc->flags & SVPD_ZBC_MASK);
4687 error = daerror(done_ccb, CAM_RETRY_SELTO,
4688 SF_RETRY_UA|SF_NO_PRINT);
4689 if (error == ERESTART)
4691 else if (error != 0) {
4692 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4693 /* Don't wedge this device's queue */
4694 cam_release_devq(done_ccb->ccb_h.path,
4698 /*getcount_only*/0);
4703 free(bdc, M_SCSIDA);
4704 xpt_release_ccb(done_ccb);
4705 softc->state = DA_STATE_PROBE_ATA;
4706 xpt_schedule(periph, priority);
/*
 * ATA IDENTIFY data (via SAT) completion: TRIM support, rotation
 * rate, DMA/log capability flags, and ATA-reported zone mode.
 */
4709 case DA_CCB_PROBE_ATA:
4712 struct ata_params *ata_params;
4717 ata_params = (struct ata_params *)csio->data_ptr;
4718 ptr = (uint16_t *)ata_params;
4722 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
/* IDENTIFY data is little-endian 16-bit words; swap in place. */
4725 for (i = 0; i < sizeof(*ata_params) / 2; i++)
4726 ptr[i] = le16toh(ptr[i]);
4727 if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
4728 (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4729 dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
4730 if (ata_params->max_dsm_blocks != 0)
4731 softc->trim_max_ranges = min(
4732 softc->trim_max_ranges,
4733 ata_params->max_dsm_blocks *
4734 ATA_DSM_BLK_RANGES);
4737 * Disable queue sorting for non-rotational media
4740 old_rate = softc->disk->d_rotation_rate;
4741 softc->disk->d_rotation_rate =
4742 ata_params->media_rotation_rate;
4743 if (softc->disk->d_rotation_rate ==
4744 ATA_RATE_NON_ROTATING) {
4745 cam_iosched_set_sort_queue(softc->cam_iosched, 0);
4746 softc->rotating = 0;
4748 if (softc->disk->d_rotation_rate != old_rate) {
4749 disk_attr_changed(softc->disk,
4750 "GEOM::rotation_rate", M_NOWAIT);
4753 if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
4754 softc->flags |= DA_FLAG_CAN_ATA_DMA;
4756 if (ata_params->support.extension &
4758 softc->flags |= DA_FLAG_CAN_ATA_LOG;
4761 * At this point, if we have a SATA host aware drive,
4762 * we communicate via ATA passthrough unless the
4763 * SAT layer supports ZBC -> ZAC translation. In
4767 * XXX KDM figure out how to detect a host managed
4770 if (softc->zone_mode == DA_ZONE_NONE) {
4772 * Note that we don't override the zone
4773 * mode or interface if it has already been
4774 * set. This is because it has either been
4775 * set as a quirk, or when we probed the
4776 * SCSI Block Device Characteristics page,
4777 * the zoned field was set. The latter
4778 * means that the SAT layer supports ZBC to
4779 * ZAC translation, and we would prefer to
4780 * use that if it is available.
4782 if ((ata_params->support3 &
4783 ATA_SUPPORT_ZONE_MASK) ==
4784 ATA_SUPPORT_ZONE_HOST_AWARE) {
4785 softc->zone_mode = DA_ZONE_HOST_AWARE;
4786 softc->zone_interface =
4787 DA_ZONE_IF_ATA_PASS;
4788 } else if ((ata_params->support3 &
4789 ATA_SUPPORT_ZONE_MASK) ==
4790 ATA_SUPPORT_ZONE_DEV_MANAGED) {
4791 softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4792 softc->zone_interface =
4793 DA_ZONE_IF_ATA_PASS;
4798 error = daerror(done_ccb, CAM_RETRY_SELTO,
4799 SF_RETRY_UA|SF_NO_PRINT);
4800 if (error == ERESTART)
4802 else if (error != 0) {
4803 if ((done_ccb->ccb_h.status &
4804 CAM_DEV_QFRZN) != 0) {
4805 /* Don't wedge this device's queue */
4806 cam_release_devq(done_ccb->ccb_h.path,
4810 /*getcount_only*/0);
4815 free(ata_params, M_SCSIDA);
4816 if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
4817 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
4819 * If the ATA IDENTIFY failed, we could be talking
4820 * to a SCSI drive, although that seems unlikely,
4821 * since the drive did report that it supported the
4822 * ATA Information VPD page. If the ATA IDENTIFY
4823 * succeeded, and the SAT layer doesn't support
4824 * ZBC -> ZAC translation, continue on to get the
4825 * directory of ATA logs, and complete the rest of
4826 * the ZAC probe. If the SAT layer does support
4827 * ZBC -> ZAC translation, we want to use that,
4828 * and we'll probe the SCSI Zoned Block Device
4829 * Characteristics VPD page next.
4832 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
4833 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
4834 softc->state = DA_STATE_PROBE_ATA_LOGDIR;
4836 softc->state = DA_STATE_PROBE_ZONE;
4839 if (continue_probe != 0) {
4840 xpt_release_ccb(done_ccb);
4841 xpt_schedule(periph, priority);
4844 daprobedone(periph, done_ccb);
/*
 * ATA General Purpose log directory completion: decide whether the
 * Identify Device Data log is available.
 */
4847 case DA_CCB_PROBE_ATA_LOGDIR:
4851 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4853 softc->valid_logdir_len = 0;
4854 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
4855 softc->valid_logdir_len =
4856 csio->dxfer_len - csio->resid;
4857 if (softc->valid_logdir_len > 0)
4858 bcopy(csio->data_ptr, &softc->ata_logdir,
4859 min(softc->valid_logdir_len,
4860 sizeof(softc->ata_logdir)));
4862 * Figure out whether the Identify Device log is
4863 * supported. The General Purpose log directory
4864 * has a header, and lists the number of pages
4865 * available for each GP log identified by the
4866 * offset into the list.
4868 if ((softc->valid_logdir_len >=
4869 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
4870 && (le16dec(softc->ata_logdir.header) ==
4871 ATA_GP_LOG_DIR_VERSION)
4872 && (le16dec(&softc->ata_logdir.num_pages[
4873 (ATA_IDENTIFY_DATA_LOG *
4874 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
4875 softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
4877 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
4880 error = daerror(done_ccb, CAM_RETRY_SELTO,
4881 SF_RETRY_UA|SF_NO_PRINT);
4882 if (error == ERESTART)
4884 else if (error != 0) {
4886 * If we can't get the ATA log directory,
4887 * then ATA logs are effectively not
4888 * supported even if the bit is set in the
4891 softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
4892 DA_FLAG_CAN_ATA_IDLOG);
4893 if ((done_ccb->ccb_h.status &
4894 CAM_DEV_QFRZN) != 0) {
4895 /* Don't wedge this device's queue */
4896 cam_release_devq(done_ccb->ccb_h.path,
4900 /*getcount_only*/0);
4905 free(csio->data_ptr, M_SCSIDA);
4908 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
4909 softc->state = DA_STATE_PROBE_ATA_IDDIR;
4910 xpt_release_ccb(done_ccb);
4911 xpt_schedule(periph, priority);
4914 daprobedone(periph, done_ccb);
/*
 * ATA Identify Device Data log directory completion: scan the entry
 * list for the Supported Capabilities and Zoned Device pages.
 */
4917 case DA_CCB_PROBE_ATA_IDDIR:
4921 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4922 off_t entries_offset, max_entries;
4925 softc->valid_iddir_len = 0;
4926 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
4927 softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
4928 DA_FLAG_CAN_ATA_ZONE);
4929 softc->valid_iddir_len =
4930 csio->dxfer_len - csio->resid;
4931 if (softc->valid_iddir_len > 0)
4932 bcopy(csio->data_ptr, &softc->ata_iddir,
4933 min(softc->valid_iddir_len,
4934 sizeof(softc->ata_iddir)));
4937 __offsetof(struct ata_identify_log_pages,entries);
4938 max_entries = softc->valid_iddir_len - entries_offset;
4939 if ((softc->valid_iddir_len > (entries_offset + 1))
4940 && (le64dec(softc->ata_iddir.header) ==
4942 && (softc->ata_iddir.entry_count > 0)) {
4945 num_entries = softc->ata_iddir.entry_count;
4946 num_entries = min(num_entries,
4947 softc->valid_iddir_len - entries_offset);
4948 for (i = 0; i < num_entries &&
4949 i < max_entries; i++) {
4950 if (softc->ata_iddir.entries[i] ==
4953 DA_FLAG_CAN_ATA_SUPCAP;
4954 else if (softc->ata_iddir.entries[i]==
4957 DA_FLAG_CAN_ATA_ZONE;
4960 DA_FLAG_CAN_ATA_SUPCAP)
4962 DA_FLAG_CAN_ATA_ZONE))
4967 error = daerror(done_ccb, CAM_RETRY_SELTO,
4968 SF_RETRY_UA|SF_NO_PRINT);
4969 if (error == ERESTART)
4971 else if (error != 0) {
4973 * If we can't get the ATA Identify Data log
4974 * directory, then it effectively isn't
4975 * supported even if the ATA Log directory
4976 * a non-zero number of pages present for
4979 softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
4980 if ((done_ccb->ccb_h.status &
4981 CAM_DEV_QFRZN) != 0) {
4982 /* Don't wedge this device's queue */
4983 cam_release_devq(done_ccb->ccb_h.path,
4987 /*getcount_only*/0);
4992 free(csio->data_ptr, M_SCSIDA);
4995 && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
4996 softc->state = DA_STATE_PROBE_ATA_SUP;
4997 xpt_release_ccb(done_ccb);
4998 xpt_schedule(periph, priority);
5001 daprobedone(periph, done_ccb);
/*
 * ATA Supported Capabilities log page completion: read zoned-cap and
 * ZAC command-support bits into softc->zone_flags.
 */
5004 case DA_CCB_PROBE_ATA_SUP:
5008 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5011 struct ata_identify_log_sup_cap *sup_cap;
5014 sup_cap = (struct ata_identify_log_sup_cap *)
5016 valid_len = csio->dxfer_len - csio->resid;
5018 __offsetof(struct ata_identify_log_sup_cap,
5019 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5020 if (valid_len >= needed_size) {
5021 uint64_t zoned, zac_cap;
5023 zoned = le64dec(sup_cap->zoned_cap);
5024 if (zoned & ATA_ZONED_VALID) {
5026 * This should have already been
5027 * set, because this is also in the
5028 * ATA identify data.
5030 if ((zoned & ATA_ZONED_MASK) ==
5031 ATA_SUPPORT_ZONE_HOST_AWARE)
5034 else if ((zoned & ATA_ZONED_MASK) ==
5035 ATA_SUPPORT_ZONE_DEV_MANAGED)
5037 DA_ZONE_DRIVE_MANAGED;
5040 zac_cap = le64dec(sup_cap->sup_zac_cap);
5041 if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5042 if (zac_cap & ATA_REPORT_ZONES_SUP)
5043 softc->zone_flags |=
5044 DA_ZONE_FLAG_RZ_SUP;
5045 if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5046 softc->zone_flags |=
5047 DA_ZONE_FLAG_OPEN_SUP;
5048 if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5049 softc->zone_flags |=
5050 DA_ZONE_FLAG_CLOSE_SUP;
5051 if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5052 softc->zone_flags |=
5053 DA_ZONE_FLAG_FINISH_SUP;
5054 if (zac_cap & ATA_ND_RWP_SUP)
5055 softc->zone_flags |=
5056 DA_ZONE_FLAG_RWP_SUP;
5059 * This field was introduced in
5060 * ACS-4, r08 on April 28th, 2015.
5061 * If the drive firmware was written
5062 * to an earlier spec, it won't have
5063 * the field. So, assume all
5064 * commands are supported.
5066 softc->zone_flags |=
5067 DA_ZONE_FLAG_SUP_MASK;
5072 error = daerror(done_ccb, CAM_RETRY_SELTO,
5073 SF_RETRY_UA|SF_NO_PRINT);
5074 if (error == ERESTART)
5076 else if (error != 0) {
5078 * If we can't get the ATA Identify Data
5079 * Supported Capabilities page, clear the
5082 softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5084 * And clear zone capabilities.
5086 softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5087 if ((done_ccb->ccb_h.status &
5088 CAM_DEV_QFRZN) != 0) {
5089 /* Don't wedge this device's queue */
5090 cam_release_devq(done_ccb->ccb_h.path,
5094 /*getcount_only*/0);
5099 free(csio->data_ptr, M_SCSIDA);
5102 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5103 softc->state = DA_STATE_PROBE_ATA_ZONE;
5104 xpt_release_ccb(done_ccb);
5105 xpt_schedule(periph, priority);
5108 daprobedone(periph, done_ccb);
/*
 * ATA Zoned Device Information log completion: URSWRZ and the
 * optimal/max open-zone counts, each gated by its VALID bit.
 */
5111 case DA_CCB_PROBE_ATA_ZONE:
5115 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5116 struct ata_zoned_info_log *zi_log;
5120 zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5122 valid_len = csio->dxfer_len - csio->resid;
5123 needed_size = __offsetof(struct ata_zoned_info_log,
5124 version_info) + 1 + sizeof(zi_log->version_info);
5125 if (valid_len >= needed_size) {
5128 tmpvar = le64dec(zi_log->zoned_cap);
5129 if (tmpvar & ATA_ZDI_CAP_VALID) {
5130 if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5131 softc->zone_flags |=
5132 DA_ZONE_FLAG_URSWRZ;
5134 softc->zone_flags &=
5135 ~DA_ZONE_FLAG_URSWRZ;
5137 tmpvar = le64dec(zi_log->optimal_seq_zones);
5138 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5139 softc->zone_flags |=
5140 DA_ZONE_FLAG_OPT_SEQ_SET;
5141 softc->optimal_seq_zones = (tmpvar &
5142 ATA_ZDI_OPT_SEQ_MASK);
5144 softc->zone_flags &=
5145 ~DA_ZONE_FLAG_OPT_SEQ_SET;
5146 softc->optimal_seq_zones = 0;
5149 tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5150 if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5151 softc->zone_flags |=
5152 DA_ZONE_FLAG_OPT_NONSEQ_SET;
5153 softc->optimal_nonseq_zones =
5154 (tmpvar & ATA_ZDI_OPT_NS_MASK);
5156 softc->zone_flags &=
5157 ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5158 softc->optimal_nonseq_zones = 0;
5161 tmpvar = le64dec(zi_log->max_seq_req_zones);
5162 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5163 softc->zone_flags |=
5164 DA_ZONE_FLAG_MAX_SEQ_SET;
5165 softc->max_seq_zones =
5166 (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5168 softc->zone_flags &=
5169 ~DA_ZONE_FLAG_MAX_SEQ_SET;
5170 softc->max_seq_zones = 0;
5174 error = daerror(done_ccb, CAM_RETRY_SELTO,
5175 SF_RETRY_UA|SF_NO_PRINT);
5176 if (error == ERESTART)
5178 else if (error != 0) {
5179 softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5180 softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5182 if ((done_ccb->ccb_h.status &
5183 CAM_DEV_QFRZN) != 0) {
5184 /* Don't wedge this device's queue */
5185 cam_release_devq(done_ccb->ccb_h.path,
5189 /*getcount_only*/0);
5194 free(csio->data_ptr, M_SCSIDA);
5196 daprobedone(periph, done_ccb);
/*
 * SCSI Zoned Block Device Characteristics VPD page completion:
 * URSWRZ flag plus optimal/max open-zone counts.
 */
5199 case DA_CCB_PROBE_ZONE:
5203 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5206 struct scsi_vpd_zoned_bdc *zoned_bdc;
5209 zoned_bdc = (struct scsi_vpd_zoned_bdc *)
5211 valid_len = csio->dxfer_len - csio->resid;
5212 needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5213 max_seq_req_zones) + 1 +
5214 sizeof(zoned_bdc->max_seq_req_zones);
5215 if ((valid_len >= needed_len)
5216 && (scsi_2btoul(zoned_bdc->page_length) >=
5218 if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5219 softc->zone_flags |=
5220 DA_ZONE_FLAG_URSWRZ;
5222 softc->zone_flags &=
5223 ~DA_ZONE_FLAG_URSWRZ;
5224 softc->optimal_seq_zones =
5225 scsi_4btoul(zoned_bdc->optimal_seq_zones);
5226 softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5227 softc->optimal_nonseq_zones = scsi_4btoul(
5228 zoned_bdc->optimal_nonseq_zones);
5229 softc->zone_flags |=
5230 DA_ZONE_FLAG_OPT_NONSEQ_SET;
5231 softc->max_seq_zones =
5232 scsi_4btoul(zoned_bdc->max_seq_req_zones);
5233 softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5236 * All of the zone commands are mandatory for SCSI
5239 * XXX KDM this is valid as of September 2015.
5240 * Re-check this assumption once the SAT spec is
5241 * updated to support SCSI ZBC to ATA ZAC mapping.
5242 * Since ATA allows zone commands to be reported
5243 * as supported or not, this may not necessarily
5244 * be true for an ATA device behind a SAT (SCSI to
5245 * ATA Translation) layer.
5247 softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5249 error = daerror(done_ccb, CAM_RETRY_SELTO,
5250 SF_RETRY_UA|SF_NO_PRINT);
5251 if (error == ERESTART)
5253 else if (error != 0) {
5254 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5255 /* Don't wedge this device's queue */
5256 cam_release_devq(done_ccb->ccb_h.path,
5260 /*getcount_only*/0);
5264 daprobedone(periph, done_ccb);
5268 /* No-op. We're polling */
5272 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5274 if (daerror(done_ccb, CAM_RETRY_SELTO,
5275 SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) ==
5278 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5279 cam_release_devq(done_ccb->ccb_h.path,
5283 /*getcount_only*/0);
5285 xpt_release_ccb(done_ccb);
5286 cam_periph_release_locked(periph);
5292 xpt_release_ccb(done_ccb);
/*
 * Restart the probe state machine for an already-attached device,
 * e.g. after a "capacity data has changed" or "INQUIRY data has
 * changed" unit attention (see daerror()).  A no-op if a probe is
 * already in progress (softc->state != DA_STATE_NORMAL).
 */
5296 dareprobe(struct cam_periph *periph)
5298 struct da_softc *softc;
5301 softc = (struct da_softc *)periph->softc;
5303 /* Probe in progress; don't interfere. */
5304 if (softc->state != DA_STATE_NORMAL)
/* Hold a periph reference for the duration of the probe sequence. */
5307 status = cam_periph_acquire(periph);
5308 KASSERT(status == CAM_REQ_CMP,
5309 ("dareprobe: cam_periph_acquire failed"));
/* Prefer READ CAPACITY(16) when the device is known to support it. */
5311 if (softc->flags & DA_FLAG_CAN_RC16)
5312 softc->state = DA_STATE_PROBE_RC16;
5314 softc->state = DA_STATE_PROBE_RC;
5316 xpt_schedule(periph, CAM_PRIORITY_DEV);
/*
 * Common error handler for da(4) CCBs.  Applies driver-specific
 * policy -- the READ(6)/WRITE(6) upgrade workaround, unit-attention
 * handling (capacity / media / INQUIRY changed), and media-removal
 * detection -- then defers the generic recovery/retry decision to
 * cam_periph_error().  Returns that result (e.g. ERESTART when a
 * retry has been queued).
 */
5320 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
5322 struct da_softc *softc;
5323 struct cam_periph *periph;
5324 int error, error_code, sense_key, asc, ascq;
5326 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5327 if (ccb->csio.bio != NULL)
5328 biotrack(ccb->csio.bio, __func__);
5331 periph = xpt_path_periph(ccb->ccb_h.path);
5332 softc = (struct da_softc *)periph->softc;
5335 * Automatically detect devices that do not support
5336 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
5339 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
5340 error = cmd6workaround(ccb);
5341 } else if (scsi_extract_sense_ccb(ccb,
5342 &error_code, &sense_key, &asc, &ascq)) {
5343 if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
5344 error = cmd6workaround(ccb);
5346 * If the target replied with CAPACITY DATA HAS CHANGED UA,
5347 * query the capacity and notify upper layers.
5349 else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5350 asc == 0x2A && ascq == 0x09) {
5351 xpt_print(periph->path, "Capacity data has changed\n");
5352 softc->flags &= ~DA_FLAG_PROBED;
5354 sense_flags |= SF_NO_PRINT;
/* UA 28/00: not-ready-to-ready transition, i.e. new media. */
5355 } else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5356 asc == 0x28 && ascq == 0x00) {
5357 softc->flags &= ~DA_FLAG_PROBED;
5358 disk_media_changed(softc->disk, M_NOWAIT);
/* UA 3F/03: INQUIRY data changed; force a re-probe. */
5359 } else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5360 asc == 0x3F && ascq == 0x03) {
5361 xpt_print(periph->path, "INQUIRY data has changed\n");
5362 softc->flags &= ~DA_FLAG_PROBED;
5364 sense_flags |= SF_NO_PRINT;
/* NOT READY 3A: medium not present; invalidate the pack once. */
5365 } else if (sense_key == SSD_KEY_NOT_READY &&
5366 asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
5367 softc->flags |= DA_FLAG_PACK_INVALID;
5368 disk_media_gone(softc->disk, M_NOWAIT);
5371 if (error == ERESTART)
5375 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
5376 case CAM_CMD_TIMEOUT:
5379 case CAM_REQ_ABORTED:
5380 case CAM_REQ_CMP_ERR:
5381 case CAM_REQ_TERMIO:
5382 case CAM_UNREC_HBA_ERROR:
5383 case CAM_DATA_RUN_ERR:
5393 * Until we have a better way of doing pack validation,
5394 * don't treat UAs as errors.
5396 sense_flags |= SF_RETRY_UA;
5398 if (softc->quirks & DA_Q_RETRY_BUSY)
5399 sense_flags |= SF_RETRY_BUSY;
5400 return(cam_periph_error(ccb, cam_flags, sense_flags,
5401 &softc->saved_ccb));
/*
 * damediapoll():  Periodic callout used to poll for media-change
 * events.  When the device is idle (no TUR work already flagged and no
 * pending CCBs), flag a TEST UNIT READY for the I/O scheduler, then
 * re-arm the callout if polling is still enabled.
 */
5405 damediapoll(void *arg)
5407 struct cam_periph *periph = arg;
5408 struct da_softc *softc = periph->softc;
5410 if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
5411 LIST_EMPTY(&softc->pending_ccbs)) {
/* Reference taken here; presumably released when the TUR completes -- confirm. */
5412 if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
5413 cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
5417 /* Queue us up again */
5418 if (da_poll_period != 0)
5419 callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
/*
 * daprevent():  Issue PREVENT ALLOW MEDIUM REMOVAL to lock
 * (PR_PREVENT) or unlock (PR_ALLOW) a removable medium, tracking the
 * current state in DA_FLAG_PACK_LOCKED.  Runs the command
 * synchronously via cam_periph_runccb().
 *
 * NOTE(review): listing is elided; the early return, scsi_prevent()
 * argument list, and the success check guarding the flag update are
 * among the missing lines.
 */
5423 daprevent(struct cam_periph *periph, int action)
5425 struct da_softc *softc;
5429 softc = (struct da_softc *)periph->softc;
/* Nothing to do if the pack is already in the requested lock state. */
5431 if (((action == PR_ALLOW)
5432 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
5433 || ((action == PR_PREVENT)
5434 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
5438 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5440 scsi_prevent(&ccb->csio,
/* Retry selection timeouts and unit attentions without logging. */
5448 error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
5449 SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
/* Record the new lock state (guarded by an elided success check above). */
5452 if (action == PR_ALLOW)
5453 softc->flags &= ~DA_FLAG_PACK_LOCKED;
5455 softc->flags |= DA_FLAG_PACK_LOCKED;
5458 xpt_release_ccb(ccb);
/*
 * dasetgeom():  Install the device's block size and capacity into the
 * softc disk parameters, derive stripe size/offset (from READ
 * CAPACITY(16) data, the 4K quirk, or the UNMAP granularity), obtain a
 * geometry from the transport via XPT_CALC_GEOMETRY, store updated
 * read-capacity data in the EDT when it changed, and propagate
 * everything to the disk(9) layer, resizing if necessary.
 *
 * NOTE(review): listing is elided (original line numbers are not
 * contiguous); several statements and braces between the visible lines
 * are missing.
 */
5462 dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
5463 struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
5465 struct ccb_calc_geometry ccg;
5466 struct da_softc *softc;
5467 struct disk_params *dp;
5468 u_int lbppbe, lalba;
5471 softc = (struct da_softc *)periph->softc;
5473 dp = &softc->params;
5474 dp->secsize = block_len;
/* maxsector is the highest addressable LBA, hence +1 for the count. */
5475 dp->sectors = maxsector + 1;
/* Stripe geometry from READ CAPACITY(16): logical blocks per physical block
 * exponent (lbppbe) and lowest aligned LBA (lalba). */
5476 if (rcaplong != NULL) {
5477 lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
5478 lalba = scsi_2btoul(rcaplong->lalba_lbp);
5479 lalba &= SRC16_LALBA_A;
5486 dp->stripesize = block_len << lbppbe;
5487 dp->stripeoffset = (dp->stripesize - block_len * lalba) %
5489 } else if (softc->quirks & DA_Q_4K) {
/* Quirk: device is known to have 4K physical sectors. */
5490 dp->stripesize = 4096;
5491 dp->stripeoffset = 0;
5492 } else if (softc->unmap_gran != 0) {
/* Fall back to the UNMAP granularity/alignment reported by the device. */
5493 dp->stripesize = block_len * softc->unmap_gran;
5494 dp->stripeoffset = (dp->stripesize - block_len *
5495 softc->unmap_gran_align) % dp->stripesize;
5498 dp->stripeoffset = 0;
5501 * Have the controller provide us with a geometry
5502 * for this disk. The only time the geometry
5503 * matters is when we boot and the controller
5504 * is the only one knowledgeable enough to come
5505 * up with something that will make this a bootable
5508 xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
5509 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
5510 ccg.block_size = dp->secsize;
5511 ccg.volume_size = dp->sectors;
5513 ccg.secs_per_track = 0;
5515 xpt_action((union ccb*)&ccg);
5516 if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5518 * We don't know what went wrong here- but just pick
5519 * a geometry so we don't have nasty things like divide
5523 dp->secs_per_track = 255;
5524 dp->cylinders = dp->sectors / (255 * 255);
5525 if (dp->cylinders == 0) {
5529 dp->heads = ccg.heads;
5530 dp->secs_per_track = ccg.secs_per_track;
5531 dp->cylinders = ccg.cylinders;
5535 * If the user supplied a read capacity buffer, and if it is
5536 * different than the previous buffer, update the data in the EDT.
5537 * If it's the same, we don't bother. This avoids sending an
5538 * update every time someone opens this device.
5540 if ((rcaplong != NULL)
5541 && (bcmp(rcaplong, &softc->rcaplong,
5542 min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
5543 struct ccb_dev_advinfo cdai;
5545 xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
5546 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
5547 cdai.buftype = CDAI_TYPE_RCAPLONG;
5548 cdai.flags = CDAI_FLAG_STORE;
5549 cdai.bufsiz = rcap_len;
5550 cdai.buf = (uint8_t *)rcaplong;
5551 xpt_action((union ccb *)&cdai);
5552 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
5553 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
5554 if (cdai.ccb_h.status != CAM_REQ_CMP) {
5555 xpt_print(periph->path, "%s: failed to set read "
5556 "capacity advinfo\n", __func__);
5557 /* Use cam_error_print() to decode the status */
5558 cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
/* Cache the new read-capacity data for future comparisons. */
5561 bcopy(rcaplong, &softc->rcaplong,
5562 min(sizeof(softc->rcaplong), rcap_len));
/* Push the final parameters out to disk(9) and devstat. */
5566 softc->disk->d_sectorsize = softc->params.secsize;
5567 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
5568 softc->disk->d_stripesize = softc->params.stripesize;
5569 softc->disk->d_stripeoffset = softc->params.stripeoffset;
5570 /* XXX: these are not actually "firmware" values, so they may be wrong */
5571 softc->disk->d_fwsectors = softc->params.secs_per_track;
5572 softc->disk->d_fwheads = softc->params.heads;
5573 softc->disk->d_devstat->block_size = softc->params.secsize;
5574 softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
5576 error = disk_resize(softc->disk, M_NOWAIT);
5578 xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
/*
 * dasendorderedtag():  Periodic callout that requests an ordered tag
 * be inserted into the I/O stream (DA_FLAG_NEED_OTAG) if none was sent
 * during the previous interval, bounding how long the device may
 * reorder queued commands.  Always re-arms itself.
 */
5582 dasendorderedtag(void *arg)
5584 struct da_softc *softc = arg;
5586 if (da_send_ordered) {
5587 if (!LIST_EMPTY(&softc->pending_ccbs)) {
/* Only request a new ordered tag if one wasn't issued last interval. */
5588 if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
5589 softc->flags |= DA_FLAG_NEED_OTAG;
5590 softc->flags &= ~DA_FLAG_WAS_OTAG;
5593 /* Queue us up again */
5594 callout_reset(&softc->sendordered_c,
5595 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
5596 dasendorderedtag, softc);
5600 * Step through all DA peripheral drivers, and if the device is still open,
5601 * sync the disk cache to physical media.
/*
 * Registered as a shutdown eventhandler.  In panic context (scheduler
 * stopped) the normal locked path is skipped and the dump entry point
 * is used to flush instead.
 *
 * NOTE(review): listing is elided; `continue`/closing braces and the
 * scsi_synchronize_cache() argument list are among the missing lines.
 */
5604 dashutdown(void * arg, int howto)
5606 struct cam_periph *periph;
5607 struct da_softc *softc;
5611 CAM_PERIPH_FOREACH(periph, &dadriver) {
5612 softc = (struct da_softc *)periph->softc;
/* Panic path: can't take locks; flush via the dump entry point. */
5613 if (SCHEDULER_STOPPED()) {
5614 /* If we paniced with the lock held, do not recurse. */
5615 if (!cam_periph_owned(periph) &&
5616 (softc->flags & DA_FLAG_OPEN)) {
5617 dadump(softc->disk, NULL, 0, 0, 0);
5621 cam_periph_lock(periph);
5624 * We only sync the cache if the drive is still open, and
5625 * if the drive is capable of it..
5627 if (((softc->flags & DA_FLAG_OPEN) == 0)
5628 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
5629 cam_periph_unlock(periph);
5633 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5634 scsi_synchronize_cache(&ccb->csio,
5638 /*begin_lba*/0, /* whole disk */
/* One best-effort attempt; no retries or recovery during shutdown. */
5643 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
5644 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
5645 softc->disk->d_devstat);
5647 xpt_print(periph->path, "Synchronize cache failed\n");
5648 xpt_release_ccb(ccb);
5649 cam_periph_unlock(periph);
5653 #else /* !_KERNEL */
5656 * XXX These are only left out of the kernel build to silence warnings. If,
5657 * for some reason, these functions are used in the kernel, the ifdefs should
5658 * be moved so they are included both in the kernel and userland.
/*
 * scsi_format_unit():  CDB builder for the FORMAT UNIT command.
 * Fills in the FORMAT UNIT CDB in the supplied ccb_scsiio; data
 * direction is OUT when a parameter list is supplied (dxfer_len > 0),
 * NONE otherwise.  The CCB fill call is elided from this listing.
 */
5661 scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
5662 void (*cbfcnp)(struct cam_periph *, union ccb *),
5663 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
5664 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5667 struct scsi_format_unit *scsi_cmd;
5669 scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
5670 scsi_cmd->opcode = FORMAT_UNIT;
5671 scsi_cmd->byte2 = byte2;
5672 scsi_ulto2b(ileave, scsi_cmd->interleave);
5677 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
/*
 * scsi_read_defects():  CDB builder for READ DEFECT DATA.  Uses the
 * 10-byte form when the caller permits it (minimum_cmd_size <= 10), no
 * address descriptor index is requested, and the transfer length fits
 * in SRDD10_MAX_LENGTH; otherwise falls back to the 12-byte form,
 * which carries 4-byte allocation length and descriptor index fields.
 * Data direction is always IN.
 */
5687 scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
5688 void (*cbfcnp)(struct cam_periph *, union ccb *),
5689 uint8_t tag_action, uint8_t list_format,
5690 uint32_t addr_desc_index, uint8_t *data_ptr,
5691 uint32_t dxfer_len, int minimum_cmd_size,
5692 uint8_t sense_len, uint32_t timeout)
5697 * These conditions allow using the 10 byte command. Otherwise we
5698 * need to use the 12 byte command.
5700 if ((minimum_cmd_size <= 10)
5701 && (addr_desc_index == 0)
5702 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
5703 struct scsi_read_defect_data_10 *cdb10;
5705 cdb10 = (struct scsi_read_defect_data_10 *)
5706 &csio->cdb_io.cdb_bytes;
5708 cdb_len = sizeof(*cdb10);
5709 bzero(cdb10, cdb_len);
5710 cdb10->opcode = READ_DEFECT_DATA_10;
5711 cdb10->format = list_format;
5712 scsi_ulto2b(dxfer_len, cdb10->alloc_length);
5714 struct scsi_read_defect_data_12 *cdb12;
5716 cdb12 = (struct scsi_read_defect_data_12 *)
5717 &csio->cdb_io.cdb_bytes;
5719 cdb_len = sizeof(*cdb12);
5720 bzero(cdb12, cdb_len);
5721 cdb12->opcode = READ_DEFECT_DATA_12;
5722 cdb12->format = list_format;
5723 scsi_ulto4b(dxfer_len, cdb12->alloc_length);
5724 scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
5730 /*flags*/ CAM_DIR_IN,
/*
 * scsi_sanitize():  CDB builder for the SANITIZE command.  Fills in
 * the SANITIZE CDB; data direction is OUT when a parameter list is
 * supplied (dxfer_len > 0), NONE otherwise.  The CCB fill call is
 * elided from this listing.
 */
5740 scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
5741 void (*cbfcnp)(struct cam_periph *, union ccb *),
5742 u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
5743 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5746 struct scsi_sanitize *scsi_cmd;
5748 scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
5749 scsi_cmd->opcode = SANITIZE;
5750 scsi_cmd->byte2 = byte2;
5751 scsi_cmd->control = control;
5752 scsi_ulto2b(dxfer_len, scsi_cmd->length);
5757 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5766 #endif /* _KERNEL */
/*
 * scsi_zbc_out():  CDB builder for the ZBC OUT command (zone
 * management actions such as open/close/finish/reset, selected by
 * service_action; see the T10 ZBC specification).  zone_id is the
 * starting LBA of the target zone; zone_flags carries e.g. the ALL
 * bit.  Data direction is OUT when dxfer_len > 0, NONE otherwise.
 */
5769 scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
5770 void (*cbfcnp)(struct cam_periph *, union ccb *),
5771 uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
5772 uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
5773 uint8_t sense_len, uint32_t timeout)
5775 struct scsi_zbc_out *scsi_cmd;
5777 scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
5778 scsi_cmd->opcode = ZBC_OUT;
5779 scsi_cmd->service_action = service_action;
5780 scsi_u64to8b(zone_id, scsi_cmd->zone_id);
5781 scsi_cmd->zone_flags = zone_flags;
5786 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
/*
 * scsi_zbc_in():  CDB builder for the ZBC IN command (e.g. REPORT
 * ZONES, selected by service_action; see the T10 ZBC specification).
 * zone_start_lba is where reporting begins; zone_options filters the
 * returned zone list.  Data direction is IN when dxfer_len > 0, NONE
 * otherwise.
 */
5796 scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
5797 void (*cbfcnp)(struct cam_periph *, union ccb *),
5798 uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
5799 uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
5800 uint8_t sense_len, uint32_t timeout)
5802 struct scsi_zbc_in *scsi_cmd;
5804 scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
5805 scsi_cmd->opcode = ZBC_IN;
5806 scsi_cmd->service_action = service_action;
5807 scsi_ulto4b(dxfer_len, scsi_cmd->length);
5808 scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
5809 scsi_cmd->zone_options = zone_options;
5814 /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
/*
 * scsi_ata_zac_mgmt_out():  Build an ATA ZAC MANAGEMENT OUT command
 * wrapped in a SCSI ATA PASS-THROUGH CDB via scsi_ata_pass().  With
 * use_ncq set, the command is instead encapsulated as an NCQ command
 * per SAT-4: ATA NCQ NON DATA for zero-length transfers, SEND FPDMA
 * QUEUED otherwise (where the transfer length lives in the FEATURE
 * register).  zm_action selects the zone management action; zone_flags
 * is carried in the upper byte of FEATURES (non-NCQ) or AUXILIARY
 * (NCQ).
 *
 * NOTE(review): listing is elided; the use_ncq branch structure,
 * error returns, and parts of the scsi_ata_pass() argument list are
 * among the missing lines.
 */
5825 scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
5826 void (*cbfcnp)(struct cam_periph *, union ccb *),
5827 uint8_t tag_action, int use_ncq,
5828 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
5829 uint8_t *data_ptr, uint32_t dxfer_len,
5830 uint8_t *cdb_storage, size_t cdb_storage_len,
5831 uint8_t sense_len, uint32_t timeout)
5833 uint8_t command_out, protocol, ata_flags;
5834 uint16_t features_out;
5835 uint32_t sectors_out, auxiliary;
/* Non-NCQ path: plain ZAC MANAGEMENT OUT, DMA or non-data protocol. */
5841 command_out = ATA_ZAC_MANAGEMENT_OUT;
5842 features_out = (zm_action & 0xf) | (zone_flags << 8);
5843 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
5844 if (dxfer_len == 0) {
5845 protocol = AP_PROTO_NON_DATA;
5846 ata_flags |= AP_FLAG_TLEN_NO_DATA;
5849 protocol = AP_PROTO_DMA;
5850 ata_flags |= AP_FLAG_TLEN_SECT_CNT |
5851 AP_FLAG_TDIR_TO_DEV;
/* Transfer length in 512-byte sectors, in the sector count field. */
5852 sectors_out = ((dxfer_len >> 9) & 0xffff);
/* NCQ path begins here. */
5856 ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
5857 if (dxfer_len == 0) {
5858 command_out = ATA_NCQ_NON_DATA;
5859 features_out = ATA_NCQ_ZAC_MGMT_OUT;
5861 * We're assuming the SCSI to ATA translation layer
5862 * will set the NCQ tag number in the tag field.
5863 * That isn't clear from the SAT-4 spec (as of rev 05).
5866 ata_flags |= AP_FLAG_TLEN_NO_DATA;
5868 command_out = ATA_SEND_FPDMA_QUEUED;
5870 * Note that we're defaulting to normal priority,
5871 * and assuming that the SCSI to ATA translation
5872 * layer will insert the NCQ tag number in the tag
5873 * field. That isn't clear in the SAT-4 spec (as
5876 sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
5878 ata_flags |= AP_FLAG_TLEN_FEAT |
5879 AP_FLAG_TDIR_TO_DEV;
5882 * For SEND FPDMA QUEUED, the transfer length is
5883 * encoded in the FEATURE register, and 0 means
5884 * that 65536 512 byte blocks are to be transferred.
5885 * In practice, it seems unlikely that we'll see
5886 * a transfer that large, and it may confuse the
5887 * SAT layer, because generally that means that
5888 * 0 bytes should be transferred.
5890 if (dxfer_len == (65536 * 512)) {
5892 } else if (dxfer_len <= (65535 * 512)) {
5893 features_out = ((dxfer_len >> 9) & 0xffff);
5895 /* The transfer is too big. */
/* NCQ carries the ZAC action/flags in the AUXILIARY field instead. */
5902 auxiliary = (zm_action & 0xf) | (zone_flags << 8);
5903 protocol = AP_PROTO_FPDMA;
/* 48-bit command: the extend bit must be set. */
5906 protocol |= AP_EXTEND;
5908 retval = scsi_ata_pass(csio,
5911 /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5913 /*protocol*/ protocol,
5914 /*ata_flags*/ ata_flags,
5915 /*features*/ features_out,
5916 /*sector_count*/ sectors_out,
5918 /*command*/ command_out,
5921 /*auxiliary*/ auxiliary,
5923 /*data_ptr*/ data_ptr,
5924 /*dxfer_len*/ dxfer_len,
5925 /*cdb_storage*/ cdb_storage,
5926 /*cdb_storage_len*/ cdb_storage_len,
5927 /*minimum_cmd_size*/ 0,
5928 /*sense_len*/ SSD_FULL_SIZE,
5929 /*timeout*/ timeout);
5937 scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
5938 void (*cbfcnp)(struct cam_periph *, union ccb *),
5939 uint8_t tag_action, int use_ncq,
5940 uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
5941 uint8_t *data_ptr, uint32_t dxfer_len,
5942 uint8_t *cdb_storage, size_t cdb_storage_len,
5943 uint8_t sense_len, uint32_t timeout)
5945 uint8_t command_out, protocol;
5946 uint16_t features_out, sectors_out;
5952 ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
5955 command_out = ATA_ZAC_MANAGEMENT_IN;
5956 /* XXX KDM put a macro here */
5957 features_out = (zm_action & 0xf) | (zone_flags << 8);
5958 sectors_out = dxfer_len >> 9; /* XXX KDM macro */
5959 protocol = AP_PROTO_DMA;
5960 ata_flags |= AP_FLAG_TLEN_SECT_CNT;
5963 ata_flags |= AP_FLAG_TLEN_FEAT;
5965 command_out = ATA_RECV_FPDMA_QUEUED;
5966 sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
5969 * For RECEIVE FPDMA QUEUED, the transfer length is
5970 * encoded in the FEATURE register, and 0 means
5971 * that 65536 512 byte blocks are to be tranferred.
5972 * In practice, it seems unlikely that we'll see
5973 * a transfer that large, and it may confuse the
5974 * the SAT layer, because generally that means that
5975 * 0 bytes should be transferred.
5977 if (dxfer_len == (65536 * 512)) {
5979 } else if (dxfer_len <= (65535 * 512)) {
5980 features_out = ((dxfer_len >> 9) & 0xffff);
5982 /* The transfer is too big. */
5986 auxiliary = (zm_action & 0xf) | (zone_flags << 8),
5987 protocol = AP_PROTO_FPDMA;
5990 protocol |= AP_EXTEND;
5992 retval = scsi_ata_pass(csio,
5995 /*flags*/ CAM_DIR_IN,
5997 /*protocol*/ protocol,
5998 /*ata_flags*/ ata_flags,
5999 /*features*/ features_out,
6000 /*sector_count*/ sectors_out,
6002 /*command*/ command_out,
6005 /*auxiliary*/ auxiliary,
6007 /*data_ptr*/ data_ptr,
6008 /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6009 /*cdb_storage*/ cdb_storage,
6010 /*cdb_storage_len*/ cdb_storage_len,
6011 /*minimum_cmd_size*/ 0,
6012 /*sense_len*/ SSD_FULL_SIZE,
6013 /*timeout*/ timeout);