2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
40 #include <sys/sysctl.h>
41 #include <sys/taskqueue.h>
43 #include <sys/mutex.h>
45 #include <sys/devicestat.h>
46 #include <sys/eventhandler.h>
47 #include <sys/malloc.h>
48 #include <sys/endian.h>
51 #include <sys/reboot.h>
53 #include <geom/geom_disk.h>
62 #include <cam/cam_ccb.h>
63 #include <cam/cam_periph.h>
64 #include <cam/cam_xpt_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_da.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_iosched.h>
70 #include <cam/ata/ata_all.h>
72 #include <machine/md_var.h> /* geometry translation */
76 #define ATA_MAX_28BIT_LBA 268435455UL
78 extern int iosched_debug;
91 ADA_FLAG_CAN_48BIT = 0x00000002,
92 ADA_FLAG_CAN_FLUSHCACHE = 0x00000004,
93 ADA_FLAG_CAN_NCQ = 0x00000008,
94 ADA_FLAG_CAN_DMA = 0x00000010,
95 ADA_FLAG_NEED_OTAG = 0x00000020,
96 ADA_FLAG_WAS_OTAG = 0x00000040,
97 ADA_FLAG_CAN_TRIM = 0x00000080,
98 ADA_FLAG_OPEN = 0x00000100,
99 ADA_FLAG_SCTX_INIT = 0x00000200,
100 ADA_FLAG_CAN_CFA = 0x00000400,
101 ADA_FLAG_CAN_POWERMGT = 0x00000800,
102 ADA_FLAG_CAN_DMA48 = 0x00001000,
103 ADA_FLAG_CAN_LOG = 0x00002000,
104 ADA_FLAG_CAN_IDLOG = 0x00004000,
105 ADA_FLAG_CAN_SUPCAP = 0x00008000,
106 ADA_FLAG_CAN_ZONE = 0x00010000,
107 ADA_FLAG_CAN_WCACHE = 0x00020000,
108 ADA_FLAG_CAN_RAHEAD = 0x00040000,
109 ADA_FLAG_PROBED = 0x00080000,
110 ADA_FLAG_ANNOUNCED = 0x00100000,
111 ADA_FLAG_DIRTY = 0x00200000,
112 ADA_FLAG_CAN_NCQ_TRIM = 0x00400000, /* CAN_TRIM also set */
113 ADA_FLAG_PIM_ATA_EXT = 0x00800000
119 ADA_Q_NCQ_TRIM_BROKEN = 0x02,
120 ADA_Q_LOG_BROKEN = 0x04,
122 ADA_Q_NO_TRIM = 0x10,
126 #define ADA_Q_BIT_STRING \
129 "\002NCQ_TRIM_BROKEN" \
136 ADA_CCB_RAHEAD = 0x01,
137 ADA_CCB_WCACHE = 0x02,
138 ADA_CCB_BUFFER_IO = 0x03,
141 ADA_CCB_LOGDIR = 0x07,
142 ADA_CCB_IDDIR = 0x08,
143 ADA_CCB_SUP_CAP = 0x09,
145 ADA_CCB_TYPE_MASK = 0x0F,
149 ADA_ZONE_NONE = 0x00,
150 ADA_ZONE_DRIVE_MANAGED = 0x01,
151 ADA_ZONE_HOST_AWARE = 0x02,
152 ADA_ZONE_HOST_MANAGED = 0x03
156 ADA_ZONE_FLAG_RZ_SUP = 0x0001,
157 ADA_ZONE_FLAG_OPEN_SUP = 0x0002,
158 ADA_ZONE_FLAG_CLOSE_SUP = 0x0004,
159 ADA_ZONE_FLAG_FINISH_SUP = 0x0008,
160 ADA_ZONE_FLAG_RWP_SUP = 0x0010,
161 ADA_ZONE_FLAG_SUP_MASK = (ADA_ZONE_FLAG_RZ_SUP |
162 ADA_ZONE_FLAG_OPEN_SUP |
163 ADA_ZONE_FLAG_CLOSE_SUP |
164 ADA_ZONE_FLAG_FINISH_SUP |
165 ADA_ZONE_FLAG_RWP_SUP),
166 ADA_ZONE_FLAG_URSWRZ = 0x0020,
167 ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040,
168 ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080,
169 ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100,
170 ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET |
171 ADA_ZONE_FLAG_OPT_NONSEQ_SET |
172 ADA_ZONE_FLAG_MAX_SEQ_SET)
175 static struct ada_zone_desc {
176 ada_zone_flags value;
178 } ada_zone_desc_table[] = {
179 {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" },
180 {ADA_ZONE_FLAG_OPEN_SUP, "Open" },
181 {ADA_ZONE_FLAG_CLOSE_SUP, "Close" },
182 {ADA_ZONE_FLAG_FINISH_SUP, "Finish" },
183 {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
187 /* Offsets into our private area for storing information */
188 #define ccb_state ppriv_field0
189 #define ccb_bp ppriv_ptr1
194 ADA_DELETE_CFA_ERASE,
196 ADA_DELETE_NCQ_DSM_TRIM,
197 ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE,
198 ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM,
199 } ada_delete_methods;
201 static const char *ada_delete_method_names[] =
202 { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" };
204 static const char *ada_delete_method_desc[] =
205 { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" };
210 u_int8_t secs_per_track;
212 u_int32_t secsize; /* Number of bytes/logical sector */
213 u_int64_t sectors; /* Total number sectors */
216 #define TRIM_MAX_BLOCKS 8
217 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
218 struct trim_request {
219 uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
220 TAILQ_HEAD(, bio) bps;
224 struct cam_iosched_softc *cam_iosched;
225 int outstanding_cmds; /* Number of active commands */
226 int refcount; /* Active xpt_action() calls */
229 ada_zone_mode zone_mode;
230 ada_zone_flags zone_flags;
231 struct ata_gp_log_dir ata_logdir;
232 int valid_logdir_len;
233 struct ata_identify_log_pages ata_iddir;
235 uint64_t optimal_seq_zones;
236 uint64_t optimal_nonseq_zones;
237 uint64_t max_seq_zones;
239 ada_delete_methods delete_method;
245 #ifdef CAM_TEST_FAILURE
246 int force_read_error;
247 int force_write_error;
248 int periodic_read_error;
249 int periodic_read_count;
251 struct ccb_pathinq cpi;
252 struct disk_params params;
254 struct task sysctl_task;
255 struct sysctl_ctx_list sysctl_ctx;
256 struct sysctl_oid *sysctl_tree;
257 struct callout sendordered_c;
258 struct trim_request trim_req;
260 uint64_t trim_ranges;
263 struct sysctl_ctx_list sysctl_stats_ctx;
264 struct sysctl_oid *sysctl_stats_tree;
269 #define ADA_ANNOUNCETMP_SZ 80
270 char announce_temp[ADA_ANNOUNCETMP_SZ];
271 #define ADA_ANNOUNCE_SZ 400
272 char announce_buffer[ADA_ANNOUNCE_SZ];
275 struct ada_quirk_entry {
276 struct scsi_inquiry_pattern inq_pat;
280 static struct ada_quirk_entry ada_quirk_table[] =
284 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SanDisk?SD8SB8U1T00*", "X4162000*" },
285 /*quirks*/ADA_Q_128KB
288 /* Hitachi Advanced Format (4k) drives */
289 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
293 /* Samsung Advanced Format (4k) drives */
294 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" },
298 /* Samsung Advanced Format (4k) drives */
299 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
303 /* Seagate Barracuda Green Advanced Format (4k) drives */
304 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
308 /* Seagate Barracuda Advanced Format (4k) drives */
309 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" },
313 /* Seagate Barracuda Advanced Format (4k) drives */
314 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" },
318 /* Seagate Momentus Advanced Format (4k) drives */
319 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
323 /* Seagate Momentus Advanced Format (4k) drives */
324 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
328 /* Seagate Momentus Advanced Format (4k) drives */
329 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" },
333 /* Seagate Momentus Advanced Format (4k) drives */
334 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" },
338 /* Seagate Momentus Advanced Format (4k) drives */
339 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
343 /* Seagate Momentus Advanced Format (4k) drives */
344 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
348 /* Seagate Momentus Advanced Format (4k) drives */
349 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" },
353 /* Seagate Momentus Thin Advanced Format (4k) drives */
354 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
358 /* WDC Caviar Red Advanced Format (4k) drives */
359 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" },
363 /* WDC Caviar Green Advanced Format (4k) drives */
364 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
368 /* WDC Caviar Green/Red Advanced Format (4k) drives */
369 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
373 /* WDC Caviar Red Advanced Format (4k) drives */
374 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" },
378 /* WDC Caviar Black Advanced Format (4k) drives */
379 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????AZEX*", "*" },
383 /* WDC Caviar Black Advanced Format (4k) drives */
384 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????FZEX*", "*" },
388 /* WDC Caviar Green Advanced Format (4k) drives */
389 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
393 /* WDC Caviar Green Advanced Format (4k) drives */
394 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
398 /* WDC Scorpio Black Advanced Format (4k) drives */
399 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
403 /* WDC Scorpio Black Advanced Format (4k) drives */
404 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
408 /* WDC Scorpio Blue Advanced Format (4k) drives */
409 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
413 /* WDC Scorpio Blue Advanced Format (4k) drives */
414 { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
420 * Corsair Force 2 SSDs
421 * 4k optimised & trim only works in 4k requests + 4k aligned
423 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" },
428 * Corsair Force 3 SSDs
429 * 4k optimised & trim only works in 4k requests + 4k aligned
431 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" },
436 * Corsair Neutron GTX SSDs
437 * 4k optimised & trim only works in 4k requests + 4k aligned
439 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
444 * Corsair Force GT & GS SSDs
445 * 4k optimised & trim only works in 4k requests + 4k aligned
447 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" },
453 * 4k optimised & trim only works in 4k requests + 4k aligned
455 { T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" },
460 * Crucial M500 SSDs MU07 firmware
463 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" },
468 * Crucial M500 SSDs all other firmware
469 * NCQ Trim doesn't work
471 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" },
472 /*quirks*/ADA_Q_NCQ_TRIM_BROKEN
477 * NCQ Trim doesn't work, but only on MU01 firmware
479 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" },
480 /*quirks*/ADA_Q_NCQ_TRIM_BROKEN
485 * NCQ Trim doesn't work, but only on MU01 firmware
487 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" },
488 /*quirks*/ADA_Q_NCQ_TRIM_BROKEN
492 * Crucial RealSSD C300 SSDs
495 { T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*",
496 "*" }, /*quirks*/ADA_Q_4K
501 * NCQ Trim doesn't work
503 { T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" },
504 /*quirks*/ADA_Q_NCQ_TRIM_BROKEN
508 * Intel 320 Series SSDs
509 * 4k optimised & trim only works in 4k requests + 4k aligned
511 { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" },
516 * Intel 330 Series SSDs
517 * 4k optimised & trim only works in 4k requests + 4k aligned
519 { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" },
524 * Intel 510 Series SSDs
525 * 4k optimised & trim only works in 4k requests + 4k aligned
527 { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" },
532 * Intel 520 Series SSDs
533 * 4k optimised & trim only works in 4k requests + 4k aligned
535 { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" },
540 * Intel S3610 Series SSDs
541 * 4k optimised & trim only works in 4k requests + 4k aligned
543 { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BX*", "*" },
548 * Intel X25-M Series SSDs
549 * 4k optimised & trim only works in 4k requests + 4k aligned
551 { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" },
556 * KingDian S200 60GB P0921B
557 * Trimming crash the SSD
559 { T_DIRECT, SIP_MEDIA_FIXED, "*", "KingDian S200 *", "*" },
560 /*quirks*/ADA_Q_NO_TRIM
564 * Kingston E100 Series SSDs
565 * 4k optimised & trim only works in 4k requests + 4k aligned
567 { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" },
572 * Kingston HyperX 3k SSDs
573 * 4k optimised & trim only works in 4k requests + 4k aligned
575 { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" },
580 * Marvell SSDs (entry taken from OpenSolaris)
581 * 4k optimised & trim only works in 4k requests + 4k aligned
583 { T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" },
588 * Micron M500 SSDs firmware MU07
591 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" },
596 * Micron M500 SSDs all other firmware
597 * NCQ Trim doesn't work
599 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" },
600 /*quirks*/ADA_Q_NCQ_TRIM_BROKEN
604 * Micron M5[15]0 SSDs
605 * NCQ Trim doesn't work, but only MU01 firmware
607 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" },
608 /*quirks*/ADA_Q_NCQ_TRIM_BROKEN
613 * 4k optimised & trim only works in 4k requests + 4k aligned
615 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron 5100 MTFDDAK*", "*" },
621 * 4k optimised & trim only works in 4k requests + 4k aligned
623 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
629 * 4k optimised & trim only works in 4k requests + 4k aligned
631 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
636 * OCZ Deneva R Series SSDs
637 * 4k optimised & trim only works in 4k requests + 4k aligned
639 { T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
644 * OCZ Vertex 2 SSDs (inc pro series)
645 * 4k optimised & trim only works in 4k requests + 4k aligned
647 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
653 * 4k optimised & trim only works in 4k requests + 4k aligned
655 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
661 * 4k optimised & trim only works in 4k requests + 4k aligned
663 { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" },
669 * 4k optimised, NCQ TRIM seems to work
671 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 750*", "*" },
676 * Samsung 830 Series SSDs
677 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
679 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" },
680 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
685 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
687 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" },
688 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
693 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
695 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 845*", "*" },
696 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
701 * 4k optimised, NCQ TRIM broken (normal TRIM fine)
703 { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" },
704 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
708 * Samsung SM863 Series SSDs (MZ7KM*)
709 * 4k optimised, NCQ believed to be working
711 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" },
716 * Samsung 843T Series SSDs (MZ7WD*)
717 * Samsung PM851 Series SSDs (MZ7TE*)
718 * Samsung PM853T Series SSDs (MZ7GE*)
719 * 4k optimised, NCQ believed to be broken since these
720 * appear to be built with the same controllers as the 840/850.
722 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" },
723 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
727 * Same as for SAMSUNG MZ7* but enable the quirks for SSD
728 * starting with MZ7* too
730 { T_DIRECT, SIP_MEDIA_FIXED, "*", "MZ7*", "*" },
731 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
735 * Samsung PM851 Series SSDs Dell OEM
736 * device model "SAMSUNG SSD PM851 mSATA 256GB"
737 * 4k optimised, NCQ broken
739 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD PM851*", "*" },
740 /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
744 * SuperTalent TeraDrive CT SSDs
745 * 4k optimised & trim only works in 4k requests + 4k aligned
747 { T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" },
752 * XceedIOPS SATA SSDs
755 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" },
760 * Samsung drive that doesn't support READ LOG EXT or
761 * READ LOG DMA EXT, despite reporting that it does in
763 * SAMSUNG HD200HJ KF100-06
765 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD200*", "*" },
766 /*quirks*/ADA_Q_LOG_BROKEN
770 * Samsung drive that doesn't support READ LOG EXT or
771 * READ LOG DMA EXT, despite reporting that it does in
773 * SAMSUNG HD501LJ CR100-10
775 { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD501*", "*" },
776 /*quirks*/ADA_Q_LOG_BROKEN
780 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
781 * Drive Managed SATA hard drive. This drive doesn't report
782 * in firmware that it is a drive managed SMR drive.
784 { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST8000AS000[23]*", "*" },
785 /*quirks*/ADA_Q_SMR_DM
790 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
791 /*vendor*/"*", /*product*/"*", /*revision*/"*"
797 static disk_strategy_t adastrategy;
798 static dumper_t adadump;
799 static periph_init_t adainit;
800 static void adadiskgonecb(struct disk *dp);
801 static periph_oninv_t adaoninvalidate;
802 static periph_dtor_t adacleanup;
803 static void adaasync(void *callback_arg, u_int32_t code,
804 struct cam_path *path, void *arg);
805 static int adazonemodesysctl(SYSCTL_HANDLER_ARGS);
806 static int adazonesupsysctl(SYSCTL_HANDLER_ARGS);
807 static void adasysctlinit(void *context, int pending);
808 static int adagetattr(struct bio *bp);
809 static void adasetflags(struct ada_softc *softc,
810 struct ccb_getdev *cgd);
811 static void adasetgeom(struct ada_softc *softc,
812 struct ccb_getdev *cgd);
813 static periph_ctor_t adaregister;
814 static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp,
815 struct ccb_ataio *ataio);
816 static void ada_cfaerase(struct ada_softc *softc, struct bio *bp,
817 struct ccb_ataio *ataio);
818 static int ada_zone_bio_to_ata(int disk_zone_cmd);
819 static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb,
820 struct bio *bp, int *queue_ccb);
821 static periph_start_t adastart;
822 static void adaprobedone(struct cam_periph *periph, union ccb *ccb);
823 static void adazonedone(struct cam_periph *periph, union ccb *ccb);
824 static void adadone(struct cam_periph *periph,
825 union ccb *done_ccb);
826 static int adaerror(union ccb *ccb, u_int32_t cam_flags,
827 u_int32_t sense_flags);
828 static timeout_t adasendorderedtag;
829 static void adashutdown(void *arg, int howto);
830 static void adasuspend(void *arg);
831 static void adaresume(void *arg);
833 #ifndef ADA_DEFAULT_TIMEOUT
834 #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
837 #ifndef ADA_DEFAULT_RETRY
838 #define ADA_DEFAULT_RETRY 4
841 #ifndef ADA_DEFAULT_SEND_ORDERED
842 #define ADA_DEFAULT_SEND_ORDERED 1
845 #ifndef ADA_DEFAULT_SPINDOWN_SHUTDOWN
846 #define ADA_DEFAULT_SPINDOWN_SHUTDOWN 1
849 #ifndef ADA_DEFAULT_SPINDOWN_SUSPEND
850 #define ADA_DEFAULT_SPINDOWN_SUSPEND 1
853 #ifndef ADA_DEFAULT_READ_AHEAD
854 #define ADA_DEFAULT_READ_AHEAD 1
857 #ifndef ADA_DEFAULT_WRITE_CACHE
858 #define ADA_DEFAULT_WRITE_CACHE 1
861 #define ADA_RA (softc->read_ahead >= 0 ? \
862 softc->read_ahead : ada_read_ahead)
863 #define ADA_WC (softc->write_cache >= 0 ? \
864 softc->write_cache : ada_write_cache)
867 * Most platforms map firmware geometry to actual, but some don't. If
868 * not overridden, default to nothing.
870 #ifndef ata_disk_firmware_geom_adjust
871 #define ata_disk_firmware_geom_adjust(disk)
874 static int ada_retry_count = ADA_DEFAULT_RETRY;
875 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
876 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
877 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
878 static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
879 static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
880 static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
882 static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
883 "CAM Direct Access Disk driver");
884 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN,
885 &ada_retry_count, 0, "Normal I/O retry count");
886 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
887 &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
888 SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
889 &ada_send_ordered, 0, "Send Ordered Tags");
890 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN,
891 &ada_spindown_shutdown, 0, "Spin down upon shutdown");
892 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN,
893 &ada_spindown_suspend, 0, "Spin down upon suspend");
894 SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN,
895 &ada_read_ahead, 0, "Enable disk read-ahead");
896 SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN,
897 &ada_write_cache, 0, "Enable disk write cache");
900 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
901 * to the default timeout, we check to see whether an ordered
902 * tagged transaction is appropriate to prevent simple tag
903 * starvation. Since we'd like to ensure that there is at least
904 * 1/2 of the timeout length left for a starved transaction to
905 * complete after we've sent an ordered tag, we must poll at least
906 * four times in every timeout period. This takes care of the worst
907 * case where a starved transaction starts during an interval that
908 * meets the requirement "don't send an ordered tag" test so it takes
909 * us two intervals to determine that a tag must be sent.
911 #ifndef ADA_ORDEREDTAG_INTERVAL
912 #define ADA_ORDEREDTAG_INTERVAL 4
915 static struct periph_driver adadriver =
918 TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
921 static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
923 PERIPHDRIVER_DECLARE(ada, adadriver);
925 static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
/*
 * adaopen(): GEOM disk d_open entry point.  Acquires a reference on the
 * periph, takes the periph lock and a hold, marks the unit open
 * (ADA_FLAG_OPEN), then drops the hold and lock.
 * NOTE(review): this listing is a partial extraction -- the error
 * declaration, closing braces, and return statements are on lines that
 * were dropped; verify against the complete source before editing.
 */
928 adaopen(struct disk *dp)
930 struct cam_periph *periph;
931 struct ada_softc *softc;
934 periph = (struct cam_periph *)dp->d_drv1;
935 if (cam_periph_acquire(periph) != 0) {
939 cam_periph_lock(periph);
940 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
941 cam_periph_unlock(periph);
942 cam_periph_release(periph);
946 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
949 softc = (struct ada_softc *)periph->softc;
950 softc->flags |= ADA_FLAG_OPEN;
952 cam_periph_unhold(periph);
953 cam_periph_unlock(periph);
/*
 * adaclose(): GEOM disk d_close entry point.  If the media was written
 * (ADA_FLAG_DIRTY) and the drive supports it, issues a FLUSH CACHE
 * (48-bit variant when ADA_FLAG_CAN_48BIT is set), then clears the
 * open/dirty flags, waits for in-flight xpt_action() references to
 * drain, and releases the periph.
 * NOTE(review): partial extraction -- the ccb/error declarations,
 * some braces, and the final return are on dropped lines.
 */
958 adaclose(struct disk *dp)
960 struct cam_periph *periph;
961 struct ada_softc *softc;
965 periph = (struct cam_periph *)dp->d_drv1;
966 softc = (struct ada_softc *)periph->softc;
967 cam_periph_lock(periph);
969 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
972 /* We only sync the cache if the drive is capable of it. */
973 if ((softc->flags & ADA_FLAG_DIRTY) != 0 &&
974 (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
975 (periph->flags & CAM_PERIPH_INVALID) == 0 &&
976 cam_periph_hold(periph, PRIBIO) == 0) {
978 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
979 cam_fill_ataio(&ccb->ataio,
986 ada_default_timeout*1000);
988 if (softc->flags & ADA_FLAG_CAN_48BIT)
989 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
991 ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
992 error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
993 /*sense_flags*/0, softc->disk->d_devstat);
996 xpt_print(periph->path, "Synchronize cache failed\n");
997 softc->flags &= ~ADA_FLAG_DIRTY;
998 xpt_release_ccb(ccb);
999 cam_periph_unhold(periph);
1002 softc->flags &= ~ADA_FLAG_OPEN;
/* Wait for any outstanding xpt_action() calls to finish before the
 * last reference is released; woken via the refcount wait channel. */
1004 while (softc->refcount != 0)
1005 cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1);
1006 cam_periph_unlock(periph);
1007 cam_periph_release(periph);
/*
 * adaschedule(): ask the I/O scheduler to schedule this periph for
 * work, but only once the probe state machine has reached
 * ADA_STATE_NORMAL.  Caller must hold the periph lock.
 * NOTE(review): partial extraction -- return statement and braces are
 * on dropped lines.
 */
1012 adaschedule(struct cam_periph *periph)
1014 struct ada_softc *softc = (struct ada_softc *)periph->softc;
1016 if (softc->state != ADA_STATE_NORMAL)
1019 cam_iosched_schedule(softc->cam_iosched, periph);
1023 * Actually translate the requested transfer into one the physical driver
1024 * can understand. The transfer is described by a buf and will include
1025 * only one physical transfer.
/*
 * adastrategy(): GEOM disk d_strategy entry point.  Rejects I/O with
 * ENXIO if the periph has been invalidated, forces BIO_ZONE commands
 * to be ordered, then hands the bio to the CAM I/O scheduler and
 * schedules the periph.
 * NOTE(review): partial extraction -- the early-return after
 * biofinish() and closing braces are on dropped lines.
 */
1028 adastrategy(struct bio *bp)
1030 struct cam_periph *periph;
1031 struct ada_softc *softc;
1033 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1034 softc = (struct ada_softc *)periph->softc;
1036 cam_periph_lock(periph);
1038 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp));
1041 * If the device has been made invalid, error out
1043 if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1044 cam_periph_unlock(periph);
1045 biofinish(bp, NULL, ENXIO);
1050 * Zone commands must be ordered, because they can depend on the
1051 * effects of previously issued commands, and they may affect
1052 * commands after them.
1054 if (bp->bio_cmd == BIO_ZONE)
1055 bp->bio_flags |= BIO_ORDERED;
1058 * Place it in the queue of disk activities for this disk
1060 cam_iosched_queue_work(softc->cam_iosched, bp);
1063 * Schedule ourselves for performing the work.
1065 adaschedule(periph);
1066 cam_periph_unlock(periph);
/*
 * adadump(): kernel crash-dump entry point (dumper_t).  Runs a
 * synchronous polled WRITE DMA (48-bit when the flags and LBA range
 * require it) to write "length" bytes at byte "offset", and on the
 * final call (the flush path guarded by ADA_FLAG_CAN_FLUSHCACHE)
 * issues a FLUSH CACHE so data reaches the media.
 * NOTE(review): partial extraction -- the dp/lba/count/error
 * declarations, several cam_fill_ataio() argument lines, and the
 * return statements are on dropped lines; the zero-length "flush only"
 * branch condition is also not visible here.
 */
1072 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
1074 struct cam_periph *periph;
1075 struct ada_softc *softc;
1077 struct ccb_ataio ataio;
1084 periph = dp->d_drv1;
1085 softc = (struct ada_softc *)periph->softc;
1086 secsize = softc->params.secsize;
1087 lba = offset / secsize;
1088 count = length / secsize;
1089 if ((periph->flags & CAM_PERIPH_INVALID) != 0)
1092 memset(&ataio, 0, sizeof(ataio));
1094 xpt_setup_ccb(&ataio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1095 ataio.ccb_h.ccb_state = ADA_CCB_DUMP;
1096 cam_fill_ataio(&ataio,
1101 (u_int8_t *) virtual,
1103 ada_default_timeout*1000);
/* Use the 48-bit command when the transfer extends past the 28-bit
 * LBA limit (or the drive requires it -- condition partly dropped). */
1104 if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1105 (lba + count >= ATA_MAX_28BIT_LBA ||
1107 ata_48bit_cmd(&ataio, ATA_WRITE_DMA48,
1110 ata_28bit_cmd(&ataio, ATA_WRITE_DMA,
/* Polled, no recovery/retries: we may be dumping from a panic. */
1113 error = cam_periph_runccb((union ccb *)&ataio, adaerror,
1114 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1116 printf("Aborting dump due to I/O error.\n");
1121 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
1122 xpt_setup_ccb(&ataio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1125 * Tell the drive to flush its internal cache. if we
1126 * can't flush in 5s we have big problems. No need to
1127 * wait the default 60s to detect problems.
1129 ataio.ccb_h.ccb_state = ADA_CCB_DUMP;
1130 cam_fill_ataio(&ataio,
1139 if (softc->flags & ADA_FLAG_CAN_48BIT)
1140 ata_48bit_cmd(&ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1142 ata_28bit_cmd(&ataio, ATA_FLUSHCACHE, 0, 0, 0);
1143 error = cam_periph_runccb((union ccb *)&ataio, adaerror,
1144 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1146 xpt_print(periph->path, "Synchronize cache failed\n");
/*
 * Fragment of adainit() (periph_init_t): registers the driver-wide
 * AC_FOUND_DEVICE async callback and, when ordered tags are enabled,
 * the suspend/resume/shutdown event handlers.
 * NOTE(review): the function header and surrounding lines were dropped
 * by this extraction; "adainit" is inferred from the printf() tags --
 * confirm against the full source.
 */
1157 * Install a global async callback. This callback will
1158 * receive async callbacks like "new device found".
1160 status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
1162 if (status != CAM_REQ_CMP) {
1163 printf("ada: Failed to attach master async callback "
1164 "due to status 0x%x!\n", status);
1165 } else if (ada_send_ordered) {
1167 /* Register our event handlers */
1168 if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
1169 NULL, EVENTHANDLER_PRI_LAST)) == NULL)
1170 printf("adainit: power event registration failed!\n");
1171 if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
1172 NULL, EVENTHANDLER_PRI_LAST)) == NULL)
1173 printf("adainit: power event registration failed!\n");
1174 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
1175 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1176 printf("adainit: shutdown event registration failed!\n");
1181 * Callback from GEOM, called when it has finished cleaning up its
/*
 * adadiskgonecb(): drop the periph reference that was held on behalf
 * of the GEOM disk once GEOM is done with it.
 */
1185 adadiskgonecb(struct disk *dp)
1187 struct cam_periph *periph;
1189 periph = (struct cam_periph *)dp->d_drv1;
1191 cam_periph_release(periph);
/*
 * adaoninvalidate(): periph_oninv_t.  Deregisters our async callbacks,
 * fails all queued I/O with ENXIO via the I/O scheduler, and tells
 * GEOM the disk is gone.
 */
1195 adaoninvalidate(struct cam_periph *periph)
1197 struct ada_softc *softc;
1199 softc = (struct ada_softc *)periph->softc;
1202 * De-register any async callbacks.
1204 xpt_register_async(0, adaasync, periph, periph->path);
1206 softc->invalidations++;
1210 * Return all queued I/O with ENXIO.
1211 * XXX Handle any transactions queued to the card
1212 * with XPT_ABORT_CCB.
1214 cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
1216 disk_gone(softc->disk);
/*
 * adacleanup(): periph_dtor_t.  Tears down the I/O scheduler, the
 * sysctl trees (if ADA_FLAG_SCTX_INIT was set), the GEOM disk, the
 * ordered-tag callout, and finally the softc itself.  Called with the
 * periph lock held; it is dropped around the teardown and re-taken.
 * NOTE(review): partial extraction -- a brace/condition around the
 * stats-context free appears to be on a dropped line.
 */
1220 adacleanup(struct cam_periph *periph)
1222 struct ada_softc *softc;
1224 softc = (struct ada_softc *)periph->softc;
1226 cam_periph_unlock(periph);
1228 cam_iosched_fini(softc->cam_iosched);
1231 * If we can't free the sysctl tree, oh well...
1233 if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) {
1235 if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
1236 xpt_print(periph->path,
1237 "can't remove sysctl stats context\n");
1239 if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
1240 xpt_print(periph->path,
1241 "can't remove sysctl context\n");
1244 disk_destroy(softc->disk);
1245 callout_drain(&softc->sendordered_c);
1246 free(softc, M_DEVBUF);
1247 cam_periph_lock(periph);
/*
 * adasetdeletemethod(): pick the best available BIO_DELETE method from
 * the capability flags, in preference order: NCQ DSM TRIM, then plain
 * DSM TRIM, then CFA ERASE (CFA devices without 48-bit addressing),
 * else none.
 * NOTE(review): the final "else" line for ADA_DELETE_NONE was dropped
 * by this extraction.
 */
1251 adasetdeletemethod(struct ada_softc *softc)
1254 if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
1255 softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM;
1256 else if (softc->flags & ADA_FLAG_CAN_TRIM)
1257 softc->delete_method = ADA_DELETE_DSM_TRIM;
1258 else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT))
1259 softc->delete_method = ADA_DELETE_CFA_ERASE;
1261 softc->delete_method = ADA_DELETE_NONE;
/*
 * adaasync(): async event callback.  Handles new-device arrival
 * (AC_FOUND_DEVICE: allocate a periph and start the probe), Identify
 * data changes (AC_GETDEV_CHANGED: refresh flags/geometry and resize
 * the disk), physical-path advanced-info changes (AC_ADVINFO_CHANGED),
 * and -- in the trailing fragment -- an event that restarts the
 * read-ahead/write-cache/log-directory probe sequence.
 * NOTE(review): partial extraction -- the switch() header, several
 * case labels (the trailing fragment's case label is not visible),
 * break statements, and closing braces are on dropped lines.
 */
1265 adaasync(void *callback_arg, u_int32_t code,
1266 struct cam_path *path, void *arg)
1268 struct ccb_getdev cgd;
1269 struct cam_periph *periph;
1270 struct ada_softc *softc;
1272 periph = (struct cam_periph *)callback_arg;
1274 case AC_FOUND_DEVICE:
1276 struct ccb_getdev *cgd;
1279 cgd = (struct ccb_getdev *)arg;
/* Only ATA-protocol devices belong to this driver. */
1283 if (cgd->protocol != PROTO_ATA)
1287 * Allocate a peripheral instance for
1288 * this device and start the probe
1291 status = cam_periph_alloc(adaregister, adaoninvalidate,
1292 adacleanup, adastart,
1293 "ada", CAM_PERIPH_BIO,
1295 AC_FOUND_DEVICE, cgd);
1297 if (status != CAM_REQ_CMP
1298 && status != CAM_REQ_INPROG)
1299 printf("adaasync: Unable to attach to new device "
1300 "due to status 0x%x\n", status);
1303 case AC_GETDEV_CHANGED:
1305 softc = (struct ada_softc *)periph->softc;
1306 xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1307 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1308 xpt_action((union ccb *)&cgd);
1311 * Update our information based on the new Identify data.
1313 adasetflags(softc, &cgd);
1314 adasetgeom(softc, &cgd);
1315 disk_resize(softc->disk, M_NOWAIT);
1317 cam_periph_async(periph, code, path, arg);
1320 case AC_ADVINFO_CHANGED:
1324 buftype = (uintptr_t)arg;
1325 if (buftype == CDAI_TYPE_PHYS_PATH) {
1326 struct ada_softc *softc;
1328 softc = periph->softc;
1329 disk_attr_changed(softc->disk, "GEOM::physpath",
/* Trailing fragment: re-run the RAHEAD/WCACHE/LOGDIR probe states
 * (case label dropped by the extraction -- confirm which event). */
1337 softc = (struct ada_softc *)periph->softc;
1338 cam_periph_async(periph, code, path, arg);
1339 if (softc->state != ADA_STATE_NORMAL)
1341 xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1342 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1343 xpt_action((union ccb *)&cgd);
1344 if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD)
1345 softc->state = ADA_STATE_RAHEAD;
1346 else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE)
1347 softc->state = ADA_STATE_WCACHE;
1348 else if ((softc->flags & ADA_FLAG_CAN_LOG)
1349 && (softc->zone_mode != ADA_ZONE_NONE))
1350 softc->state = ADA_STATE_LOGDIR;
1353 if (cam_periph_acquire(periph) != 0)
1354 softc->state = ADA_STATE_NORMAL;
1356 xpt_schedule(periph, CAM_PRIORITY_DEV);
1359 cam_periph_async(periph, code, path, arg);
/*
 * adazonemodesysctl(): read-only sysctl handler that reports the
 * drive's zone mode ("Drive Managed" / "Host Aware" / "Host Managed" /
 * "Not Zoned") as a string.
 * NOTE(review): partial extraction -- tmpbuf/error declarations, break
 * statements, and the return are on dropped lines.
 */
1365 adazonemodesysctl(SYSCTL_HANDLER_ARGS)
1368 struct ada_softc *softc;
1371 softc = (struct ada_softc *)arg1;
1373 switch (softc->zone_mode) {
1374 case ADA_ZONE_DRIVE_MANAGED:
1375 snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
1377 case ADA_ZONE_HOST_AWARE:
1378 snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
1380 case ADA_ZONE_HOST_MANAGED:
1381 snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
1385 snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
1389 error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
/*
 * Sysctl handler reporting the supported-zone-operation flags as a
 * comma-separated string built with an sbuf over a stack buffer, or
 * "None" when no flag in ada_zone_desc_table matches.
 * (Excerpt: declarations of sb/tmpbuf/i/error and several braces are
 * elided from this listing.)
 */
1395 adazonesupsysctl(SYSCTL_HANDLER_ARGS)
1398 	struct ada_softc *softc;
1403 	softc = (struct ada_softc *)arg1;
1407 	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
/* Walk the static descriptor table; emit the name of each set flag. */
1409 	for (i = 0; i < sizeof(ada_zone_desc_table) /
1410 	    sizeof(ada_zone_desc_table[0]); i++) {
1411 		if (softc->zone_flags & ada_zone_desc_table[i].value) {
/* Separator before every entry after the first (elided condition). */
1413 				sbuf_printf(&sb, ", ");
1416 			sbuf_cat(&sb, ada_zone_desc_table[i].desc);
1421 		sbuf_printf(&sb, "None");
1425 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
/*
 * Taskqueue callback that creates the per-drive sysctl tree under
 * kern.cam.ada.<unit>.  Runs once after attach (enqueued from
 * adaregister() with a periph reference held); the reference is
 * released on every exit path.  (Excerpt: some braces, returns and
 * blank lines are elided from this listing.)
 */
1432 adasysctlinit(void *context, int pending)
1434 	struct cam_periph *periph;
1435 	struct ada_softc *softc;
1436 	char tmpstr[32], tmpstr2[16];
1438 	periph = (struct cam_periph *)context;
1440 	/* periph was held for us when this task was enqueued */
1441 	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1442 		cam_periph_release(periph);
1446 	softc = (struct ada_softc *)periph->softc;
1447 	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number);
1448 	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
/* Root node: kern.cam.ada.<unit>, labeled "device_index". */
1450 	sysctl_ctx_init(&softc->sysctl_ctx);
1451 	softc->flags |= ADA_FLAG_SCTX_INIT;
1452 	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
1453 	    SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
1454 	    CTLFLAG_RD, 0, tmpstr, "device_index");
1455 	if (softc->sysctl_tree == NULL) {
1456 		printf("adasysctlinit: unable to allocate sysctl tree\n");
1457 		cam_periph_release(periph);
/* Tunable/knob leaves below the per-unit node. */
1461 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1462 		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW,
1463 		softc, 0, adadeletemethodsysctl, "A",
1464 		"BIO_DELETE execution method");
1465 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1466 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1467 	    "trim_count", CTLFLAG_RD, &softc->trim_count,
1468 	    "Total number of dsm commands sent");
1469 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1470 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1471 	    "trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
1472 	    "Total number of ranges in dsm commands");
1473 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1474 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1475 	    "trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
1476 	    "Total lbas in the dsm commands sent");
1477 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1478 		OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE,
1479 		&softc->read_ahead, 0, "Enable disk read ahead.");
1480 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1481 		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
1482 		&softc->write_cache, 0, "Enable disk write cache.");
1483 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1484 		OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE,
1485 		&softc->unmappedio, 0, "Unmapped I/O leaf");
1486 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1487 		OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE,
1488 		&softc->rotating, 0, "Rotating media");
/* Zone reporting leaves are backed by the two handlers above. */
1489 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1490 		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
1491 		softc, 0, adazonemodesysctl, "A",
1493 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1494 		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
1495 		softc, 0, adazonesupsysctl, "A",
1497 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1498 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1499 	    "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
1500 	    "Optimal Number of Open Sequential Write Preferred Zones");
1501 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1502 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1503 	    "optimal_nonseq_zones", CTLFLAG_RD,
1504 	    &softc->optimal_nonseq_zones,
1505 	    "Optimal Number of Non-Sequentially Written Sequential Write "
1507 	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
1508 	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
1509 	    "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
1510 	    "Maximum Number of Open Sequential Write Required Zones");
1512 #ifdef CAM_TEST_FAILURE
1514 	 * Add a 'door bell' sysctl which allows one to set it from userland
1515 	 * and cause something bad to happen.  For the moment, we only allow
1516 	 * whacking the next read or write.
1518 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1519 		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1520 		&softc->force_read_error, 0,
1521 		"Force a read error for the next N reads.");
1522 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1523 		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1524 		&softc->force_write_error, 0,
1525 		"Force a write error for the next N writes.");
1526 	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1527 		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1528 		&softc->periodic_read_error, 0,
1529 		"Force a read error every N reads (don't set too low).");
1530 	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1531 	    OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
1532 	    periph, 0, cam_periph_invalidate_sysctl, "I",
1533 	    "Write 1 to invalidate the drive immediately");
/*
 * Separate "stats" subtree (own context, sysctl_stats_ctx); the
 * sysctl_ctx_init() for it is elided from this listing — TODO confirm.
 */
1537 	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
1538 		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
1539 		CTLFLAG_RD, 0, "Statistics");
1540 	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1541 		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1542 		OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE,
1543 		&softc->timeouts, 0,
1544 		"Device timeouts reported by the SIM");
1545 	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1546 		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1547 		OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE,
1549 		"Transport errors reported by the SIM.");
1550 	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1551 		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1552 		OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE,
1553 		&softc->invalidations, 0,
1554 		"Device pack invalidations.");
/* Let the I/O scheduler add its own leaves under the same tree. */
1557 	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
1558 	    softc->sysctl_tree);
1560 	cam_periph_release(periph);
/*
 * GEOM d_getattr method: forward an attribute query (bp->bio_attribute)
 * to the transport via xpt_getattr() under the periph lock.  On success
 * (elided check) the bio is marked fully completed.  (Excerpt: the
 * declaration of ret and the final return are elided.)
 */
1564 adagetattr(struct bio *bp)
1567 	struct cam_periph *periph;
/* d_drv1 was set to the periph in adaregister(). */
1569 	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1570 	cam_periph_lock(periph);
1571 	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1573 	cam_periph_unlock(periph);
1575 		bp->bio_completed = bp->bio_length;
/*
 * Sysctl handler for kern.cam.ada.<unit>.delete_method.  Reads the
 * current BIO_DELETE method name; on write, accepts only a name whose
 * method is actually supported by this drive (bitmask built from the
 * softc flags).  (Excerpt: buf/p declarations, some returns/braces and
 * the rejection path are elided.)
 */
1580 adadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
1584 	struct ada_softc *softc;
1585 	int i, error, value, methods;
1587 	softc = (struct ada_softc *)arg1;
1589 	value = softc->delete_method;
/* Guard against a corrupt/out-of-range stored method index. */
1590 	if (value < 0 || value > ADA_DELETE_MAX)
1593 	p = ada_delete_method_names[value];
1594 	strncpy(buf, p, sizeof(buf));
1595 	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1596 	if (error != 0 || req->newptr == NULL)
/* Build the set of methods this drive can actually execute. */
1598 	methods = 1 << ADA_DELETE_DISABLE;
1599 	if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1600 	    !(softc->flags & ADA_FLAG_CAN_48BIT))
1601 		methods |= 1 << ADA_DELETE_CFA_ERASE;
1602 	if (softc->flags & ADA_FLAG_CAN_TRIM)
1603 		methods |= 1 << ADA_DELETE_DSM_TRIM;
1604 	if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
1605 		methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM;
/* Accept the new name only if it matches a supported method. */
1606 	for (i = 0; i <= ADA_DELETE_MAX; i++) {
1607 		if (!(methods & (1 << i)) ||
1608 		    strcmp(buf, ada_delete_method_names[i]) != 0)
1610 		softc->delete_method = i;
/*
 * Derive the softc capability flags and zone mode from the drive's
 * ATA IDENTIFY data (and SIM inquiry flags) in *cgd, honoring quirks.
 * Called at attach and again on AC_GETDEV_CHANGED.  (Excerpt: else
 * keywords and closing braces between the paired |=/&=~ lines are
 * elided from this listing.)
 */
1617 adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd)
/* DMA requires both drive capability and SIM support (SID_DMA). */
1619 	if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1620 	    (cgd->inq_flags & SID_DMA))
1621 		softc->flags |= ADA_FLAG_CAN_DMA;
1623 		softc->flags &= ~ADA_FLAG_CAN_DMA;
/* 48-bit LBA; DMA48 additionally needs SIM support. */
1625 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1626 		softc->flags |= ADA_FLAG_CAN_48BIT;
1627 		if (cgd->inq_flags & SID_DMA48)
1628 			softc->flags |= ADA_FLAG_CAN_DMA48;
1630 			softc->flags &= ~ADA_FLAG_CAN_DMA48;
1632 		softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48);
1634 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
1635 		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
1637 		softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE;
1639 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
1640 		softc->flags |= ADA_FLAG_CAN_POWERMGT;
1642 		softc->flags &= ~ADA_FLAG_CAN_POWERMGT;
/* NCQ needs drive support plus SIM DMA and command queuing. */
1644 	if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1645 	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
1646 		softc->flags |= ADA_FLAG_CAN_NCQ;
1648 		softc->flags &= ~ADA_FLAG_CAN_NCQ;
/* TRIM, unless quirked off; clamp range count to drive's DSM limit. */
1650 	if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1651 	    (cgd->inq_flags & SID_DMA) &&
1652 	    (softc->quirks & ADA_Q_NO_TRIM) == 0) {
1653 		softc->flags |= ADA_FLAG_CAN_TRIM;
1654 		softc->trim_max_ranges = TRIM_MAX_RANGES;
1655 		if (cgd->ident_data.max_dsm_blocks != 0) {
1656 			softc->trim_max_ranges =
1657 			    min(cgd->ident_data.max_dsm_blocks *
1658 				ATA_DSM_RANGE_MAX, softc->trim_max_ranges);
1661 		 * If we can do RCVSND_FPDMA_QUEUED commands, we may be able
1662 		 * to do NCQ trims, if we support trims at all. We also need
1663 		 * support from the SIM to do things properly. Perhaps we
1664 		 * should look at log 13 dword 0 bit 0 and dword 1 bit 0 are
1667 		if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
1668 		    (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 &&
1669 		    (cgd->ident_data.satacapabilities2 &
1670 		     ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
1671 		    (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
1672 			softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
1674 			softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
1676 		softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
1678 	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
1679 		softc->flags |= ADA_FLAG_CAN_CFA;
1681 		softc->flags &= ~ADA_FLAG_CAN_CFA;
1684 	 * Now that we've set the appropriate flags, setup the delete
1687 	adasetdeletemethod(softc);
1689 	if ((cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG)
1690 	 && ((softc->quirks & ADA_Q_LOG_BROKEN) == 0))
1691 		softc->flags |= ADA_FLAG_CAN_LOG;
1693 		softc->flags &= ~ADA_FLAG_CAN_LOG;
/* Zone mode: host-aware, drive-managed (or SMR quirk), else none. */
1695 	if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
1696 	    ATA_SUPPORT_ZONE_HOST_AWARE)
1697 		softc->zone_mode = ADA_ZONE_HOST_AWARE;
1698 	else if (((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) ==
1699 		   ATA_SUPPORT_ZONE_DEV_MANAGED)
1700 	      || (softc->quirks & ADA_Q_SMR_DM))
1701 		softc->zone_mode = ADA_ZONE_DRIVE_MANAGED;
1703 		softc->zone_mode = ADA_ZONE_NONE;
1705 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
1706 		softc->flags |= ADA_FLAG_CAN_RAHEAD;
1708 		softc->flags &= ~ADA_FLAG_CAN_RAHEAD;
1710 	if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
1711 		softc->flags |= ADA_FLAG_CAN_WCACHE;
1713 		softc->flags &= ~ADA_FLAG_CAN_WCACHE;
/*
 * CAM periph registration callback: allocate and initialize the softc,
 * match quirks, fetch per-unit tunables, create and announce the GEOM
 * disk, register async callbacks, and kick off the probe state machine
 * (read-ahead / write-cache / log-directory) or finish immediately.
 * Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on failure.
 * (Excerpt: several declarations, braces and blank lines are elided.)
 */
1717 adaregister(struct cam_periph *periph, void *arg)
1719 	struct ada_softc *softc;
1720 	struct ccb_getdev *cgd;
1721 	struct disk_params *dp;
/* arg is the XPT_GDEV_TYPE CCB passed by the async handler. */
1727 	cgd = (struct ccb_getdev *)arg;
1729 		printf("adaregister: no getdev CCB, can't register device\n");
1730 		return(CAM_REQ_CMP_ERR);
1733 	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
1736 	if (softc == NULL) {
1737 		printf("adaregister: Unable to probe new device. "
1738 		       "Unable to allocate softc\n");
1739 		return(CAM_REQ_CMP_ERR);
1742 	announce_buf = softc->announce_temp;
1743 	bzero(announce_buf, ADA_ANNOUNCETMP_SZ);
1745 	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
1746 		printf("adaregister: Unable to probe new device. "
1747 		       "Unable to allocate iosched memory\n");
1748 		free(softc, M_DEVBUF);
1749 		return(CAM_REQ_CMP_ERR);
1752 	periph->softc = softc;
1753 	xpt_path_inq(&softc->cpi, periph->path);
1756 	 * See if this device has any quirks.
1758 	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
1759 			       (caddr_t)ada_quirk_table,
1760 			       nitems(ada_quirk_table),
1761 			       sizeof(*ada_quirk_table), ata_identify_match);
1763 		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
1765 		softc->quirks = ADA_Q_NONE;
1767 	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
1770 	 * Register this media as a disk
1772 	(void)cam_periph_hold(periph, PRIBIO);
1773 	cam_periph_unlock(periph);
/*
 * announce_buf is reused as scratch for per-unit tunable names here;
 * quirks/read_ahead/write_cache may be overridden from the environment.
 */
1774 	snprintf(announce_buf, ADA_ANNOUNCETMP_SZ,
1775 	    "kern.cam.ada.%d.quirks", periph->unit_number);
1776 	quirks = softc->quirks;
1777 	TUNABLE_INT_FETCH(announce_buf, &quirks);
1778 	softc->quirks = quirks;
1779 	softc->read_ahead = -1;
1780 	snprintf(announce_buf, ADA_ANNOUNCETMP_SZ,
1781 	    "kern.cam.ada.%d.read_ahead", periph->unit_number);
1782 	TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead);
1783 	softc->write_cache = -1;
1784 	snprintf(announce_buf, ADA_ANNOUNCETMP_SZ,
1785 	    "kern.cam.ada.%d.write_cache", periph->unit_number);
1786 	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
1789 	 * Set support flags based on the Identify data and quirks.
1791 	adasetflags(softc, cgd);
1792 	if (softc->cpi.hba_misc & PIM_ATA_EXT)
1793 		softc->flags |= ADA_FLAG_PIM_ATA_EXT;
1795 	/* Disable queue sorting for non-rotational media by default. */
1796 	if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) {
1797 		softc->rotating = 0;
1799 		softc->rotating = 1;
1801 	cam_iosched_set_sort_queue(softc->cam_iosched, softc->rotating ? -1 : 0);
/* Build the GEOM disk object and its devstat entry. */
1802 	softc->disk = disk_alloc();
1803 	adasetgeom(softc, cgd);
1804 	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
1805 			  periph->unit_number, softc->params.secsize,
1806 			  DEVSTAT_ALL_SUPPORTED,
1807 			  DEVSTAT_TYPE_DIRECT |
1808 			  XPORT_DEVSTAT_TYPE(softc->cpi.transport),
1809 			  DEVSTAT_PRIORITY_DISK);
1810 	softc->disk->d_open = adaopen;
1811 	softc->disk->d_close = adaclose;
1812 	softc->disk->d_strategy = adastrategy;
1813 	softc->disk->d_getattr = adagetattr;
1814 	softc->disk->d_dump = adadump;
1815 	softc->disk->d_gone = adadiskgonecb;
1816 	softc->disk->d_name = "ada";
1817 	softc->disk->d_drv1 = periph;
1818 	softc->disk->d_unit = periph->unit_number;
1821 	 * Acquire a reference to the periph before we register with GEOM.
1822 	 * We'll release this reference once GEOM calls us back (via
1823 	 * adadiskgonecb()) telling us that our provider has been freed.
1825 	if (cam_periph_acquire(periph) != 0) {
1826 		xpt_print(periph->path, "%s: lost periph during "
1827 			  "registration!\n", __func__);
1828 		cam_periph_lock(periph);
1829 		return (CAM_REQ_CMP_ERR);
1831 	disk_create(softc->disk, DISK_VERSION);
1832 	cam_periph_lock(periph);
1834 	dp = &softc->params;
1835 	snprintf(announce_buf, ADA_ANNOUNCETMP_SZ,
1836 	    "%juMB (%ju %u byte sectors)",
1837 	    ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024),
1838 	    (uintmax_t)dp->sectors, dp->secsize);
1840 	sbuf_new(&sb, softc->announce_buffer, ADA_ANNOUNCE_SZ, SBUF_FIXEDLEN);
1841 	xpt_announce_periph_sbuf(periph, &sb, announce_buf);
1842 	xpt_announce_quirks_sbuf(periph, &sb, softc->quirks, ADA_Q_BIT_STRING);
1847 	 * Create our sysctl variables, now that we know
1848 	 * we have successfully attached.
1850 	if (cam_periph_acquire(periph) == 0)
1851 		taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
1854 	 * Add async callbacks for bus reset and
1855 	 * bus device reset calls.  I don't bother
1856 	 * checking if this fails as, in most cases,
1857 	 * the system will function just fine without
1858 	 * them and the only alternative would be to
1859 	 * not attach the device on failure.
1861 	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
1862 	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
1863 	    adaasync, periph, periph->path);
1866 	 * Schedule a periodic event to occasionally send an
1867 	 * ordered tag to a device.
1869 	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
1870 	callout_reset(&softc->sendordered_c,
1871 	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1872 	    adasendorderedtag, softc);
/* Pick the first probe step needed, or finish probing right away. */
1874 	if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) {
1875 		softc->state = ADA_STATE_RAHEAD;
1876 	} else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) {
1877 		softc->state = ADA_STATE_WCACHE;
1878 	} else if ((softc->flags & ADA_FLAG_CAN_LOG)
1879 		&& (softc->zone_mode != ADA_ZONE_NONE)) {
1880 		softc->state = ADA_STATE_LOGDIR;
1883 		 * Nothing to probe, so we can just transition to the
1886 		adaprobedone(periph, NULL);
1887 		return(CAM_REQ_CMP);
1890 	xpt_schedule(periph, CAM_PRIORITY_DEV);
1892 	return(CAM_REQ_CMP);
/*
 * Build a DSM TRIM payload in *req from the given bio plus any further
 * delete bios the I/O scheduler offers, packing (LBA, count) pairs in
 * the little-endian 8-byte DSM range format.  Adjacent bios that start
 * where the previous one ended extend the last range where possible.
 * Updates the softc trim statistics; presumably returns the number of
 * ranges used — the return statement is elided from this excerpt.
 */
1896 ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req)
1898 	uint64_t lastlba = (uint64_t)-1, lbas = 0;
1899 	int c, lastcount = 0, off, ranges = 0;
1901 	bzero(req, sizeof(*req));
1902 	TAILQ_INIT(&req->bps);
/* Loop head over successive bios is elided from this listing. */
1904 		uint64_t lba = bp->bio_pblkno;
1905 		int count = bp->bio_bcount / softc->params.secsize;
1907 		/* Try to extend the previous range. */
1908 		if (lba == lastlba) {
1909 			c = min(count, ATA_DSM_RANGE_MAX - lastcount);
/* Rewrite the count field of the last emitted range in place. */
1911 				off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
1912 				req->data[off + 6] = lastcount & 0xff;
1913 				req->data[off + 7] =
1914 				    (lastcount >> 8) & 0xff;
/* Emit new ranges, ATA_DSM_RANGE_MAX sectors max per range. */
1921 			c = min(count, ATA_DSM_RANGE_MAX);
1922 			off = ranges * ATA_DSM_RANGE_SIZE;
1923 			req->data[off + 0] = lba & 0xff;
1924 			req->data[off + 1] = (lba >> 8) & 0xff;
1925 			req->data[off + 2] = (lba >> 16) & 0xff;
1926 			req->data[off + 3] = (lba >> 24) & 0xff;
1927 			req->data[off + 4] = (lba >> 32) & 0xff;
1928 			req->data[off + 5] = (lba >> 40) & 0xff;
1929 			req->data[off + 6] = c & 0xff;
1930 			req->data[off + 7] = (c >> 8) & 0xff;
1937 		 * Its the caller's responsibility to ensure the
1938 		 * request will fit so we don't need to check for
1943 		TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
/* Pull the next queued trim; stop if it would overflow the payload. */
1945 		bp = cam_iosched_next_trim(softc->cam_iosched);
1948 		if (bp->bio_bcount / softc->params.secsize >
1949 		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
1950 			cam_iosched_put_back_trim(softc->cam_iosched, bp);
1954 	softc->trim_count++;
1955 	softc->trim_ranges += ranges;
1956 	softc->trim_lbas += lbas;
/*
 * Issue a non-NCQ DATA SET MANAGEMENT / TRIM: build the range payload
 * via ada_dsmtrim_req_create(), then fill the ATA I/O CCB with a
 * 48-bit DSM TRIM command sized in ATA_DSM_BLK_SIZE blocks.
 * (Excerpt: the ranges declaration and some cam_fill_ataio arguments
 * are elided from this listing.)
 */
1962 ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1964 	struct trim_request *req = &softc->trim_req;
1967 	ranges = ada_dsmtrim_req_create(softc, bp, req);
1968 	cam_fill_ataio(ataio,
/* Transfer length rounded up to whole 512-byte DSM blocks. */
1974 	    howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1975 	    ada_default_timeout * 1000);
1976 	ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
1977 	    ATA_DSM_TRIM, 0, howmany(ranges, ATA_DSM_BLK_RANGES));
/*
 * Issue an NCQ-queued TRIM via SEND FPDMA QUEUED with the DSM
 * subcommand in sector_count_exp and the AUX field flagged, so trims
 * can overlap other NCQ I/O.  Payload built by ada_dsmtrim_req_create().
 * (Excerpt: the ranges declaration and some call arguments are elided.)
 */
1981 ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1983 	struct trim_request *req = &softc->trim_req;
1986 	ranges = ada_dsmtrim_req_create(softc, bp, req);
1987 	cam_fill_ataio(ataio,
1993 	    howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1994 	    ada_default_timeout * 1000);
/* NCQ command setup (elided ata_ncq_cmd call wraps these args). */
1996 		    ATA_SEND_FPDMA_QUEUED,
1998 		    howmany(ranges, ATA_DSM_BLK_RANGES));
1999 	ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM;
2000 	ataio->ata_flags |= ATA_FLAG_AUX;
/*
 * BIO_DELETE via CFA ERASE SECTORS (CompactFlash devices): a single
 * 28-bit command covering the bio's LBA range; only this one bio is
 * queued on the request.  (Excerpt: some cam_fill_ataio arguments are
 * elided from this listing.)
 */
2005 ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
2007 	struct trim_request *req = &softc->trim_req;
2008 	uint64_t lba = bp->bio_pblkno;
/* NOTE(review): count is uint16_t — large deletes presumably split
 * by the caller/delete-method limits; confirm upstream. */
2009 	uint16_t count = bp->bio_bcount / softc->params.secsize;
2011 	bzero(req, sizeof(*req));
2012 	TAILQ_INIT(&req->bps);
2013 	TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
2015 	cam_fill_ataio(ataio,
2022 	    ada_default_timeout*1000);
2026 	ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
/*
 * Translate a GEOM disk-zone command code into the corresponding ATA
 * Zone Management action.  The RWP and default cases (returning
 * ATA_ZM_RWP / -1 for untranslatable commands, per the caller's -1
 * check) are elided from this listing.
 */
2030 ada_zone_bio_to_ata(int disk_zone_cmd)
2032 	switch (disk_zone_cmd) {
2033 	case DISK_ZONE_OPEN:
2034 		return ATA_ZM_OPEN_ZONE;
2035 	case DISK_ZONE_CLOSE:
2036 		return ATA_ZM_CLOSE_ZONE;
2037 	case DISK_ZONE_FINISH:
2038 		return ATA_ZM_FINISH_ZONE;
/*
 * Translate a BIO_ZONE request into an ATA Zone Management CCB
 * (ZAC management out for open/close/finish/RWP, ZAC management in for
 * REPORT ZONES) or satisfy DISK_ZONE_GET_PARAMS directly from softc
 * state.  The elided queue_ccb out-parameter tells the caller whether
 * the CCB must actually be submitted.  (Excerpt: declarations, error
 * paths, breaks and the final return are elided from this listing.)
 */
2047 ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
2050 	struct ada_softc *softc;
/* Reject anything that is not a zone bio (error path elided). */
2055 	if (bp->bio_cmd != BIO_ZONE) {
2060 	softc = periph->softc;
2062 	switch (bp->bio_zone.zone_cmd) {
2063 	case DISK_ZONE_OPEN:
2064 	case DISK_ZONE_CLOSE:
2065 	case DISK_ZONE_FINISH:
2066 	case DISK_ZONE_RWP: {
2071 		zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd);
2072 		if (zone_sa == -1) {
2073 			xpt_print(periph->path, "Cannot translate zone "
2074 			    "cmd %#x to ATA\n", bp->bio_zone.zone_cmd);
2080 		lba = bp->bio_zone.zone_params.rwp.id;
2082 		if (bp->bio_zone.zone_params.rwp.flags &
2083 		    DISK_ZONE_RWP_FLAG_ALL)
2084 			zone_flags |= ZBC_OUT_ALL;
/* NCQ-encapsulated ZAC op when the SIM advertises ATA extensions. */
2086 		ata_zac_mgmt_out(&ccb->ataio,
2087 				 /*retries*/ ada_retry_count,
2089 				 /*use_ncq*/ (softc->flags &
2090 					      ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
2091 				 /*zm_action*/ zone_sa,
2093 				 /*zone_flags*/ zone_flags,
2097 				 /*timeout*/ ada_default_timeout * 1000);
2102 	case DISK_ZONE_REPORT_ZONES: {
2104 		uint32_t num_entries, alloc_size;
2105 		struct disk_zone_report *rep;
2107 		rep = &bp->bio_zone.zone_params.report;
2109 		num_entries = rep->entries_allocated;
2110 		if (num_entries == 0) {
2111 			xpt_print(periph->path, "No entries allocated for "
2112 			    "Report Zones request\n");
/*
 * Size the bounce buffer for header + descriptors, clamped to the
 * disk's maximum transfer size; freed after completion (elsewhere).
 */
2116 		alloc_size = sizeof(struct scsi_report_zones_hdr) +
2117 		    (sizeof(struct scsi_report_zones_desc) * num_entries);
2118 		alloc_size = min(alloc_size, softc->disk->d_maxsize);
2119 		rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO);
2120 		if (rz_ptr == NULL) {
2121 			xpt_print(periph->path, "Unable to allocate memory "
2122 			   "for Report Zones request\n");
2127 		ata_zac_mgmt_in(&ccb->ataio,
2128 				/*retries*/ ada_retry_count,
2130 				/*use_ncq*/ (softc->flags &
2131 					     ADA_FLAG_PIM_ATA_EXT) ? 1 : 0,
2132 				/*zm_action*/ ATA_ZM_REPORT_ZONES,
2133 				/*zone_id*/ rep->starting_id,
2134 				/*zone_flags*/ rep->rep_options,
2135 				/*data_ptr*/ rz_ptr,
2136 				/*dxfer_len*/ alloc_size,
2137 				/*timeout*/ ada_default_timeout * 1000);
2140 		 * For BIO_ZONE, this isn't normally needed.  However, it
2141 		 * is used by devstat_end_transaction_bio() to determine
2142 		 * how much data was transferred.
2145 		 * XXX KDM we have a problem.  But I'm not sure how to fix
2146 		 * it.  devstat uses bio_bcount - bio_resid to calculate
2147 		 * the amount of data transferred.   The GEOM disk code
2148 		 * uses bio_length - bio_resid to calculate the amount of
2149 		 * data in bio_completed.  We have different structure
2150 		 * sizes above and below the ada(4) driver.  So, if we
2151 		 * use the sizes above, the amount transferred won't be
2152 		 * quite accurate for devstat.  If we use different sizes
2153 		 * for bio_bcount and bio_length (above and below
2154 		 * respectively), then the residual needs to match one or
2155 		 * the other.  Everything is calculated after the bio
2156 		 * leaves the driver, so changing the values around isn't
2157 		 * really an option.  For now, just set the count to the
2158 		 * passed in length.  This means that the calculations
2159 		 * above (e.g. bio_completed) will be correct, but the
2160 		 * amount of data reported to devstat will be slightly
2161 		 * under or overstated.
2163 		bp->bio_bcount = bp->bio_length;
2169 	case DISK_ZONE_GET_PARAMS: {
2170 		struct disk_zone_disk_params *params;
/* Answered locally from cached softc state; no CCB needed. */
2172 		params = &bp->bio_zone.zone_params.disk_params;
2173 		bzero(params, sizeof(*params));
2175 		switch (softc->zone_mode) {
2176 		case ADA_ZONE_DRIVE_MANAGED:
2177 			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
2179 		case ADA_ZONE_HOST_AWARE:
2180 			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
2182 		case ADA_ZONE_HOST_MANAGED:
2183 			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
2187 			params->zone_mode = DISK_ZONE_MODE_NONE;
2191 		if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ)
2192 			params->flags |= DISK_ZONE_DISK_URSWRZ;
2194 		if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) {
2195 			params->optimal_seq_zones = softc->optimal_seq_zones;
2196 			params->flags |= DISK_ZONE_OPT_SEQ_SET;
2199 		if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) {
2200 			params->optimal_nonseq_zones =
2201 			    softc->optimal_nonseq_zones;
2202 			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
2205 		if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) {
2206 			params->max_seq_zones = softc->max_seq_zones;
2207 			params->flags |= DISK_ZONE_MAX_SEQ_SET;
2209 		if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP)
2210 			params->flags |= DISK_ZONE_RZ_SUP;
2212 		if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP)
2213 			params->flags |= DISK_ZONE_OPEN_SUP;
2215 		if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP)
2216 			params->flags |= DISK_ZONE_CLOSE_SUP;
2218 		if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP)
2219 			params->flags |= DISK_ZONE_FINISH_SUP;
2221 		if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP)
2222 			params->flags |= DISK_ZONE_RWP_SUP;
/*
 * CAM periph start routine: called with a CCB when the periph is
 * scheduled.  In ADA_STATE_NORMAL it pulls the next bio from the I/O
 * scheduler and builds the matching ATA command (read/write with
 * NCQ/48-bit/28-bit selection, delete via the configured method, flush,
 * or zone command); the probe states each issue one step of the
 * post-attach probe (SETFEATURES, log directory, identify log pages).
 * (Excerpt: declarations, braces, breaks and blank lines are elided.)
 */
2233 adastart(struct cam_periph *periph, union ccb *start_ccb)
2235 	struct ada_softc *softc = (struct ada_softc *)periph->softc;
2236 	struct ccb_ataio *ataio = &start_ccb->ataio;
2238 	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n"));
2240 	switch (softc->state) {
2241 	case ADA_STATE_NORMAL:
2246 		bp = cam_iosched_next_bio(softc->cam_iosched);
/* No work: release the CCB (and return — elided). */
2248 			xpt_release_ccb(start_ccb);
/* Periodically convert a request to an ordered tag to avoid
 * starvation; ADA_FLAG_NEED_OTAG is set by adasendorderedtag(). */
2252 		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
2253 		    (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) {
2254 			softc->flags &= ~ADA_FLAG_NEED_OTAG;
2255 			softc->flags |= ADA_FLAG_WAS_OTAG;
2260 		switch (bp->bio_cmd) {
/* BIO_READ / BIO_WRITE (case labels elided). */
2264 			uint64_t lba = bp->bio_pblkno;
2265 			uint16_t count = bp->bio_bcount / softc->params.secsize;
2269 			if (bp->bio_cmd == BIO_WRITE) {
2270 				softc->flags |= ADA_FLAG_DIRTY;
2271 				rw_op = CAM_DIR_OUT;
2276 			data_ptr = bp->bio_data;
2277 			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
2278 				rw_op |= CAM_DATA_BIO;
2282 #ifdef CAM_TEST_FAILURE
2286 			 * Support the failure ioctls.  If the command is a
2287 			 * read, and there are pending forced read errors, or
2288 			 * if a write and pending write errors, then fail this
2289 			 * operation with EIO.  This is useful for testing
2290 			 * purposes.  Also, support having every Nth read fail.
2292 			 * This is a rather blunt tool.
2294 			if (bp->bio_cmd == BIO_READ) {
2295 				if (softc->force_read_error) {
2296 					softc->force_read_error--;
2299 				if (softc->periodic_read_error > 0) {
2300 					if (++softc->periodic_read_count >=
2301 					    softc->periodic_read_error) {
2302 						softc->periodic_read_count = 0;
2307 				if (softc->force_write_error) {
2308 					softc->force_write_error--;
/* Shared forced-failure exit (label elided above). */
2313 			biofinish(bp, NULL, EIO);
2314 			xpt_release_ccb(start_ccb);
2315 			adaschedule(periph);
/* Unmapped bios must supply exactly the pages the transfer needs. */
2319 			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
2320 			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
2321 			    PAGE_SIZE == bp->bio_ma_n,
2322 			    ("Short bio %p", bp));
2323 			cam_fill_ataio(ataio,
2330 			    ada_default_timeout*1000);
/* Command selection: NCQ first, then 48-bit (DMA or MUL), then
 * 28-bit (DMA or MUL), depending on capability flags and LBA reach. */
2332 			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
2333 				if (bp->bio_cmd == BIO_READ) {
2334 					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
2337 					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
2340 			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
2341 			    (lba + count >= ATA_MAX_28BIT_LBA ||
2343 				if (softc->flags & ADA_FLAG_CAN_DMA48) {
2344 					if (bp->bio_cmd == BIO_READ) {
2345 						ata_48bit_cmd(ataio, ATA_READ_DMA48,
2348 						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
2352 					if (bp->bio_cmd == BIO_READ) {
2353 						ata_48bit_cmd(ataio, ATA_READ_MUL48,
2356 						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
2363 				if (softc->flags & ADA_FLAG_CAN_DMA) {
2364 					if (bp->bio_cmd == BIO_READ) {
2365 						ata_28bit_cmd(ataio, ATA_READ_DMA,
2368 						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
2372 					if (bp->bio_cmd == BIO_READ) {
2373 						ata_28bit_cmd(ataio, ATA_READ_MUL,
2376 						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
/* BIO_DELETE: dispatch on the configured delete method. */
2384 			switch (softc->delete_method) {
2385 			case ADA_DELETE_NCQ_DSM_TRIM:
2386 				ada_ncq_dsmtrim(softc, bp, ataio);
2388 			case ADA_DELETE_DSM_TRIM:
2389 				ada_dsmtrim(softc, bp, ataio);
2391 			case ADA_DELETE_CFA_ERASE:
2392 				ada_cfaerase(softc, bp, ataio);
2395 				biofinish(bp, NULL, EOPNOTSUPP);
2396 				xpt_release_ccb(start_ccb);
2397 				adaschedule(periph);
2400 			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
2401 			start_ccb->ccb_h.flags |= CAM_UNLOCKED;
2402 			cam_iosched_submit_trim(softc->cam_iosched);
/* BIO_FLUSH: cache flush, 48-bit when available. */
2405 			cam_fill_ataio(ataio,
2412 			    ada_default_timeout*1000);
2414 			if (softc->flags & ADA_FLAG_CAN_48BIT)
2415 				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
2417 				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
/* BIO_ZONE: build via ada_zone_cmd(); finish locally if no CCB
 * submission is required (queue_ccb == 0) or on error. */
2420 			int error, queue_ccb;
2424 			error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb);
2426 			 || (queue_ccb == 0)) {
2427 				biofinish(bp, NULL, error);
2428 				xpt_release_ccb(start_ccb);
/* Common submit path: dispatch unlocked, then reschedule. */
2434 		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
2435 		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
2437 		start_ccb->ccb_h.ccb_bp = bp;
2438 		softc->outstanding_cmds++;
2440 		cam_periph_unlock(periph);
2441 		xpt_action(start_ccb);
2442 		cam_periph_lock(periph);
2444 		/* May have more work to do, so ensure we stay scheduled */
2445 		adaschedule(periph);
2448 	case ADA_STATE_RAHEAD:
2449 	case ADA_STATE_WCACHE:
2451 		cam_fill_ataio(ataio,
2458 		    ada_default_timeout*1000);
/* SETFEATURES enables/disables read-ahead or write cache per the
 * ADA_RA / ADA_WC policy knobs. */
2460 		if (softc->state == ADA_STATE_RAHEAD) {
2461 			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ?
2462 			    ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0);
2463 			start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD;
2465 			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ?
2466 			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
2467 			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
2469 		start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2470 		xpt_action(start_ccb);
2473 	case ADA_STATE_LOGDIR:
2475 		struct ata_gp_log_dir *log_dir;
2477 		if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) {
2478 			adaprobedone(periph, start_ccb);
/* Buffer is freed by the completion handler (not shown). */
2482 		log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO);
2483 		if (log_dir == NULL) {
2484 			xpt_print(periph->path, "Couldn't malloc log_dir "
2486 			softc->state = ADA_STATE_NORMAL;
2487 			xpt_release_ccb(start_ccb);
/* READ LOG (DMA if capable) of the general-purpose log directory. */
2495 				   /*log_address*/ ATA_LOG_DIRECTORY,
2498 				   /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
2500 				   /*data_ptr*/ (uint8_t *)log_dir,
2501 				   /*dxfer_len*/sizeof(*log_dir),
2502 				   /*timeout*/ada_default_timeout*1000);
2504 		start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR;
2505 		xpt_action(start_ccb);
2508 	case ADA_STATE_IDDIR:
2510 		struct ata_identify_log_pages *id_dir;
2512 		id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO);
2513 		if (id_dir == NULL) {
2514 			xpt_print(periph->path, "Couldn't malloc id_dir "
2516 			adaprobedone(periph, start_ccb);
2523 				/*log_address*/ ATA_IDENTIFY_DATA_LOG,
2524 				/*page_number*/ ATA_IDL_PAGE_LIST,
2526 				/*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
2528 				/*data_ptr*/ (uint8_t *)id_dir,
2529 				/*dxfer_len*/ sizeof(*id_dir),
2530 				/*timeout*/ada_default_timeout*1000);
2532 		start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR;
2533 		xpt_action(start_ccb);
2536 	case ADA_STATE_SUP_CAP:
2538 		struct ata_identify_log_sup_cap *sup_cap;
2540 		sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO);
2541 		if (sup_cap == NULL) {
2542 			xpt_print(periph->path, "Couldn't malloc sup_cap "
2544 			adaprobedone(periph, start_ccb);
2551 				/*log_address*/ ATA_IDENTIFY_DATA_LOG,
2552 				/*page_number*/ ATA_IDL_SUP_CAP,
2554 				/*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
2556 				/*data_ptr*/ (uint8_t *)sup_cap,
2557 				/*dxfer_len*/ sizeof(*sup_cap),
2558 				/*timeout*/ada_default_timeout*1000);
2560 		start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP;
2561 		xpt_action(start_ccb);
2564 	case ADA_STATE_ZONE:
2566 		struct ata_zoned_info_log *ata_zone;
2568 		ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO);
2569 		if (ata_zone == NULL) {
2570 			xpt_print(periph->path, "Couldn't malloc ata_zone "
2572 			adaprobedone(periph, start_ccb);
2579 				/*log_address*/ ATA_IDENTIFY_DATA_LOG,
2580 				/*page_number*/ ATA_IDL_ZDI,
2582 				/*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ?
2584 				/*data_ptr*/ (uint8_t *)ata_zone,
2585 				/*dxfer_len*/ sizeof(*ata_zone),
2586 				/*timeout*/ada_default_timeout*1000);
2588 		start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE;
2589 		xpt_action(start_ccb);
/*
 * Finish the post-attach probe sequence: release the probe CCB if one
 * was passed, return to ADA_STATE_NORMAL, mark the periph probed, and
 * (first time only) announce the device, drop the attach-time hold and
 * the probe reference.
 */
2596 adaprobedone(struct cam_periph *periph, union ccb *ccb)
2598 	struct ada_softc *softc;
2600 	softc = (struct ada_softc *)periph->softc;
/* ccb may be NULL when probing was skipped (see adaregister()). */
2603 		xpt_release_ccb(ccb);
2605 	softc->state = ADA_STATE_NORMAL;
2606 	softc->flags |= ADA_FLAG_PROBED;
2607 	adaschedule(periph);
2608 	if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) {
2609 		softc->flags |= ADA_FLAG_ANNOUNCED;
2610 		cam_periph_unhold(periph);
2612 		cam_periph_release_locked(periph);
/*
 * adazonedone(): complete a BIO_ZONE request.
 *
 * For REPORT ZONES, translates the drive's SCSI/ATA report-zones buffer
 * (header + descriptors, little-endian fields) into the disk_zone_rep_entry
 * array the bio consumer allocated, and sets bio_resid for devstat
 * accounting.  OPEN/CLOSE/FINISH need no translation.  The report buffer
 * was allocated at submit time and is freed here (M_ATADA).
 *
 * NOTE(review): non-contiguous embedded line numbers show that lines
 * (breaks, braces, else branches) were dropped from this extraction;
 * verify against the canonical sys/cam/ata/ata_da.c.
 */
2617 adazonedone(struct cam_periph *periph, union ccb *ccb)
2621 bp = (struct bio *)ccb->ccb_h.ccb_bp;
2623 switch (bp->bio_zone.zone_cmd) {
2624 case DISK_ZONE_OPEN:
2625 case DISK_ZONE_CLOSE:
2626 case DISK_ZONE_FINISH:
2629 case DISK_ZONE_REPORT_ZONES: {
2631 struct disk_zone_report *rep;
2632 struct scsi_report_zones_hdr *hdr;
2633 struct scsi_report_zones_desc *desc;
2634 struct disk_zone_rep_entry *entry;
2635 uint32_t hdr_len, num_avail;
2636 uint32_t num_to_fill, i;
2638 rep = &bp->bio_zone.zone_params.report;
/* Bytes the drive actually returned. */
2639 avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid;
2641 * Note that bio_resid isn't normally used for zone
2642 * commands, but it is used by devstat_end_transaction_bio()
2643 * to determine how much data was transferred. Because
2644 * the size of the SCSI/ATA data structures is different
2645 * than the size of the BIO interface structures, the
2646 * amount of data actually transferred from the drive will
2647 * be different than the amount of data transferred to
2650 hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr;
/* Short transfer: not even a full header came back. */
2651 if (avail_len < sizeof(*hdr)) {
2653 * Is there a better error than EIO here? We asked
2654 * for at least the header, and we got less than
2657 bp->bio_error = EIO;
2658 bp->bio_flags |= BIO_ERROR;
2659 bp->bio_resid = bp->bio_bcount;
2663 hdr_len = le32dec(hdr->length);
2665 rep->entries_available = hdr_len / sizeof(*desc);
2667 rep->entries_available = 0;
2669 * NOTE: using the same values for the BIO version of the
2670 * same field as the SCSI/ATA values. This means we could
2671 * get some additional values that aren't defined in bio.h
2672 * if more values of the same field are defined later.
2674 rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
2675 rep->header.maximum_lba = le64dec(hdr->maximum_lba);
2677 * If the drive reports no entries that match the query,
2681 rep->entries_filled = 0;
2682 bp->bio_resid = bp->bio_bcount;
/* Descriptors present is bounded by both the transfer and hdr_len. */
2686 num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
2687 hdr_len / sizeof(*desc));
2689 * If the drive didn't return any data, then we're done.
2691 if (num_avail == 0) {
2692 rep->entries_filled = 0;
2693 bp->bio_resid = bp->bio_bcount;
2697 num_to_fill = min(num_avail, rep->entries_allocated);
2699 * If the user didn't allocate any entries for us to fill,
2702 if (num_to_fill == 0) {
2703 rep->entries_filled = 0;
2704 bp->bio_resid = bp->bio_bcount;
/* Copy each descriptor into the caller's entry array. */
2708 for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
2709 i < num_to_fill; i++, desc++, entry++) {
2711 * NOTE: we're mapping the values here directly
2712 * from the SCSI/ATA bit definitions to the bio.h
2713 * definitions. There is also a warning in
2714 * disk_zone.h, but the impact is that if
2715 * additional values are added in the SCSI/ATA
2716 * specs these will be visible to consumers of
2719 entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
2720 entry->zone_condition =
2721 (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
2722 SRZ_ZONE_COND_SHIFT;
2723 entry->zone_flags |= desc->zone_flags &
2724 (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
2725 entry->zone_length = le64dec(desc->zone_length);
2726 entry->zone_start_lba = le64dec(desc->zone_start_lba);
2727 entry->write_pointer_lba =
2728 le64dec(desc->write_pointer_lba);
2730 rep->entries_filled = num_to_fill;
2732 * Note that this residual is accurate from the user's
2733 * standpoint, but the amount transferred isn't accurate
2734 * from the standpoint of what actually came back from the
2737 bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry));
2740 case DISK_ZONE_GET_PARAMS:
2743 * In theory we should not get a GET_PARAMS bio, since it
2744 * should be handled without queueing the command to the
2747 panic("%s: Invalid zone command %d", __func__,
2748 bp->bio_zone.zone_cmd);
/* REPORT ZONES allocated a translation buffer at submit time; free it. */
2752 if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
2753 free(ccb->ataio.data_ptr, M_ATADA);
/*
 * adadone(): completion handler for every CCB this periph issues.
 *
 * Dispatches on the ADA_CCB_* type stored in ccb_state: normal buffer I/O
 * (including TRIM batches and BIO_ZONE), the read-ahead/write-cache setup
 * steps, and the probe-time log reads (LOGDIR, IDDIR, SUP_CAP, ZONE) that
 * discover zoned-device capabilities.  The CCB is always released before
 * returning.
 *
 * NOTE(review): the embedded line numbers are non-contiguous — this
 * extraction is missing source lines (case labels, break statements,
 * braces, multi-line call arguments); verify any conclusion against the
 * canonical sys/cam/ata/ata_da.c.
 */
2758 adadone(struct cam_periph *periph, union ccb *done_ccb)
2760 struct ada_softc *softc;
2761 struct ccb_ataio *ataio;
2762 struct cam_path *path;
2766 softc = (struct ada_softc *)periph->softc;
2767 ataio = &done_ccb->ataio;
2768 path = done_ccb->ccb_h.path;
2769 priority = done_ccb->ccb_h.pinfo.priority;
2771 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
2773 state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK;
/* Normal read/write/delete/zone I/O completion. */
2775 case ADA_CCB_BUFFER_IO:
2781 cam_periph_lock(periph);
2782 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
2783 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2784 error = adaerror(done_ccb, 0, 0);
2785 if (error == ERESTART) {
2786 /* A retry was scheduled, so just return. */
2787 cam_periph_unlock(periph);
2790 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2791 cam_release_devq(path,
2795 /*getcount_only*/0);
2797 * If we get an error on an NCQ DSM TRIM, fall back
2798 * to a non-NCQ DSM TRIM forever. Please note that if
2799 * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too.
2800 * However, for this one trim, we treat it as advisory
2801 * and return success up the stack.
2803 if (state == ADA_CCB_TRIM &&
2805 (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) {
2806 softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
2808 adasetdeletemethod(softc);
2811 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2812 panic("REQ_CMP with QFRZN");
2816 bp->bio_error = error;
2818 bp->bio_resid = bp->bio_bcount;
2819 bp->bio_flags |= BIO_ERROR;
2821 if (bp->bio_cmd == BIO_ZONE)
2822 adazonedone(periph, done_ccb);
2823 else if (state == ADA_CCB_TRIM)
2826 bp->bio_resid = ataio->resid;
2828 if ((bp->bio_resid > 0)
2829 && (bp->bio_cmd != BIO_ZONE))
2830 bp->bio_flags |= BIO_ERROR;
2832 softc->outstanding_cmds--;
2833 if (softc->outstanding_cmds == 0)
2834 softc->flags |= ADA_FLAG_WAS_OTAG;
2837 * We need to call cam_iosched before we call biodone so that we
2838 * don't measure any activity that happens in the completion
2839 * routine, which in the case of sendfile can be quite
2840 * extensive. Release the periph refcount taken in adastart()
2843 cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
2844 xpt_release_ccb(done_ccb);
2845 KASSERT(softc->refcount >= 1, ("adadone softc %p refcount %d", softc, softc->refcount));
/* A TRIM batch completes a whole queue of BIO_DELETEs at once. */
2847 if (state == ADA_CCB_TRIM) {
2848 TAILQ_HEAD(, bio) queue;
2852 TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue);
2854 * Normally, the xpt_release_ccb() above would make sure
2855 * that when we have more work to do, that work would
2856 * get kicked off. However, we specifically keep
2857 * trim_running set to 0 before the call above to allow
2858 * other I/O to progress when many BIO_DELETE requests
2859 * are pushed down. We set trim_running to 0 and call
2860 * adaschedule again so that we don't stall if there are
2861 * no other I/Os pending apart from BIO_DELETEs.
2863 cam_iosched_trim_done(softc->cam_iosched);
2864 adaschedule(periph);
2865 cam_periph_unlock(periph);
2866 while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
2867 TAILQ_REMOVE(&queue, bp1, bio_queue);
2868 bp1->bio_error = error;
2870 bp1->bio_flags |= BIO_ERROR;
2871 bp1->bio_resid = bp1->bio_bcount;
2877 adaschedule(periph);
2878 cam_periph_unlock(periph);
/* Probe step: enable read-ahead, then move on to write cache. */
2883 case ADA_CCB_RAHEAD:
2885 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2886 if (adaerror(done_ccb, 0, 0) == ERESTART) {
2887 /* Drop freeze taken due to CAM_DEV_QFREEZE */
2888 cam_release_devq(path, 0, 0, 0, FALSE);
2890 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
2891 cam_release_devq(path,
2895 /*getcount_only*/0);
2900 * Since our peripheral may be invalidated by an error
2901 * above or an external event, we must release our CCB
2902 * before releasing the reference on the peripheral.
2903 * The peripheral will only go away once the last reference
2904 * is removed, and we need it around for the CCB release
2908 xpt_release_ccb(done_ccb);
2909 softc->state = ADA_STATE_WCACHE;
2910 xpt_schedule(periph, priority);
2911 /* Drop freeze taken due to CAM_DEV_QFREEZE */
2912 cam_release_devq(path, 0, 0, 0, FALSE);
/* Probe step: write-cache setup; next is the log directory if zoned. */
2915 case ADA_CCB_WCACHE:
2917 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2918 if (adaerror(done_ccb, 0, 0) == ERESTART) {
2919 /* Drop freeze taken due to CAM_DEV_QFREEZE */
2920 cam_release_devq(path, 0, 0, 0, FALSE);
2922 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
2923 cam_release_devq(path,
2927 /*getcount_only*/0);
2931 /* Drop freeze taken due to CAM_DEV_QFREEZE */
2932 cam_release_devq(path, 0, 0, 0, FALSE);
2934 if ((softc->flags & ADA_FLAG_CAN_LOG)
2935 && (softc->zone_mode != ADA_ZONE_NONE)) {
2936 xpt_release_ccb(done_ccb);
2937 softc->state = ADA_STATE_LOGDIR;
2938 xpt_schedule(periph, priority);
2940 adaprobedone(periph, done_ccb);
/* Probe step: parse the General Purpose Log directory. */
2944 case ADA_CCB_LOGDIR:
2948 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
2950 softc->valid_logdir_len = 0;
2951 bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
2952 softc->valid_logdir_len =
2953 ataio->dxfer_len - ataio->resid;
2954 if (softc->valid_logdir_len > 0)
2955 bcopy(ataio->data_ptr, &softc->ata_logdir,
2956 min(softc->valid_logdir_len,
2957 sizeof(softc->ata_logdir)));
2959 * Figure out whether the Identify Device log is
2960 * supported. The General Purpose log directory
2961 * has a header, and lists the number of pages
2962 * available for each GP log identified by the
2963 * offset into the list.
2965 if ((softc->valid_logdir_len >=
2966 ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
2967 && (le16dec(softc->ata_logdir.header) ==
2968 ATA_GP_LOG_DIR_VERSION)
2969 && (le16dec(&softc->ata_logdir.num_pages[
2970 (ATA_IDENTIFY_DATA_LOG *
2971 sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
2972 softc->flags |= ADA_FLAG_CAN_IDLOG;
2974 softc->flags &= ~ADA_FLAG_CAN_IDLOG;
2977 error = adaerror(done_ccb, CAM_RETRY_SELTO,
2978 SF_RETRY_UA|SF_NO_PRINT);
2979 if (error == ERESTART)
2981 else if (error != 0) {
2983 * If we can't get the ATA log directory,
2984 * then ATA logs are effectively not
2985 * supported even if the bit is set in the
2988 softc->flags &= ~(ADA_FLAG_CAN_LOG |
2989 ADA_FLAG_CAN_IDLOG);
2990 if ((done_ccb->ccb_h.status &
2991 CAM_DEV_QFRZN) != 0) {
2992 /* Don't wedge this device's queue */
2993 cam_release_devq(done_ccb->ccb_h.path,
2997 /*getcount_only*/0);
3004 free(ataio->data_ptr, M_ATADA);
3007 && (softc->flags & ADA_FLAG_CAN_IDLOG)) {
3008 softc->state = ADA_STATE_IDDIR;
3009 xpt_release_ccb(done_ccb);
3010 xpt_schedule(periph, priority);
3012 adaprobedone(periph, done_ccb);
/* Probe step: scan the Identify Device Data log directory entries. */
3016 case ADA_CCB_IDDIR: {
3019 if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
3020 off_t entries_offset, max_entries;
3023 softc->valid_iddir_len = 0;
3024 bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
3025 softc->flags &= ~(ADA_FLAG_CAN_SUPCAP |
3027 softc->valid_iddir_len =
3028 ataio->dxfer_len - ataio->resid;
3029 if (softc->valid_iddir_len > 0)
3030 bcopy(ataio->data_ptr, &softc->ata_iddir,
3031 min(softc->valid_iddir_len,
3032 sizeof(softc->ata_iddir)));
3035 __offsetof(struct ata_identify_log_pages,entries);
3036 max_entries = softc->valid_iddir_len - entries_offset;
3037 if ((softc->valid_iddir_len > (entries_offset + 1))
3038 && (le64dec(softc->ata_iddir.header) ==
3040 && (softc->ata_iddir.entry_count > 0)) {
3043 num_entries = softc->ata_iddir.entry_count;
3044 num_entries = min(num_entries,
3045 softc->valid_iddir_len - entries_offset);
3046 for (i = 0; i < num_entries &&
3047 i < max_entries; i++) {
3048 if (softc->ata_iddir.entries[i] ==
3051 ADA_FLAG_CAN_SUPCAP;
3052 else if (softc->ata_iddir.entries[i]==
3058 ADA_FLAG_CAN_SUPCAP)
3065 error = adaerror(done_ccb, CAM_RETRY_SELTO,
3066 SF_RETRY_UA|SF_NO_PRINT);
3067 if (error == ERESTART)
3069 else if (error != 0) {
3071 * If we can't get the ATA Identify Data log
3072 * directory, then it effectively isn't
3073 * supported even if the ATA Log directory
3074 * a non-zero number of pages present for
3077 softc->flags &= ~ADA_FLAG_CAN_IDLOG;
3078 if ((done_ccb->ccb_h.status &
3079 CAM_DEV_QFRZN) != 0) {
3080 /* Don't wedge this device's queue */
3081 cam_release_devq(done_ccb->ccb_h.path,
3085 /*getcount_only*/0);
3090 free(ataio->data_ptr, M_ATADA);
3093 && (softc->flags & ADA_FLAG_CAN_SUPCAP)) {
3094 softc->state = ADA_STATE_SUP_CAP;
3095 xpt_release_ccb(done_ccb);
3096 xpt_schedule(periph, priority);
3098 adaprobedone(periph, done_ccb);
/* Probe step: parse the Supported Capabilities page (zoned + ZAC bits). */
3101 case ADA_CCB_SUP_CAP: {
3104 if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
3107 struct ata_identify_log_sup_cap *sup_cap;
3110 sup_cap = (struct ata_identify_log_sup_cap *)
3112 valid_len = ataio->dxfer_len - ataio->resid;
3114 __offsetof(struct ata_identify_log_sup_cap,
3115 sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
3116 if (valid_len >= needed_size) {
3117 uint64_t zoned, zac_cap;
3119 zoned = le64dec(sup_cap->zoned_cap);
3120 if (zoned & ATA_ZONED_VALID) {
3122 * This should have already been
3123 * set, because this is also in the
3124 * ATA identify data.
3126 if ((zoned & ATA_ZONED_MASK) ==
3127 ATA_SUPPORT_ZONE_HOST_AWARE)
3129 ADA_ZONE_HOST_AWARE;
3130 else if ((zoned & ATA_ZONED_MASK) ==
3131 ATA_SUPPORT_ZONE_DEV_MANAGED)
3133 ADA_ZONE_DRIVE_MANAGED;
3136 zac_cap = le64dec(sup_cap->sup_zac_cap);
3137 if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
3138 if (zac_cap & ATA_REPORT_ZONES_SUP)
3139 softc->zone_flags |=
3140 ADA_ZONE_FLAG_RZ_SUP;
3141 if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
3142 softc->zone_flags |=
3143 ADA_ZONE_FLAG_OPEN_SUP;
3144 if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
3145 softc->zone_flags |=
3146 ADA_ZONE_FLAG_CLOSE_SUP;
3147 if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
3148 softc->zone_flags |=
3149 ADA_ZONE_FLAG_FINISH_SUP;
3150 if (zac_cap & ATA_ND_RWP_SUP)
3151 softc->zone_flags |=
3152 ADA_ZONE_FLAG_RWP_SUP;
3155 * This field was introduced in
3156 * ACS-4, r08 on April 28th, 2015.
3157 * If the drive firmware was written
3158 * to an earlier spec, it won't have
3159 * the field. So, assume all
3160 * commands are supported.
3162 softc->zone_flags |=
3163 ADA_ZONE_FLAG_SUP_MASK;
3167 error = adaerror(done_ccb, CAM_RETRY_SELTO,
3168 SF_RETRY_UA|SF_NO_PRINT);
3169 if (error == ERESTART)
3171 else if (error != 0) {
3173 * If we can't get the ATA Identify Data
3174 * Supported Capabilities page, clear the
3177 softc->flags &= ~ADA_FLAG_CAN_SUPCAP;
3179 * And clear zone capabilities.
3181 softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK;
3182 if ((done_ccb->ccb_h.status &
3183 CAM_DEV_QFRZN) != 0) {
3184 /* Don't wedge this device's queue */
3185 cam_release_devq(done_ccb->ccb_h.path,
3189 /*getcount_only*/0);
3194 free(ataio->data_ptr, M_ATADA);
3197 && (softc->flags & ADA_FLAG_CAN_ZONE)) {
3198 softc->state = ADA_STATE_ZONE;
3199 xpt_release_ccb(done_ccb);
3200 xpt_schedule(periph, priority);
3202 adaprobedone(periph, done_ccb);
/* Probe step: parse the Zoned Device Information page, last probe state. */
3205 case ADA_CCB_ZONE: {
3208 if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
3209 struct ata_zoned_info_log *zi_log;
3213 zi_log = (struct ata_zoned_info_log *)ataio->data_ptr;
3215 valid_len = ataio->dxfer_len - ataio->resid;
3216 needed_size = __offsetof(struct ata_zoned_info_log,
3217 version_info) + 1 + sizeof(zi_log->version_info);
3218 if (valid_len >= needed_size) {
3221 tmpvar = le64dec(zi_log->zoned_cap);
3222 if (tmpvar & ATA_ZDI_CAP_VALID) {
3223 if (tmpvar & ATA_ZDI_CAP_URSWRZ)
3224 softc->zone_flags |=
3225 ADA_ZONE_FLAG_URSWRZ;
3227 softc->zone_flags &=
3228 ~ADA_ZONE_FLAG_URSWRZ;
3230 tmpvar = le64dec(zi_log->optimal_seq_zones);
3231 if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
3232 softc->zone_flags |=
3233 ADA_ZONE_FLAG_OPT_SEQ_SET;
3234 softc->optimal_seq_zones = (tmpvar &
3235 ATA_ZDI_OPT_SEQ_MASK);
3237 softc->zone_flags &=
3238 ~ADA_ZONE_FLAG_OPT_SEQ_SET;
3239 softc->optimal_seq_zones = 0;
3242 tmpvar =le64dec(zi_log->optimal_nonseq_zones);
3243 if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
3244 softc->zone_flags |=
3245 ADA_ZONE_FLAG_OPT_NONSEQ_SET;
3246 softc->optimal_nonseq_zones =
3247 (tmpvar & ATA_ZDI_OPT_NS_MASK);
3249 softc->zone_flags &=
3250 ~ADA_ZONE_FLAG_OPT_NONSEQ_SET;
3251 softc->optimal_nonseq_zones = 0;
3254 tmpvar = le64dec(zi_log->max_seq_req_zones);
3255 if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
3256 softc->zone_flags |=
3257 ADA_ZONE_FLAG_MAX_SEQ_SET;
3258 softc->max_seq_zones =
3259 (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
3261 softc->zone_flags &=
3262 ~ADA_ZONE_FLAG_MAX_SEQ_SET;
3263 softc->max_seq_zones = 0;
3267 error = adaerror(done_ccb, CAM_RETRY_SELTO,
3268 SF_RETRY_UA|SF_NO_PRINT);
3269 if (error == ERESTART)
3271 else if (error != 0) {
3272 softc->flags &= ~ADA_FLAG_CAN_ZONE;
3273 softc->flags &= ~ADA_ZONE_FLAG_SET_MASK;
3275 if ((done_ccb->ccb_h.status &
3276 CAM_DEV_QFRZN) != 0) {
3277 /* Don't wedge this device's queue */
3278 cam_release_devq(done_ccb->ccb_h.path,
3282 /*getcount_only*/0);
3286 free(ataio->data_ptr, M_ATADA);
3288 adaprobedone(periph, done_ccb);
3292 /* No-op. We're polling */
3297 xpt_release_ccb(done_ccb);
/*
 * adaerror(): periph-specific error recovery hook.
 *
 * Classifies the failed CCB by CAM status (timeouts, aborts, HBA/ATA
 * errors, ...) and then defers to the generic cam_periph_error() for the
 * actual retry/requeue decision.  Returns that function's verdict
 * (e.g. ERESTART when a retry has been scheduled).
 *
 * NOTE(review): non-contiguous embedded line numbers — the per-case bodies
 * between the case labels were dropped in extraction; consult the canonical
 * sys/cam/ata/ata_da.c for what each case does.
 */
3301 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
3304 struct ada_softc *softc;
3305 struct cam_periph *periph;
3307 periph = xpt_path_periph(ccb->ccb_h.path);
3308 softc = (struct ada_softc *)periph->softc;
3310 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
3311 case CAM_CMD_TIMEOUT:
3314 case CAM_REQ_ABORTED:
3315 case CAM_REQ_CMP_ERR:
3316 case CAM_REQ_TERMIO:
3317 case CAM_UNREC_HBA_ERROR:
3318 case CAM_DATA_RUN_ERR:
3319 case CAM_ATA_STATUS_ERROR:
3327 return(cam_periph_error(ccb, cam_flags, sense_flags));
/*
 * adasetgeom(): derive disk geometry and GEOM disk parameters from the
 * ATA IDENTIFY data in the cgd.
 *
 * Computes sector size, CHS geometry, and capacity (preferring 48-bit LBA
 * over 28-bit LBA over CHS), clamps the maximum I/O size to both the SIM's
 * limit and the ATA command addressing limit, and fills in the softc->disk
 * fields (d_maxsize, d_flags, d_delmaxsize, descr/ident strings, media and
 * stripe sizes, firmware geometry, rotation rate, attachment string).
 *
 * NOTE(review): non-contiguous embedded line numbers — else-branches and
 * some assignments were dropped in extraction; verify against the canonical
 * sys/cam/ata/ata_da.c.
 */
3331 adasetgeom(struct ada_softc *softc, struct ccb_getdev *cgd)
3333 struct disk_params *dp = &softc->params;
3334 u_int64_t lbasize48;
3336 u_int maxio, d_flags;
3338 dp->secsize = ata_logical_sector_size(&cgd->ident_data);
/* Prefer the "current" CHS values when IDENTIFY words 54-58 are valid. */
3339 if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
3340 cgd->ident_data.current_heads != 0 &&
3341 cgd->ident_data.current_sectors != 0) {
3342 dp->heads = cgd->ident_data.current_heads;
3343 dp->secs_per_track = cgd->ident_data.current_sectors;
3344 dp->cylinders = cgd->ident_data.cylinders;
3345 dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
3346 ((u_int32_t)cgd->ident_data.current_size_2 << 16);
3348 dp->heads = cgd->ident_data.heads;
3349 dp->secs_per_track = cgd->ident_data.sectors;
3350 dp->cylinders = cgd->ident_data.cylinders;
3351 dp->sectors = cgd->ident_data.cylinders *
3352 (u_int32_t)(dp->heads * dp->secs_per_track);
3354 lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
3355 ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
3357 /* use the 28bit LBA size if valid or bigger than the CHS mapping */
3358 if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
3359 dp->sectors = lbasize;
3361 /* use the 48bit LBA size if valid */
3362 lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
3363 ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
3364 ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
3365 ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
3366 if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
3367 lbasize48 > ATA_MAX_28BIT_LBA)
3368 dp->sectors = lbasize48;
3370 maxio = softc->cpi.maxio; /* Honor max I/O size of SIM */
3372 maxio = DFLTPHYS; /* traditional default */
3373 else if (maxio > MAXPHYS)
3374 maxio = MAXPHYS; /* for safety */
/* 48-bit commands address 65536 sectors per transfer; 28-bit only 256. */
3375 if (softc->flags & ADA_FLAG_CAN_48BIT)
3376 maxio = min(maxio, 65536 * softc->params.secsize);
3377 else /* 28bit ATA command limit */
3378 maxio = min(maxio, 256 * softc->params.secsize);
3379 if (softc->quirks & ADA_Q_128KB)
3380 maxio = min(maxio, 128 * 1024);
3381 softc->disk->d_maxsize = maxio;
3382 d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
3383 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
3384 d_flags |= DISKFLAG_CANFLUSHCACHE;
3385 if (softc->flags & ADA_FLAG_CAN_TRIM) {
3386 d_flags |= DISKFLAG_CANDELETE;
3387 softc->disk->d_delmaxsize = softc->params.secsize *
3388 ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
/* CFA erase can also service BIO_DELETE, but only on 28-bit devices. */
3389 } else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
3390 !(softc->flags & ADA_FLAG_CAN_48BIT)) {
3391 d_flags |= DISKFLAG_CANDELETE;
3392 softc->disk->d_delmaxsize = 256 * softc->params.secsize;
3394 softc->disk->d_delmaxsize = maxio;
3395 if ((softc->cpi.hba_misc & PIM_UNMAPPED) != 0) {
3396 d_flags |= DISKFLAG_UNMAPPED_BIO;
3397 softc->unmappedio = 1;
3399 softc->disk->d_flags = d_flags;
3400 strlcpy(softc->disk->d_descr, cgd->ident_data.model,
3401 MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
3402 strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
3403 MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial)));
3405 softc->disk->d_sectorsize = softc->params.secsize;
3406 softc->disk->d_mediasize = (off_t)softc->params.sectors *
3407 softc->params.secsize;
/* Advertise the physical sector size as the stripe when it differs. */
3408 if (ata_physical_sector_size(&cgd->ident_data) !=
3409 softc->params.secsize) {
3410 softc->disk->d_stripesize =
3411 ata_physical_sector_size(&cgd->ident_data);
3412 softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
3413 ata_logical_sector_offset(&cgd->ident_data)) %
3414 softc->disk->d_stripesize;
3415 } else if (softc->quirks & ADA_Q_4K) {
3416 softc->disk->d_stripesize = 4096;
3417 softc->disk->d_stripeoffset = 0;
3419 softc->disk->d_fwsectors = softc->params.secs_per_track;
3420 softc->disk->d_fwheads = softc->params.heads;
3421 ata_disk_firmware_geom_adjust(softc->disk);
3422 softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate;
3423 snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
3424 "%s%d", softc->cpi.dev_name, softc->cpi.unit_number);
/*
 * adasendorderedtag(): periodic callout that requests an occasional
 * ordered tag.
 *
 * If commands are outstanding and none was recently ordered, set
 * ADA_FLAG_NEED_OTAG so the next command is issued as an ordered tag,
 * then re-arm the callout.  Keeps the queue from starving under a
 * constant stream of simple tags.
 */
3428 adasendorderedtag(void *arg)
3430 struct ada_softc *softc = arg;
3432 if (ada_send_ordered) {
3433 if (softc->outstanding_cmds > 0) {
3434 if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0)
3435 softc->flags |= ADA_FLAG_NEED_OTAG;
3436 softc->flags &= ~ADA_FLAG_WAS_OTAG;
3439 /* Queue us up again */
3440 callout_reset(&softc->sendordered_c,
3441 (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
3442 adasendorderedtag, softc);
/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 *
 * NOTE(review): the function's name/signature line was dropped by this
 * extraction (canonically adaflush(void) in sys/cam/ata/ata_da.c — confirm).
 * Issues FLUSHCACHE/FLUSHCACHE48 synchronously via cam_periph_runccb();
 * when the scheduler is stopped (panic path) it defers to adadump() instead
 * of taking locks.
 */
3446 * Step through all ADA peripheral drivers, and if the device is still open,
3447 * sync the disk cache to physical media.
3452 struct cam_periph *periph;
3453 struct ada_softc *softc;
3457 CAM_PERIPH_FOREACH(periph, &adadriver) {
3458 softc = (struct ada_softc *)periph->softc;
3459 if (SCHEDULER_STOPPED()) {
3460 /* If we paniced with the lock held, do not recurse. */
3461 if (!cam_periph_owned(periph) &&
3462 (softc->flags & ADA_FLAG_OPEN)) {
3463 adadump(softc->disk, NULL, 0, 0, 0);
3467 cam_periph_lock(periph);
3469 * We only sync the cache if the drive is still open, and
3470 * if the drive is capable of it..
3472 if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
3473 (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
3474 cam_periph_unlock(periph);
3478 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
3479 cam_fill_ataio(&ccb->ataio,
3486 ada_default_timeout*1000);
3487 if (softc->flags & ADA_FLAG_CAN_48BIT)
3488 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
3490 ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
3492 error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
3493 /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
3494 softc->disk->d_devstat);
3496 xpt_print(periph->path, "Synchronize cache failed\n");
3497 xpt_release_ccb(ccb);
3498 cam_periph_unlock(periph);
/*
 * adaspindown(): send a power-management command (e.g. STANDBY IMMEDIATE,
 * IDLE IMMEDIATE, SLEEP) to every ada device that supports power
 * management.
 *
 * 'cmd' is the 28-bit ATA opcode to issue; 'flags' is OR'ed into the ataio
 * direction flags (callers pass CAM_DEV_QFREEZE for suspend).  Runs the
 * command synchronously on a stack-allocated CCB so it also works on the
 * shutdown/panic path.
 */
3503 adaspindown(uint8_t cmd, int flags)
3505 struct cam_periph *periph;
3506 struct ada_softc *softc;
3507 struct ccb_ataio local_ccb;
3510 CAM_PERIPH_FOREACH(periph, &adadriver) {
3511 /* If we paniced with lock held - not recurse here. */
3512 if (cam_periph_owned(periph))
3514 cam_periph_lock(periph);
3515 softc = (struct ada_softc *)periph->softc;
3517 * We only spin-down the drive if it is capable of it..
3519 if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
3520 cam_periph_unlock(periph);
3525 xpt_print(periph->path, "spin-down\n");
/* Stack CCB: usable even when the normal CCB allocator is unavailable. */
3527 memset(&local_ccb, 0, sizeof(local_ccb));
3528 xpt_setup_ccb(&local_ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
3529 local_ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
3531 cam_fill_ataio(&local_ccb,
3534 CAM_DIR_NONE | flags,
3538 ada_default_timeout*1000);
3539 ata_28bit_cmd(&local_ccb, cmd, 0, 0, 0);
3540 error = cam_periph_runccb((union ccb *)&local_ccb, adaerror,
3541 /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
3542 softc->disk->d_devstat);
3544 xpt_print(periph->path, "Spin-down disk failed\n");
3545 cam_periph_unlock(periph);
/*
 * adashutdown(): shutdown eventhandler.
 *
 * Optionally (ada_spindown_shutdown tunable) sends STANDBY IMMEDIATE when
 * power is about to be lost (halt/poweroff/powercycle) or IDLE IMMEDIATE
 * on a warm reboot, so drives commit volatile data before the HBA is reset.
 */
3550 adashutdown(void *arg, int howto)
3557 * STANDBY IMMEDIATE saves any volatile data to the drive. It also spins
3558 * down hard drives. IDLE IMMEDIATE also saves the volatile data without
3559 * a spindown. We send the former when we expect to lose power soon. For
3560 * a warm boot, we send the latter to avoid a thundering herd of spinups
3561 * just after the kernel loads while probing. We have to do something to
3562 * flush the data because the BIOS in many systems resets the HBA
3563 * causing a COMINIT/COMRESET negotiation, which some drives interpret
3564 * as license to toss the volatile data, and others count as unclean
3565 * shutdown when in the Active PM state in SMART attributes.
3567 * adaspindown will ensure that we don't send this to a drive that
3568 * doesn't support it.
3570 if (ada_spindown_shutdown != 0) {
3571 how = (howto & (RB_HALT | RB_POWEROFF | RB_POWERCYCLE)) ?
3572 ATA_STANDBY_IMMEDIATE : ATA_IDLE_IMMEDIATE;
3573 adaspindown(how, 0);
/*
 * adasuspend(): power-suspend eventhandler.
 *
 * Optionally (ada_spindown_suspend tunable) puts every capable drive to
 * SLEEP, freezing its devq (CAM_DEV_QFREEZE) until adaresume() releases it.
 */
3578 adasuspend(void *arg)
3583 * SLEEP also flushes any volatile data, like STANDBY IMMEDIATE,
3584 * so we don't need to send it as well.
3586 if (ada_spindown_suspend != 0)
3587 adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
/*
 * adaresume(): power-resume eventhandler.
 *
 * If drives were put to sleep in adasuspend(), release the devq freeze
 * (taken via CAM_DEV_QFREEZE there) on every power-management-capable
 * drive so queued I/O can flow again.
 */
3591 adaresume(void *arg)
3593 struct cam_periph *periph;
3594 struct ada_softc *softc;
3596 if (ada_spindown_suspend == 0)
3599 CAM_PERIPH_FOREACH(periph, &adadriver) {
3600 cam_periph_lock(periph);
3601 softc = (struct ada_softc *)periph->softc;
3603 * Only drives capable of power management were frozen at
3604 * suspend time; skip the rest.
3605 if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
3606 cam_periph_unlock(periph);
3611 xpt_print(periph->path, "resume\n");
3614 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
3617 cam_release_devq(periph->path,
3621 /*getcount_only*/0);
3623 cam_periph_unlock(periph);
3627 #endif /* _KERNEL */