/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
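/*
 * Each template below comes in a default/changeable pair: the "default"
 * page supplies the initial field values, while in the "changeable" page
 * a one bit marks a field the initiator is allowed to modify via MODE
 * SELECT.  A minimal sketch of checking a request against that mask
 * (hypothetical helper, not part of CTL):
 *
 *	static int
 *	mode_bits_writable(const uint8_t *chg_page, int off, uint8_t bits)
 *	{
 *		// Allowed only if every requested bit is changeable.
 *		return ((chg_page[off] & bits) == bits);
 *	}
 */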
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
        /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
        /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
        /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
        /*read_retry_count*/0,
        /*correction_span*/0,
        /*head_offset_count*/0,
        /*data_strobe_offset_cnt*/0,
        /*byte8*/SMS_RWER_LBPERE,
        /*write_retry_count*/0,
        /*reserved2*/0,
        /*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
        /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
        /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
        /*byte3*/SMS_RWER_PER,
        /*read_retry_count*/0,
        /*correction_span*/0,
        /*head_offset_count*/0,
        /*data_strobe_offset_cnt*/0,
        /*byte8*/SMS_RWER_LBPERE,
        /*write_retry_count*/0,
        /*reserved2*/0,
        /*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
                                CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ SFP_HSEC,
        /*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {0, 0},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ 0,
        /*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ CTL_DEFAULT_HEADS,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ SRDP_RPL_DISABLED,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
                           CTL_DEFAULT_ROTATION_RATE & 0xff},
        /*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ 0,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ 0,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {0, 0},
        /*reserved2*/ {0, 0}
};

const static struct scsi_da_verify_recovery_page verify_er_page_default = {
        /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
        /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
        /*byte3*/0,
        /*read_retry_count*/0,
        /*reserved*/{ 0, 0, 0, 0, 0, 0 },
        /*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_verify_recovery_page verify_er_page_changeable = {
        /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE,
        /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2,
        /*byte3*/SMS_VER_PER,
        /*read_retry_count*/0,
        /*reserved*/{ 0, 0, 0, 0, 0, 0 },
        /*recovery_time_limit*/{0, 0},
};

const static struct scsi_caching_page caching_page_default = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_DISC | SCP_WCE,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0xff, 0xff},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0xff, 0xff},
        /*max_pf_ceiling*/ {0xff, 0xff},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_WCE | SCP_RCD,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0, 0},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0, 0},
        /*max_pf_ceiling*/ {0, 0},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/0,
        /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
        /*eca_and_aen*/0,
        /*flags4*/SCP_TAS,
        /*aen_holdoff_period*/{0, 0},
        /*busy_timeout_period*/{0, 0},
        /*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/SCP_DSENSE,
        /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR,
        /*eca_and_aen*/SCP_SWP,
        /*flags4*/0,
        /*aen_holdoff_period*/{0, 0},
        /*busy_timeout_period*/{0, 0},
        /*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN     (sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
        /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
        /*subpage_code*/0x01,
        /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
        /*flags*/0,
        /*prio*/0,
        /*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
        /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
        /*subpage_code*/0x01,
        /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
        /*flags*/0,
        /*prio*/0,
        /*max_sense*/0xff
};

const static struct scsi_info_exceptions_page ie_page_default = {
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
        /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
        /*info_flags*/SIEP_FLAGS_EWASC,
        /*mrie*/SIEP_MRIE_NO,
        /*interval_timer*/{0, 0, 0, 0},
        /*report_count*/{0, 0, 0, 1}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
        /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
        /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST |
            SIEP_FLAGS_LOGERR,
        /*mrie*/0x0f,
        /*interval_timer*/{0xff, 0xff, 0xff, 0xff},
        /*report_count*/{0xff, 0xff, 0xff, 0xff}
};

#define CTL_LBPM_LEN    (sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
        /*subpage_code*/0x02,
        /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
        /*flags*/0,
        /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
        /*descr*/{}},
        {{/*flags*/0,
          /*resource*/0x01,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0x02,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0xf1,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0xf2,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}}
        }
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
        /*subpage_code*/0x02,
        /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
        /*flags*/SLBPP_SITUA,
        /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
        /*descr*/{}},
        {{/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}}
        }
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
        /*page_code*/SMS_CDDVD_CAPS_PAGE,
        /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
        /*caps1*/0x3f,
        /*caps2*/0x00,
        /*caps3*/0xf0,
        /*caps4*/0x00,
        /*caps5*/0x29,
        /*caps6*/0x00,
        /*obsolete*/{0, 0},
        /*nvol_levels*/{0, 0},
        /*buffer_size*/{8, 0},
        /*obsolete2*/{0, 0},
        /*reserved*/0,
        /*digital*/0,
        /*obsolete3*/0,
        /*copy_management*/0,
        /*reserved2*/0,
        /*rotation_control*/0,
        /*cur_write_speed*/0,
        /*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
        /*page_code*/SMS_CDDVD_CAPS_PAGE,
        /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
        /*caps1*/0,
        /*caps2*/0,
        /*caps3*/0,
        /*caps4*/0,
        /*caps5*/0,
        /*caps6*/0,
        /*obsolete*/{0, 0},
        /*nvol_levels*/{0, 0},
        /*buffer_size*/{0, 0},
        /*obsolete2*/{0, 0},
        /*reserved*/0,
        /*digital*/0,
        /*obsolete3*/0,
        /*copy_management*/0,
        /*reserved2*/0,
        /*rotation_control*/0,
        /*cur_write_speed*/0,
        /*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
#ifdef  CTL_TIME_IO
static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
    &ctl_time_io_secs, 0, "Log requests taking more than this many seconds");
#endif
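/*
 * Usage sketch: since the OIDs above are CTLFLAG_RDTUN/CTLFLAG_RWTUN,
 * they can be seeded as loader tunables, and the RWTUN ones can also be
 * changed at runtime (the values below are illustrative only):
 *
 *	# /boot/loader.conf
 *	kern.cam.ctl.worker_threads=4
 *
 *	# at runtime
 *	sysctl kern.cam.ctl.debug=1
 */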

/*
 * Maximum number of LUNs we support.  MUST be a power of 2.
 */
#define CTL_DEFAULT_MAX_LUNS    1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define CTL_DEFAULT_MAX_PORTS           256
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");
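/*
 * Example (illustrative values): both limits are boot-time tunables, and
 * any new max_luns value must honor the power-of-2 requirement stated
 * above, so 2048 is acceptable while 1500 is not:
 *
 *	# /boot/loader.conf
 *	kern.cam.ctl.max_luns=2048
 *	kern.cam.ctl.max_ports=512
 */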

/*
 * Maximum number of initiators we support.
 */
#define CTL_MAX_INITIATORS      (CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES   11
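/*
 * These VPD pages can be exercised from an initiator host, e.g. with
 * camcontrol(8) against a da(4) peripheral backed by CTL (the device
 * name below is illustrative):
 *
 *	camcontrol inquiry da0 -D	# standard INQUIRY data
 *	camcontrol inquiry da0 -S	# serial number page (0x80)
 */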

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
                                  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
                              struct ctl_ooa *ooa_hdr,
                              struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
                     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
                           uint64_t res_key, uint64_t sa_res_key,
                           uint8_t type, uint32_t residx,
                           struct ctl_scsiio *ctsio,
                           struct scsi_per_res_out *cdb,
                           struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
                                  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
                                         int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
                                         int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
                                union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
                                const struct ctl_cmd_entry *entry,
                                struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
                               struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union ctl_io *io);
static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx,
                         ctl_ua_type ua_type);
static int ctl_lun_reset(union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
                              ctl_ua_type ua_type);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
                                    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);
static int ctl_ha_init(void);
static int ctl_ha_shutdown(void);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       ctl_open,
        .d_close =      ctl_close,
        .d_ioctl =      ctl_ioctl,
        .d_name =       "ctl",
};
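/*
 * A sketch of how userland talks to this character device (device path
 * and ioctl per ctl_ioctl.h; error handling omitted, illustrative only):
 *
 *	int fd = open("/dev/cam/ctl", O_RDWR);
 *	struct ctl_lun_list list;	// see ctl_ioctl.h
 *	...fill in list...
 *	ioctl(fd, CTL_LUN_LIST, &list);
 *	close(fd);
 *
 * ctladm(8) is the standard consumer of this interface.
 */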


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
        "ctl",
        ctl_module_event_handler,
        NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ha_frontend =
{
        .name = "ha",
        .init = ctl_ha_init,
        .shutdown = ctl_ha_shutdown,
};

static int
ctl_ha_init(void)
{
        struct ctl_softc *softc = control_softc;

        if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
                            &softc->othersc_pool) != 0)
                return (ENOMEM);
        if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
                ctl_pool_free(softc->othersc_pool);
                return (EIO);
        }
        if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
            != CTL_HA_STATUS_SUCCESS) {
                ctl_ha_msg_destroy(softc);
                ctl_pool_free(softc->othersc_pool);
                return (EIO);
        }
        return (0);
}

static int
ctl_ha_shutdown(void)
{
        struct ctl_softc *softc = control_softc;
        struct ctl_port *port;

        ctl_ha_msg_shutdown(softc);
        if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS)
                return (EIO);
        if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
                return (EIO);
        ctl_pool_free(softc->othersc_pool);
        while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) {
                ctl_port_deregister(port);
                free(port->port_name, M_CTL);
                free(port, M_CTL);
        }
        return (0);
}

static void
ctl_ha_datamove(union ctl_io *io)
{
        struct ctl_lun *lun = CTL_LUN(io);
        struct ctl_sg_entry *sgl;
        union ctl_ha_msg msg;
        uint32_t sg_entries_sent;
        int do_sg_copy, i, j;

        memset(&msg.dt, 0, sizeof(msg.dt));
        msg.hdr.msg_type = CTL_MSG_DATAMOVE;
        msg.hdr.original_sc = io->io_hdr.remote_io;
        msg.hdr.serializing_sc = io;
        msg.hdr.nexus = io->io_hdr.nexus;
        msg.hdr.status = io->io_hdr.status;
        msg.dt.flags = io->io_hdr.flags;

        /*
         * We convert everything into a S/G list here.  We can't
         * pass by reference, only by value between controllers.
         * So we can't pass a pointer to the S/G list, only as many
         * S/G entries as we can fit in here.  If it's possible for
         * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
         * then we need to break this up into multiple transfers.
         */
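        /*
         * For example (numbers illustrative only): if a message can carry
         * 16 S/G entries and an I/O has 40, the loop below emits three
         * messages of 16, 16 and 8 entries, setting sg_last only on the
         * final one so the peer knows when the list is complete.
         */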
        if (io->scsiio.kern_sg_entries == 0) {
                msg.dt.kern_sg_entries = 1;
#if 0
                if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
                        msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
                } else {
                        /* XXX KDM use busdma here! */
                        msg.dt.sg_list[0].addr =
                            (void *)vtophys(io->scsiio.kern_data_ptr);
                }
#else
                KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
                    ("HA does not support BUS_ADDR"));
                msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
                msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
                do_sg_copy = 0;
        } else {
                msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
                do_sg_copy = 1;
        }

        msg.dt.kern_data_len = io->scsiio.kern_data_len;
        msg.dt.kern_total_len = io->scsiio.kern_total_len;
        msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
        msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
        msg.dt.sg_sequence = 0;

        /*
         * Loop until we've sent all of the S/G entries.  On the
         * other end, we'll recompose these S/G entries into one
         * contiguous list before processing.
         */
        for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
            msg.dt.sg_sequence++) {
                msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
                    sizeof(msg.dt.sg_list[0])),
                    msg.dt.kern_sg_entries - sg_entries_sent);
                if (do_sg_copy != 0) {
                        sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                        for (i = sg_entries_sent, j = 0;
                             i < msg.dt.cur_sg_entries; i++, j++) {
#if 0
                                if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
                                        msg.dt.sg_list[j].addr = sgl[i].addr;
                                } else {
                                        /* XXX KDM use busdma here! */
                                        msg.dt.sg_list[j].addr =
                                            (void *)vtophys(sgl[i].addr);
                                }
#else
                                KASSERT((io->io_hdr.flags &
                                    CTL_FLAG_BUS_ADDR) == 0,
                                    ("HA does not support BUS_ADDR"));
                                msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
                                msg.dt.sg_list[j].len = sgl[i].len;
                        }
                }

                sg_entries_sent += msg.dt.cur_sg_entries;
                msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
                if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
                    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
                    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
                    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
                        io->io_hdr.port_status = 31341;
                        io->scsiio.be_move_done(io);
                        return;
                }
                msg.dt.sent_sg_entries = sg_entries_sent;
        }

        /*
         * Officially hand over the request from us to the peer.
         * If failover has just happened, then we must return an error.
         * If failover happens just after, then it is not our problem.
         */
        if (lun)
                mtx_lock(&lun->lun_lock);
        if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
                if (lun)
                        mtx_unlock(&lun->lun_lock);
                io->io_hdr.port_status = 31342;
                io->scsiio.be_move_done(io);
                return;
        }
        io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
        io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
        if (lun)
                mtx_unlock(&lun->lun_lock);
}

static void
ctl_ha_done(union ctl_io *io)
{
        union ctl_ha_msg msg;

        if (io->io_hdr.io_type == CTL_IO_SCSI) {
                memset(&msg, 0, sizeof(msg));
                msg.hdr.msg_type = CTL_MSG_FINISH_IO;
                msg.hdr.original_sc = io->io_hdr.remote_io;
                msg.hdr.nexus = io->io_hdr.nexus;
                msg.hdr.status = io->io_hdr.status;
                msg.scsi.scsi_status = io->scsiio.scsi_status;
                msg.scsi.tag_num = io->scsiio.tag_num;
                msg.scsi.tag_type = io->scsiio.tag_type;
                msg.scsi.sense_len = io->scsiio.sense_len;
                memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
                    io->scsiio.sense_len);
                ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
                    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
                    msg.scsi.sense_len, M_WAITOK);
        }
        ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
                            union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.original_sc == NULL) {
                printf("%s: original_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.original_sc->scsiio;
        ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctsio->io_hdr.status = msg_info->hdr.status;
        ctsio->scsi_status = msg_info->scsi.scsi_status;
        ctsio->sense_len = msg_info->scsi.sense_len;
        memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
               msg_info->scsi.sense_len);
        ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
                                union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.serializing_sc == NULL) {
                printf("%s: serializing_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.serializing_sc->scsiio;
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
        struct ctl_softc *softc = lun->ctl_softc;
        union ctl_ha_msg *msg;
        struct ctl_ha_msg_lun_pr_key pr_key;
        int i, k;

        if (softc->ha_link != CTL_HA_LINK_ONLINE)
                return;
        mtx_lock(&lun->lun_lock);
        i = sizeof(msg->lun);
        if (lun->lun_devid)
                i += lun->lun_devid->len;
        i += sizeof(pr_key) * lun->pr_key_count;
alloc:
        mtx_unlock(&lun->lun_lock);
        msg = malloc(i, M_CTL, M_WAITOK);
        mtx_lock(&lun->lun_lock);
        k = sizeof(msg->lun);
        if (lun->lun_devid)
                k += lun->lun_devid->len;
        k += sizeof(pr_key) * lun->pr_key_count;
        if (i < k) {
                free(msg, M_CTL);
                i = k;
                goto alloc;
        }
        bzero(&msg->lun, sizeof(msg->lun));
        msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
        msg->hdr.nexus.targ_lun = lun->lun;
        msg->hdr.nexus.targ_mapped_lun = lun->lun;
        msg->lun.flags = lun->flags;
        msg->lun.pr_generation = lun->pr_generation;
        msg->lun.pr_res_idx = lun->pr_res_idx;
        msg->lun.pr_res_type = lun->pr_res_type;
        msg->lun.pr_key_count = lun->pr_key_count;
        i = 0;
        if (lun->lun_devid) {
                msg->lun.lun_devid_len = lun->lun_devid->len;
                memcpy(&msg->lun.data[i], lun->lun_devid->data,
                    msg->lun.lun_devid_len);
                i += msg->lun.lun_devid_len;
        }
        for (k = 0; k < CTL_MAX_INITIATORS; k++) {
                if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
                        continue;
                pr_key.pr_iid = k;
                memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
                i += sizeof(pr_key);
        }
        mtx_unlock(&lun->lun_lock);
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
884             M_WAITOK);
885         free(msg, M_CTL);
886
887         if (lun->flags & CTL_LUN_PRIMARY_SC) {
888                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
889                         ctl_isc_announce_mode(lun, -1,
890                             lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
891                             lun->mode_pages.index[i].subpage);
892                 }
893         }
894 }
895
896 void
897 ctl_isc_announce_port(struct ctl_port *port)
898 {
899         struct ctl_softc *softc = port->ctl_softc;
900         union ctl_ha_msg *msg;
901         int i;
902
903         if (port->targ_port < softc->port_min ||
904             port->targ_port >= softc->port_max ||
905             softc->ha_link != CTL_HA_LINK_ONLINE)
906                 return;
907         i = sizeof(msg->port) + strlen(port->port_name) + 1;
908         if (port->lun_map)
909                 i += port->lun_map_size * sizeof(uint32_t);
910         if (port->port_devid)
911                 i += port->port_devid->len;
912         if (port->target_devid)
913                 i += port->target_devid->len;
914         if (port->init_devid)
915                 i += port->init_devid->len;
916         msg = malloc(i, M_CTL, M_WAITOK);
917         bzero(&msg->port, sizeof(msg->port));
918         msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
919         msg->hdr.nexus.targ_port = port->targ_port;
920         msg->port.port_type = port->port_type;
921         msg->port.physical_port = port->physical_port;
922         msg->port.virtual_port = port->virtual_port;
923         msg->port.status = port->status;
924         i = 0;
925         msg->port.name_len = sprintf(&msg->port.data[i],
926             "%d:%s", softc->ha_id, port->port_name) + 1;
927         i += msg->port.name_len;
928         if (port->lun_map) {
929                 msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t);
930                 memcpy(&msg->port.data[i], port->lun_map,
931                     msg->port.lun_map_len);
932                 i += msg->port.lun_map_len;
933         }
934         if (port->port_devid) {
935                 msg->port.port_devid_len = port->port_devid->len;
936                 memcpy(&msg->port.data[i], port->port_devid->data,
937                     msg->port.port_devid_len);
938                 i += msg->port.port_devid_len;
939         }
940         if (port->target_devid) {
941                 msg->port.target_devid_len = port->target_devid->len;
942                 memcpy(&msg->port.data[i], port->target_devid->data,
943                     msg->port.target_devid_len);
944                 i += msg->port.target_devid_len;
945         }
946         if (port->init_devid) {
947                 msg->port.init_devid_len = port->init_devid->len;
948                 memcpy(&msg->port.data[i], port->init_devid->data,
949                     msg->port.init_devid_len);
950                 i += msg->port.init_devid_len;
951         }
952         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
953             M_WAITOK);
954         free(msg, M_CTL);
955 }
956
957 void
958 ctl_isc_announce_iid(struct ctl_port *port, int iid)
959 {
960         struct ctl_softc *softc = port->ctl_softc;
961         union ctl_ha_msg *msg;
962         int i, l;
963
964         if (port->targ_port < softc->port_min ||
965             port->targ_port >= softc->port_max ||
966             softc->ha_link != CTL_HA_LINK_ONLINE)
967                 return;
968         mtx_lock(&softc->ctl_lock);
969         i = sizeof(msg->iid);
970         l = 0;
971         if (port->wwpn_iid[iid].name)
972                 l = strlen(port->wwpn_iid[iid].name) + 1;
973         i += l;
974         msg = malloc(i, M_CTL, M_NOWAIT);
975         if (msg == NULL) {
976                 mtx_unlock(&softc->ctl_lock);
977                 return;
978         }
979         bzero(&msg->iid, sizeof(msg->iid));
980         msg->hdr.msg_type = CTL_MSG_IID_SYNC;
981         msg->hdr.nexus.targ_port = port->targ_port;
982         msg->hdr.nexus.initid = iid;
983         msg->iid.in_use = port->wwpn_iid[iid].in_use;
984         msg->iid.name_len = l;
985         msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
986         if (port->wwpn_iid[iid].name)
987                 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
988         mtx_unlock(&softc->ctl_lock);
989         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
990         free(msg, M_CTL);
991 }
992
993 void
994 ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
995     uint8_t page, uint8_t subpage)
996 {
997         struct ctl_softc *softc = lun->ctl_softc;
998         union ctl_ha_msg msg;
999         u_int i;
1000
1001         if (softc->ha_link != CTL_HA_LINK_ONLINE)
1002                 return;
1003         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
1004                 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
1005                     page && lun->mode_pages.index[i].subpage == subpage)
1006                         break;
1007         }
1008         if (i == CTL_NUM_MODE_PAGES)
1009                 return;
1010
1011         /* Don't try to replicate pages not present on this device. */
1012         if (lun->mode_pages.index[i].page_data == NULL)
1013                 return;
1014
1015         bzero(&msg.mode, sizeof(msg.mode));
1016         msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
1017         msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
1018         msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
1019         msg.hdr.nexus.targ_lun = lun->lun;
1020         msg.hdr.nexus.targ_mapped_lun = lun->lun;
1021         msg.mode.page_code = page;
1022         msg.mode.subpage = subpage;
1023         msg.mode.page_len = lun->mode_pages.index[i].page_len;
1024         memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
1025             msg.mode.page_len);
1026         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
1027             M_WAITOK);
1028 }
1029
1030 static void
1031 ctl_isc_ha_link_up(struct ctl_softc *softc)
1032 {
1033         struct ctl_port *port;
1034         struct ctl_lun *lun;
1035         union ctl_ha_msg msg;
1036         int i;
1037
1038         /* Announce this node parameters to peer for validation. */
1039         msg.login.msg_type = CTL_MSG_LOGIN;
1040         msg.login.version = CTL_HA_VERSION;
1041         msg.login.ha_mode = softc->ha_mode;
1042         msg.login.ha_id = softc->ha_id;
1043         msg.login.max_luns = ctl_max_luns;
1044         msg.login.max_ports = ctl_max_ports;
1045         msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
1046         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
1047             M_WAITOK);
1048
1049         STAILQ_FOREACH(port, &softc->port_list, links) {
1050                 ctl_isc_announce_port(port);
1051                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1052                         if (port->wwpn_iid[i].in_use)
1053                                 ctl_isc_announce_iid(port, i);
1054                 }
1055         }
1056         STAILQ_FOREACH(lun, &softc->lun_list, links)
1057                 ctl_isc_announce_lun(lun);
1058 }
1059
1060 static void
1061 ctl_isc_ha_link_down(struct ctl_softc *softc)
1062 {
1063         struct ctl_port *port;
1064         struct ctl_lun *lun;
1065         union ctl_io *io;
1066         int i;
1067
1068         mtx_lock(&softc->ctl_lock);
1069         STAILQ_FOREACH(lun, &softc->lun_list, links) {
1070                 mtx_lock(&lun->lun_lock);
1071                 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
1072                         lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
1073                         ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
1074                 }
1075                 mtx_unlock(&lun->lun_lock);
1076
1077                 mtx_unlock(&softc->ctl_lock);
1078                 io = ctl_alloc_io(softc->othersc_pool);
1079                 mtx_lock(&softc->ctl_lock);
1080                 ctl_zero_io(io);
1081                 io->io_hdr.msg_type = CTL_MSG_FAILOVER;
1082                 io->io_hdr.nexus.targ_mapped_lun = lun->lun;
1083                 ctl_enqueue_isc(io);
1084         }
1085
1086         STAILQ_FOREACH(port, &softc->port_list, links) {
1087                 if (port->targ_port >= softc->port_min &&
1088                     port->targ_port < softc->port_max)
1089                         continue;
1090                 port->status &= ~CTL_PORT_STATUS_ONLINE;
1091                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1092                         port->wwpn_iid[i].in_use = 0;
1093                         free(port->wwpn_iid[i].name, M_CTL);
1094                         port->wwpn_iid[i].name = NULL;
1095                 }
1096         }
1097         mtx_unlock(&softc->ctl_lock);
1098 }
1099
1100 static void
1101 ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1102 {
1103         struct ctl_lun *lun;
1104         uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
1105
1106         mtx_lock(&softc->ctl_lock);
1107         if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
1108             (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
1109                 mtx_unlock(&softc->ctl_lock);
1110                 return;
1111         }
1112         mtx_lock(&lun->lun_lock);
1113         mtx_unlock(&softc->ctl_lock);
1114         if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set)
1115                 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
1116         if (msg->ua.ua_all) {
1117                 if (msg->ua.ua_set)
1118                         ctl_est_ua_all(lun, iid, msg->ua.ua_type);
1119                 else
1120                         ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
1121         } else {
1122                 if (msg->ua.ua_set)
1123                         ctl_est_ua(lun, iid, msg->ua.ua_type);
1124                 else
1125                         ctl_clr_ua(lun, iid, msg->ua.ua_type);
1126         }
1127         mtx_unlock(&lun->lun_lock);
1128 }
1129
1130 static void
1131 ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1132 {
1133         struct ctl_lun *lun;
1134         struct ctl_ha_msg_lun_pr_key pr_key;
1135         int i, k;
1136         ctl_lun_flags oflags;
1137         uint32_t targ_lun;
1138
1139         targ_lun = msg->hdr.nexus.targ_mapped_lun;
1140         mtx_lock(&softc->ctl_lock);
1141         if (targ_lun >= ctl_max_luns ||
1142             (lun = softc->ctl_luns[targ_lun]) == NULL) {
1143                 mtx_unlock(&softc->ctl_lock);
1144                 return;
1145         }
1146         mtx_lock(&lun->lun_lock);
1147         mtx_unlock(&softc->ctl_lock);
1148         if (lun->flags & CTL_LUN_DISABLED) {
1149                 mtx_unlock(&lun->lun_lock);
1150                 return;
1151         }
1152         i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
1153         if (msg->lun.lun_devid_len != i || (i > 0 &&
1154             memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
1155                 mtx_unlock(&lun->lun_lock);
1156                 printf("%s: Received conflicting HA LUN %d\n",
1157                     __func__, targ_lun);
1158                 return;
1159         } else {
1160                 /* Record whether peer is primary. */
1161                 oflags = lun->flags;
1162                 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
1163                     (msg->lun.flags & CTL_LUN_DISABLED) == 0)
1164                         lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
1165                 else
1166                         lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
1167                 if (oflags != lun->flags)
1168                         ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
1169
1170                 /* If peer is primary and we are not -- use data */
1171                 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
1172                     (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
1173                         lun->pr_generation = msg->lun.pr_generation;
1174                         lun->pr_res_idx = msg->lun.pr_res_idx;
1175                         lun->pr_res_type = msg->lun.pr_res_type;
1176                         lun->pr_key_count = msg->lun.pr_key_count;
1177                         for (k = 0; k < CTL_MAX_INITIATORS; k++)
1178                                 ctl_clr_prkey(lun, k);
1179                         for (k = 0; k < msg->lun.pr_key_count; k++) {
1180                                 memcpy(&pr_key, &msg->lun.data[i],
1181                                     sizeof(pr_key));
1182                                 ctl_alloc_prkey(lun, pr_key.pr_iid);
1183                                 ctl_set_prkey(lun, pr_key.pr_iid,
1184                                     pr_key.pr_key);
1185                                 i += sizeof(pr_key);
1186                         }
1187                 }
1188
1189                 mtx_unlock(&lun->lun_lock);
1190                 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
1191                     __func__, targ_lun,
1192                     (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
1193                     "primary" : "secondary"));
1194
1195                 /* If we are primary but peer doesn't know -- notify */
1196                 if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
1197                     (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
1198                         ctl_isc_announce_lun(lun);
1199         }
1200 }
1201
1202 static void
1203 ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1204 {
1205         struct ctl_port *port;
1206         struct ctl_lun *lun;
1207         int i, new;
1208
1209         port = softc->ctl_ports[msg->hdr.nexus.targ_port];
1210         if (port == NULL) {
1211                 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
1212                     msg->hdr.nexus.targ_port));
1213                 new = 1;
1214                 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
1215                 port->frontend = &ha_frontend;
1216                 port->targ_port = msg->hdr.nexus.targ_port;
1217                 port->fe_datamove = ctl_ha_datamove;
1218                 port->fe_done = ctl_ha_done;
1219         } else if (port->frontend == &ha_frontend) {
1220                 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
1221                     msg->hdr.nexus.targ_port));
1222                 new = 0;
1223         } else {
1224                 printf("%s: Received conflicting HA port %d\n",
1225                     __func__, msg->hdr.nexus.targ_port);
1226                 return;
1227         }
1228         port->port_type = msg->port.port_type;
1229         port->physical_port = msg->port.physical_port;
1230         port->virtual_port = msg->port.virtual_port;
1231         port->status = msg->port.status;
1232         i = 0;
1233         free(port->port_name, M_CTL);
1234         port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
1235             M_CTL);
1236         i += msg->port.name_len;
1237         if (msg->port.lun_map_len != 0) {
1238                 if (port->lun_map == NULL ||
1239                     port->lun_map_size * sizeof(uint32_t) <
1240                     msg->port.lun_map_len) {
1241                         port->lun_map_size = 0;
1242                         free(port->lun_map, M_CTL);
1243                         port->lun_map = malloc(msg->port.lun_map_len,
1244                             M_CTL, M_WAITOK);
1245                 }
1246                 memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len);
1247                 port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t);
1248                 i += msg->port.lun_map_len;
1249         } else {
1250                 port->lun_map_size = 0;
1251                 free(port->lun_map, M_CTL);
1252                 port->lun_map = NULL;
1253         }
1254         if (msg->port.port_devid_len != 0) {
1255                 if (port->port_devid == NULL ||
1256                     port->port_devid->len < msg->port.port_devid_len) {
1257                         free(port->port_devid, M_CTL);
1258                         port->port_devid = malloc(sizeof(struct ctl_devid) +
1259                             msg->port.port_devid_len, M_CTL, M_WAITOK);
1260                 }
1261                 memcpy(port->port_devid->data, &msg->port.data[i],
1262                     msg->port.port_devid_len);
1263                 port->port_devid->len = msg->port.port_devid_len;
1264                 i += msg->port.port_devid_len;
1265         } else {
1266                 free(port->port_devid, M_CTL);
1267                 port->port_devid = NULL;
1268         }
1269         if (msg->port.target_devid_len != 0) {
1270                 if (port->target_devid == NULL ||
1271                     port->target_devid->len < msg->port.target_devid_len) {
1272                         free(port->target_devid, M_CTL);
1273                         port->target_devid = malloc(sizeof(struct ctl_devid) +
1274                             msg->port.target_devid_len, M_CTL, M_WAITOK);
1275                 }
1276                 memcpy(port->target_devid->data, &msg->port.data[i],
1277                     msg->port.target_devid_len);
1278                 port->target_devid->len = msg->port.target_devid_len;
1279                 i += msg->port.target_devid_len;
1280         } else {
1281                 free(port->target_devid, M_CTL);
1282                 port->target_devid = NULL;
1283         }
1284         if (msg->port.init_devid_len != 0) {
1285                 if (port->init_devid == NULL ||
1286                     port->init_devid->len < msg->port.init_devid_len) {
1287                         free(port->init_devid, M_CTL);
1288                         port->init_devid = malloc(sizeof(struct ctl_devid) +
1289                             msg->port.init_devid_len, M_CTL, M_WAITOK);
1290                 }
1291                 memcpy(port->init_devid->data, &msg->port.data[i],
1292                     msg->port.init_devid_len);
1293                 port->init_devid->len = msg->port.init_devid_len;
1294                 i += msg->port.init_devid_len;
1295         } else {
1296                 free(port->init_devid, M_CTL);
1297                 port->init_devid = NULL;
1298         }
1299         if (new) {
1300                 if (ctl_port_register(port) != 0) {
1301                         printf("%s: ctl_port_register() failed with error\n",
1302                             __func__);
1303                 }
1304         }
1305         mtx_lock(&softc->ctl_lock);
1306         STAILQ_FOREACH(lun, &softc->lun_list, links) {
1307                 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
1308                         continue;
1309                 mtx_lock(&lun->lun_lock);
1310                 ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
1311                 mtx_unlock(&lun->lun_lock);
1312         }
1313         mtx_unlock(&softc->ctl_lock);
1314 }
1315
1316 static void
1317 ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1318 {
1319         struct ctl_port *port;
1320         int iid;
1321
1322         port = softc->ctl_ports[msg->hdr.nexus.targ_port];
1323         if (port == NULL) {
1324                 printf("%s: Received IID for unknown port %d\n",
1325                     __func__, msg->hdr.nexus.targ_port);
1326                 return;
1327         }
1328         iid = msg->hdr.nexus.initid;
1329         if (port->wwpn_iid[iid].in_use != 0 &&
1330             msg->iid.in_use == 0)
1331                 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
1332         port->wwpn_iid[iid].in_use = msg->iid.in_use;
1333         port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
1334         free(port->wwpn_iid[iid].name, M_CTL);
1335         if (msg->iid.name_len) {
1336                 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
1337                     msg->iid.name_len, M_CTL);
1338         } else
1339                 port->wwpn_iid[iid].name = NULL;
1340 }
1341
1342 static void
1343 ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1344 {
1345
1346         if (msg->login.version != CTL_HA_VERSION) {
1347                 printf("CTL HA peers have different versions %d != %d\n",
1348                     msg->login.version, CTL_HA_VERSION);
1349                 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1350                 return;
1351         }
1352         if (msg->login.ha_mode != softc->ha_mode) {
1353                 printf("CTL HA peers have different ha_mode %d != %d\n",
1354                     msg->login.ha_mode, softc->ha_mode);
1355                 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1356                 return;
1357         }
1358         if (msg->login.ha_id == softc->ha_id) {
1359                 printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
1360                 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1361                 return;
1362         }
1363         if (msg->login.max_luns != ctl_max_luns ||
1364             msg->login.max_ports != ctl_max_ports ||
1365             msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
1366                 printf("CTL HA peers have different limits\n");
1367                 ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1368                 return;
1369         }
1370 }
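
/*
 * For reference, a minimal sketch of the sending side of this handshake,
 * assuming the msg->login field layout checked above and the
 * ctl_ha_msg_send() interface used elsewhere in this file; the function
 * name ctl_isc_send_login_sketch is hypothetical.
 */
static void
ctl_isc_send_login_sketch(struct ctl_softc *softc)
{
	union ctl_ha_msg msg;

	/* Advertise our version, mode, head ID and limits to the peer. */
	msg.login.msg_type = CTL_MSG_LOGIN;
	msg.login.version = CTL_HA_VERSION;
	msg.login.ha_mode = softc->ha_mode;
	msg.login.ha_id = softc->ha_id;
	msg.login.max_luns = ctl_max_luns;
	msg.login.max_ports = ctl_max_ports;
	msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
	ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.login), M_WAITOK);
}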
1371
1372 static void
1373 ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
1374 {
1375         struct ctl_lun *lun;
1376         u_int i;
1377         uint32_t initidx, targ_lun;
1378
1379         targ_lun = msg->hdr.nexus.targ_mapped_lun;
1380         mtx_lock(&softc->ctl_lock);
1381         if (targ_lun >= ctl_max_luns ||
1382             (lun = softc->ctl_luns[targ_lun]) == NULL) {
1383                 mtx_unlock(&softc->ctl_lock);
1384                 return;
1385         }
1386         mtx_lock(&lun->lun_lock);
1387         mtx_unlock(&softc->ctl_lock);
1388         if (lun->flags & CTL_LUN_DISABLED) {
1389                 mtx_unlock(&lun->lun_lock);
1390                 return;
1391         }
1392         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
1393                 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
1394                     msg->mode.page_code &&
1395                     lun->mode_pages.index[i].subpage == msg->mode.subpage)
1396                         break;
1397         }
1398         if (i == CTL_NUM_MODE_PAGES) {
1399                 mtx_unlock(&lun->lun_lock);
1400                 return;
1401         }
1402         memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
1403             lun->mode_pages.index[i].page_len);
1404         initidx = ctl_get_initindex(&msg->hdr.nexus);
1405         if (initidx != -1)
1406                 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
1407         mtx_unlock(&lun->lun_lock);
1408 }
1409
1410 /*
1411  * ISC (Inter Shelf Communication) event handler.  Events from the HA
1412  * subsystem come in here.
1413  */
1414 static void
1415 ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
1416 {
1417         struct ctl_softc *softc = control_softc;
1418         union ctl_io *io;
1419         struct ctl_prio *presio;
1420         ctl_ha_status isc_status;
1421
1422         CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
1423         if (event == CTL_HA_EVT_MSG_RECV) {
1424                 union ctl_ha_msg *msg, msgbuf;
1425
1426                 if (param > sizeof(msgbuf))
1427                         msg = malloc(param, M_CTL, M_WAITOK);
1428                 else
1429                         msg = &msgbuf;
1430                 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
1431                     M_WAITOK);
1432                 if (isc_status != CTL_HA_STATUS_SUCCESS) {
1433                         printf("%s: Error receiving message: %d\n",
1434                             __func__, isc_status);
1435                         if (msg != &msgbuf)
1436                                 free(msg, M_CTL);
1437                         return;
1438                 }
1439
1440                 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type));
1441                 switch (msg->hdr.msg_type) {
1442                 case CTL_MSG_SERIALIZE:
1443                         io = ctl_alloc_io(softc->othersc_pool);
1444                         ctl_zero_io(io);
1445                         /* Populate ctsio from msg. */
1446                         io->io_hdr.io_type = CTL_IO_SCSI;
1447                         io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
1448                         io->io_hdr.remote_io = msg->hdr.original_sc;
1449                         io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
1450                                             CTL_FLAG_IO_ACTIVE;
1451                         /*
1452                          * If we're in serialization-only mode, we don't
1453                          * want to go through full done processing.  Thus
1454                          * the COPY flag.
1455                          *
1456                          * XXX KDM add another flag that is more specific.
1457                          */
1458                         if (softc->ha_mode != CTL_HA_MODE_XFER)
1459                                 io->io_hdr.flags |= CTL_FLAG_INT_COPY;
1460                         io->io_hdr.nexus = msg->hdr.nexus;
1461                         io->scsiio.tag_num = msg->scsi.tag_num;
1462                         io->scsiio.tag_type = msg->scsi.tag_type;
1463 #ifdef CTL_TIME_IO
1464                         io->io_hdr.start_time = time_uptime;
1465                         getbinuptime(&io->io_hdr.start_bt);
1466 #endif /* CTL_TIME_IO */
1467                         io->scsiio.cdb_len = msg->scsi.cdb_len;
1468                         memcpy(io->scsiio.cdb, msg->scsi.cdb,
1469                                CTL_MAX_CDBLEN);
1470                         if (softc->ha_mode == CTL_HA_MODE_XFER) {
1471                                 const struct ctl_cmd_entry *entry;
1472
1473                                 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
1474                                 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
1475                                 io->io_hdr.flags |=
1476                                         entry->flags & CTL_FLAG_DATA_MASK;
1477                         }
1478                         ctl_enqueue_isc(io);
1479                         break;
1480
1481                 /* Performed on the Originating SC, XFER mode only */
1482                 case CTL_MSG_DATAMOVE: {
1483                         struct ctl_sg_entry *sgl;
1484                         int i, j;
1485
1486                         io = msg->hdr.original_sc;
1487                         if (io == NULL) {
1488                                 printf("%s: original_sc == NULL!\n", __func__);
1489                                 /* XXX KDM do something here */
1490                                 break;
1491                         }
1492                         io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
1493                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1494                         /*
1495                          * Keep track of this; we need to send it back over
1496                          * when the datamove is complete.
1497                          */
1498                         io->io_hdr.remote_io = msg->hdr.serializing_sc;
1499                         if (msg->hdr.status == CTL_SUCCESS)
1500                                 io->io_hdr.status = msg->hdr.status;
1501
1502                         if (msg->dt.sg_sequence == 0) {
1503 #ifdef CTL_TIME_IO
1504                                 getbinuptime(&io->io_hdr.dma_start_bt);
1505 #endif
1506                                 i = msg->dt.kern_sg_entries +
1507                                     msg->dt.kern_data_len /
1508                                     CTL_HA_DATAMOVE_SEGMENT + 1;
1509                                 sgl = malloc(sizeof(*sgl) * i, M_CTL,
1510                                     M_WAITOK | M_ZERO);
1511                                 CTL_RSGL(io) = sgl;
1512                                 CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries];
1513
1514                                 io->scsiio.kern_data_ptr = (uint8_t *)sgl;
1515
1516                                 io->scsiio.kern_sg_entries =
1517                                         msg->dt.kern_sg_entries;
1518                                 io->scsiio.rem_sg_entries =
1519                                         msg->dt.kern_sg_entries;
1520                                 io->scsiio.kern_data_len =
1521                                         msg->dt.kern_data_len;
1522                                 io->scsiio.kern_total_len =
1523                                         msg->dt.kern_total_len;
1524                                 io->scsiio.kern_data_resid =
1525                                         msg->dt.kern_data_resid;
1526                                 io->scsiio.kern_rel_offset =
1527                                         msg->dt.kern_rel_offset;
1528                                 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
1529                                 io->io_hdr.flags |= msg->dt.flags &
1530                                     CTL_FLAG_BUS_ADDR;
1531                         } else
1532                                 sgl = (struct ctl_sg_entry *)
1533                                         io->scsiio.kern_data_ptr;
1534
1535                         for (i = msg->dt.sent_sg_entries, j = 0;
1536                              i < (msg->dt.sent_sg_entries +
1537                              msg->dt.cur_sg_entries); i++, j++) {
1538                                 sgl[i].addr = msg->dt.sg_list[j].addr;
1539                                 sgl[i].len = msg->dt.sg_list[j].len;
1540                         }
1541
1542                         /*
1543                          * If this is the last piece of the I/O, we've got
1544                          * the full S/G list.  Queue processing in the thread.
1545                          * Otherwise wait for the next piece.
1546                          */
1547                         if (msg->dt.sg_last != 0)
1548                                 ctl_enqueue_isc(io);
1549                         break;
1550                 }
1551                 /* Performed on the Serializing (primary) SC, XFER mode only */
1552                 case CTL_MSG_DATAMOVE_DONE: {
1553                         if (msg->hdr.serializing_sc == NULL) {
1554                                 printf("%s: serializing_sc == NULL!\n",
1555                                        __func__);
1556                                 /* XXX KDM now what? */
1557                                 break;
1558                         }
1559                         /*
1560                          * We grab the sense information here in case
1561                          * there was a failure, so we can return status
1562                          * back to the initiator.
1563                          */
1564                         io = msg->hdr.serializing_sc;
1565                         io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
1566                         io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1567                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1568                         io->io_hdr.port_status = msg->scsi.port_status;
1569                         io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
1570                         if (msg->hdr.status != CTL_STATUS_NONE) {
1571                                 io->io_hdr.status = msg->hdr.status;
1572                                 io->scsiio.scsi_status = msg->scsi.scsi_status;
1573                                 io->scsiio.sense_len = msg->scsi.sense_len;
1574                                 memcpy(&io->scsiio.sense_data,
1575                                     &msg->scsi.sense_data,
1576                                     msg->scsi.sense_len);
1577                                 if (msg->hdr.status == CTL_SUCCESS)
1578                                         io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
1579                         }
1580                         ctl_enqueue_isc(io);
1581                         break;
1582                 }
1583
1584                 /* Performed on the Originating SC, SER_ONLY mode */
1585                 case CTL_MSG_R2R:
1586                         io = msg->hdr.original_sc;
1587                         if (io == NULL) {
1588                                 printf("%s: original_sc == NULL!\n",
1589                                     __func__);
1590                                 break;
1591                         }
1592                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1593                         io->io_hdr.msg_type = CTL_MSG_R2R;
1594                         io->io_hdr.remote_io = msg->hdr.serializing_sc;
1595                         ctl_enqueue_isc(io);
1596                         break;
1597
1598                 /*
1599                  * Performed on the Serializing (i.e. primary) SC in
1600                  * SER_ONLY mode.
1601                  * Performed on the Originating (i.e. secondary) SC in
1602                  * XFER mode.
1603                  */
1604                 case CTL_MSG_FINISH_IO:
1605                         if (softc->ha_mode == CTL_HA_MODE_XFER)
1606                                 ctl_isc_handler_finish_xfer(softc, msg);
1607                         else
1608                                 ctl_isc_handler_finish_ser_only(softc, msg);
1609                         break;
1610
1611                 /* Performed on the Originating SC */
1612                 case CTL_MSG_BAD_JUJU:
1613                         io = msg->hdr.original_sc;
1614                         if (io == NULL) {
1615                         printf("%s: Bad JUJU! original_sc is NULL!\n",
1616                                        __func__);
1617                                 break;
1618                         }
1619                         ctl_copy_sense_data(msg, io);
1620                         /*
1621                          * IO should have already been cleaned up on other
1622                          * SC so clear this flag so we won't send a message
1623                          * back to finish the IO there.
1624                          */
1625                         io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1626                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1627
1628                         /* io = msg->hdr.serializing_sc; */
1629                         io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
1630                         ctl_enqueue_isc(io);
1631                         break;
1632
1633                 /* Handle resets sent from the other side */
1634                 case CTL_MSG_MANAGE_TASKS: {
1635                         struct ctl_taskio *taskio;
1636                         taskio = (struct ctl_taskio *)ctl_alloc_io(
1637                             softc->othersc_pool);
1638                         ctl_zero_io((union ctl_io *)taskio);
1639                         taskio->io_hdr.io_type = CTL_IO_TASK;
1640                         taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1641                         taskio->io_hdr.nexus = msg->hdr.nexus;
1642                         taskio->task_action = msg->task.task_action;
1643                         taskio->tag_num = msg->task.tag_num;
1644                         taskio->tag_type = msg->task.tag_type;
1645 #ifdef CTL_TIME_IO
1646                         taskio->io_hdr.start_time = time_uptime;
1647                         getbinuptime(&taskio->io_hdr.start_bt);
1648 #endif /* CTL_TIME_IO */
1649                         ctl_run_task((union ctl_io *)taskio);
1650                         break;
1651                 }
1652                 /* Persistent Reserve action which needs attention */
1653                 case CTL_MSG_PERS_ACTION:
1654                         presio = (struct ctl_prio *)ctl_alloc_io(
1655                             softc->othersc_pool);
1656                         ctl_zero_io((union ctl_io *)presio);
1657                         presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
1658                         presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1659                         presio->io_hdr.nexus = msg->hdr.nexus;
1660                         presio->pr_msg = msg->pr;
1661                         ctl_enqueue_isc((union ctl_io *)presio);
1662                         break;
1663                 case CTL_MSG_UA:
1664                         ctl_isc_ua(softc, msg, param);
1665                         break;
1666                 case CTL_MSG_PORT_SYNC:
1667                         ctl_isc_port_sync(softc, msg, param);
1668                         break;
1669                 case CTL_MSG_LUN_SYNC:
1670                         ctl_isc_lun_sync(softc, msg, param);
1671                         break;
1672                 case CTL_MSG_IID_SYNC:
1673                         ctl_isc_iid_sync(softc, msg, param);
1674                         break;
1675                 case CTL_MSG_LOGIN:
1676                         ctl_isc_login(softc, msg, param);
1677                         break;
1678                 case CTL_MSG_MODE_SYNC:
1679                         ctl_isc_mode_sync(softc, msg, param);
1680                         break;
1681                 default:
1682                         printf("Received HA message of unknown type %d\n",
1683                             msg->hdr.msg_type);
1684                         ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1685                         break;
1686                 }
1687                 if (msg != &msgbuf)
1688                         free(msg, M_CTL);
1689         } else if (event == CTL_HA_EVT_LINK_CHANGE) {
1690                 printf("CTL: HA link status changed from %d to %d\n",
1691                     softc->ha_link, param);
1692                 if (param == softc->ha_link)
1693                         return;
1694                 if (softc->ha_link == CTL_HA_LINK_ONLINE) {
1695                         softc->ha_link = param;
1696                         ctl_isc_ha_link_down(softc);
1697                 } else {
1698                         softc->ha_link = param;
1699                         if (softc->ha_link == CTL_HA_LINK_ONLINE)
1700                                 ctl_isc_ha_link_up(softc);
1701                 }
1702                 return;
1703         } else {
1704                 printf("ctl_isc_event_handler: Unknown event %d\n", event);
1705                 return;
1706         }
1707 }
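
/*
 * A minimal sketch of how a handler like the one above is attached to the
 * HA message layer, assuming the ctl_ha_msg_register() interface declared
 * in ctl_ha.h; the function name ctl_isc_register_sketch is hypothetical.
 */
static void
ctl_isc_register_sketch(void)
{
	/* Route all CTL-channel events to ctl_isc_event_handler(). */
	if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) !=
	    CTL_HA_STATUS_SUCCESS)
		printf("ctl: failed to register HA event handler\n");
}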
1708
1709 static void
1710 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
1711 {
1712
1713         memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
1714             src->scsi.sense_len);
1715         dest->scsiio.scsi_status = src->scsi.scsi_status;
1716         dest->scsiio.sense_len = src->scsi.sense_len;
1717         dest->io_hdr.status = src->hdr.status;
1718 }
1719
1720 static void
1721 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
1722 {
1723
1724         memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
1725             src->scsiio.sense_len);
1726         dest->scsi.scsi_status = src->scsiio.scsi_status;
1727         dest->scsi.sense_len = src->scsiio.sense_len;
1728         dest->hdr.status = src->io_hdr.status;
1729 }
1730
1731 void
1732 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1733 {
1734         struct ctl_softc *softc = lun->ctl_softc;
1735         ctl_ua_type *pu;
1736
1737         if (initidx < softc->init_min || initidx >= softc->init_max)
1738                 return;
1739         mtx_assert(&lun->lun_lock, MA_OWNED);
1740         pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1741         if (pu == NULL)
1742                 return;
1743         pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
1744 }
1745
1746 void
1747 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
1748 {
1749         int i;
1750
1751         mtx_assert(&lun->lun_lock, MA_OWNED);
1752         if (lun->pending_ua[port] == NULL)
1753                 return;
1754         for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1755                 if (port * CTL_MAX_INIT_PER_PORT + i == except)
1756                         continue;
1757                 lun->pending_ua[port][i] |= ua;
1758         }
1759 }
1760
1761 void
1762 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1763 {
1764         struct ctl_softc *softc = lun->ctl_softc;
1765         int i;
1766
1767         mtx_assert(&lun->lun_lock, MA_OWNED);
1768         for (i = softc->port_min; i < softc->port_max; i++)
1769                 ctl_est_ua_port(lun, i, except, ua);
1770 }
1771
1772 void
1773 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1774 {
1775         struct ctl_softc *softc = lun->ctl_softc;
1776         ctl_ua_type *pu;
1777
1778         if (initidx < softc->init_min || initidx >= softc->init_max)
1779                 return;
1780         mtx_assert(&lun->lun_lock, MA_OWNED);
1781         pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1782         if (pu == NULL)
1783                 return;
1784         pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
1785 }
1786
1787 void
1788 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1789 {
1790         struct ctl_softc *softc = lun->ctl_softc;
1791         int i, j;
1792
1793         mtx_assert(&lun->lun_lock, MA_OWNED);
1794         for (i = softc->port_min; i < softc->port_max; i++) {
1795                 if (lun->pending_ua[i] == NULL)
1796                         continue;
1797                 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
1798                         if (i * CTL_MAX_INIT_PER_PORT + j == except)
1799                                 continue;
1800                         lun->pending_ua[i][j] &= ~ua;
1801                 }
1802         }
1803 }
1804
1805 void
1806 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
1807     ctl_ua_type ua_type)
1808 {
1809         struct ctl_lun *lun;
1810
1811         mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
1812         STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
1813                 mtx_lock(&lun->lun_lock);
1814                 ctl_clr_ua(lun, initidx, ua_type);
1815                 mtx_unlock(&lun->lun_lock);
1816         }
1817 }
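
/*
 * The pending-UA helpers above share one indexing scheme: a global
 * initiator index decomposes into a port slot (initidx /
 * CTL_MAX_INIT_PER_PORT) and a per-port slot (initidx %
 * CTL_MAX_INIT_PER_PORT), and each ctl_ua_type value is a bit flag, so
 * establishing and clearing a UA are single OR/AND-NOT operations.  A
 * minimal test sketch, assuming the caller holds the LUN lock; the
 * function name ctl_ua_pending_sketch is hypothetical.
 */
static bool
ctl_ua_pending_sketch(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return (false);
	return ((pu[initidx % CTL_MAX_INIT_PER_PORT] & ua) != 0);
}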
1818
1819 static int
1820 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
1821 {
1822         struct ctl_softc *softc = (struct ctl_softc *)arg1;
1823         struct ctl_lun *lun;
1824         struct ctl_lun_req ireq;
1825         int error, value;
1826
1827         value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
1828         error = sysctl_handle_int(oidp, &value, 0, req);
1829         if ((error != 0) || (req->newptr == NULL))
1830                 return (error);
1831
1832         mtx_lock(&softc->ctl_lock);
1833         if (value == 0)
1834                 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1835         else
1836                 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
1837         STAILQ_FOREACH(lun, &softc->lun_list, links) {
1838                 mtx_unlock(&softc->ctl_lock);
1839                 bzero(&ireq, sizeof(ireq));
1840                 ireq.reqtype = CTL_LUNREQ_MODIFY;
1841                 ireq.reqdata.modify.lun_id = lun->lun;
1842                 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
1843                     curthread);
1844                 if (ireq.status != CTL_LUN_OK) {
1845                         printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
1846                             __func__, ireq.status, ireq.error_str);
1847                 }
1848                 mtx_lock(&softc->ctl_lock);
1849         }
1850         mtx_unlock(&softc->ctl_lock);
1851         return (0);
1852 }
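
/*
 * ctl_ha_role_sysctl() above follows the standard SYSCTL_PROC idiom:
 * sysctl_handle_int() copies the current value out to userland and, on a
 * write, copies the new value in; req->newptr == NULL distinguishes a
 * read-only query, in which case the handler returns without applying
 * anything.  A stripped-down sketch of the same idiom; "example_value"
 * and the function name are hypothetical.
 */
static int
ctl_example_sysctl_sketch(SYSCTL_HANDLER_ARGS)
{
	int error, example_value;

	example_value = 0;	/* Load the current value here. */
	error = sysctl_handle_int(oidp, &example_value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);	/* Error, or a pure read. */
	/* Apply example_value under the appropriate lock. */
	return (0);
}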
1853
1854 static int
1855 ctl_init(void)
1856 {
1857         struct make_dev_args args;
1858         struct ctl_softc *softc;
1859         int i, error;
1860
1861         softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
1862                                M_WAITOK | M_ZERO);
1863
1864         make_dev_args_init(&args);
1865         args.mda_devsw = &ctl_cdevsw;
1866         args.mda_uid = UID_ROOT;
1867         args.mda_gid = GID_OPERATOR;
1868         args.mda_mode = 0600;
1869         args.mda_si_drv1 = softc;
1870         args.mda_si_drv2 = NULL;
1871         error = make_dev_s(&args, &softc->dev, "cam/ctl");
1872         if (error != 0) {
1873                 free(softc, M_DEVBUF);
1874                 control_softc = NULL;
1875                 return (error);
1876         }
1877
1878         sysctl_ctx_init(&softc->sysctl_ctx);
1879         softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1880                 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
1881                 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer");
1882
1883         if (softc->sysctl_tree == NULL) {
1884                 printf("%s: unable to allocate sysctl tree\n", __func__);
1885                 destroy_dev(softc->dev);
1886                 free(softc, M_DEVBUF);
1887                 control_softc = NULL;
1888                 return (ENOMEM);
1889         }
1890
1891         mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
1892         softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
1893             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1894         softc->flags = 0;
1895
1896         SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1897             OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
1898             "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
1899
1900         if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) {
1901                 printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n",
1902                     ctl_max_luns, CTL_DEFAULT_MAX_LUNS);
1903                 ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
1904         }
1905         softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns,
1906             M_DEVBUF, M_WAITOK | M_ZERO);
1907         softc->ctl_lun_mask = malloc(sizeof(uint32_t) *
1908             ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO);
1909         if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) {
1910                 printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n",
1911                     ctl_max_ports, CTL_DEFAULT_MAX_PORTS);
1912                 ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
1913         }
1914         softc->ctl_port_mask = malloc(sizeof(uint32_t) *
1915           ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO);
1916         softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports,
1917              M_DEVBUF, M_WAITOK | M_ZERO);
1918
1919
1920         /*
1921          * In Copan's HA scheme, the "master" and "slave" roles are
1922          * figured out through the slot the controller is in.  Although it
1923          * is an active/active system, someone has to be in charge.
1924          */
1925         SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1926             OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
1927             "HA head ID (0 - no HA)");
1928         if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) {
1929                 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1930                 softc->is_single = 1;
1931                 softc->port_cnt = ctl_max_ports;
1932                 softc->port_min = 0;
1933         } else {
1934                 softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES;
1935                 softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
1936         }
1937         softc->port_max = softc->port_min + softc->port_cnt;
1938         softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
1939         softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
1940
1941         SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1942             OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
1943             "HA link state (0 - offline, 1 - unknown, 2 - online)");
1944
1945         STAILQ_INIT(&softc->lun_list);
1946         STAILQ_INIT(&softc->fe_list);
1947         STAILQ_INIT(&softc->port_list);
1948         STAILQ_INIT(&softc->be_list);
1949         ctl_tpc_init(softc);
1950
1951         if (worker_threads <= 0)
1952                 worker_threads = max(1, mp_ncpus / 4);
1953         if (worker_threads > CTL_MAX_THREADS)
1954                 worker_threads = CTL_MAX_THREADS;
1955
1956         for (i = 0; i < worker_threads; i++) {
1957                 struct ctl_thread *thr = &softc->threads[i];
1958
1959                 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
1960                 thr->ctl_softc = softc;
1961                 STAILQ_INIT(&thr->incoming_queue);
1962                 STAILQ_INIT(&thr->rtr_queue);
1963                 STAILQ_INIT(&thr->done_queue);
1964                 STAILQ_INIT(&thr->isc_queue);
1965
1966                 error = kproc_kthread_add(ctl_work_thread, thr,
1967                     &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
1968                 if (error != 0) {
1969                         printf("error creating CTL work thread!\n");
1970                         return (error);
1971                 }
1972         }
1973         error = kproc_kthread_add(ctl_thresh_thread, softc,
1974             &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh");
1975         if (error != 0) {
1976                 printf("error creating CTL threshold thread!\n");
1977                 return (error);
1978         }
1979
1980         SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
1981             OID_AUTO, "ha_role",
1982             CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
1983             softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
1984
1985         if (softc->is_single == 0) {
1986                 if (ctl_frontend_register(&ha_frontend) != 0)
1987                         softc->is_single = 1;
1988         }
1989         return (0);
1990 }
1991
1992 static int
1993 ctl_shutdown(void)
1994 {
1995         struct ctl_softc *softc = control_softc;
1996         int i;
1997
1998         if (softc->is_single == 0)
1999                 ctl_frontend_deregister(&ha_frontend);
2000
2001         destroy_dev(softc->dev);
2002
2003         /* Shutdown CTL threads. */
2004         softc->shutdown = 1;
2005         for (i = 0; i < worker_threads; i++) {
2006                 struct ctl_thread *thr = &softc->threads[i];
2007                 while (thr->thread != NULL) {
2008                         wakeup(thr);
2009                         if (thr->thread != NULL)
2010                                 pause("CTL thr shutdown", 1);
2011                 }
2012                 mtx_destroy(&thr->queue_lock);
2013         }
2014         while (softc->thresh_thread != NULL) {
2015                 wakeup(softc->thresh_thread);
2016                 if (softc->thresh_thread != NULL)
2017                         pause("CTL thr shutdown", 1);
2018         }
2019
2020         ctl_tpc_shutdown(softc);
2021         uma_zdestroy(softc->io_zone);
2022         mtx_destroy(&softc->ctl_lock);
2023
2024         free(softc->ctl_luns, M_DEVBUF);
2025         free(softc->ctl_lun_mask, M_DEVBUF);
2026         free(softc->ctl_port_mask, M_DEVBUF);
2027         free(softc->ctl_ports, M_DEVBUF);
2028
2029         sysctl_ctx_free(&softc->sysctl_ctx);
2030
2031         free(softc, M_DEVBUF);
2032         control_softc = NULL;
2033         return (0);
2034 }
2035
2036 static int
2037 ctl_module_event_handler(module_t mod, int what, void *arg)
2038 {
2039
2040         switch (what) {
2041         case MOD_LOAD:
2042                 return (ctl_init());
2043         case MOD_UNLOAD:
2044                 return (ctl_shutdown());
2045         default:
2046                 return (EOPNOTSUPP);
2047         }
2048 }
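
/*
 * A handler like the one above is normally wired into the kernel module
 * system with a moduledata_t plus DECLARE_MODULE() and MODULE_VERSION().
 * A minimal sketch of that boilerplate; the actual declaration in this
 * file may differ in name and SI_SUB_* / SI_ORDER_* placement.
 */
static moduledata_t ctl_sketch_moduledata = {
	"ctl",				/* Module name. */
	ctl_module_event_handler,	/* Event handler above. */
	NULL				/* Extra data. */
};

DECLARE_MODULE(ctl, ctl_sketch_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);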
2049
2050 /*
2051  * XXX KDM should we do some access checks here?  Bump a reference count to
2052  * prevent a CTL module from being unloaded while someone has it open?
2053  */
2054 static int
2055 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2056 {
2057         return (0);
2058 }
2059
2060 static int
2061 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2062 {
2063         return (0);
2064 }
2065
2066 /*
2067  * Remove an initiator by port number and initiator ID.
2068  * Returns 0 for success, -1 for failure.
2069  */
2070 int
2071 ctl_remove_initiator(struct ctl_port *port, int iid)
2072 {
2073         struct ctl_softc *softc = port->ctl_softc;
2074         int last;
2075
2076         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2077
2078         if (iid >= CTL_MAX_INIT_PER_PORT) {
2079                 printf("%s: initiator ID %u >= maximum %u!\n",
2080                        __func__, iid, CTL_MAX_INIT_PER_PORT);
2081                 return (-1);
2082         }
2083
2084         mtx_lock(&softc->ctl_lock);
2085         last = (--port->wwpn_iid[iid].in_use == 0);
2086         port->wwpn_iid[iid].last_use = time_uptime;
2087         mtx_unlock(&softc->ctl_lock);
2088         if (last)
2089                 ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
2090         ctl_isc_announce_iid(port, iid);
2091
2092         return (0);
2093 }
2094
2095 /*
2096  * Add an initiator to the initiator map.
2097  * Returns iid for success, < 0 for failure.
2098  */
2099 int
2100 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
2101 {
2102         struct ctl_softc *softc = port->ctl_softc;
2103         time_t best_time;
2104         int i, best;
2105
2106         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2107
2108         if (iid >= CTL_MAX_INIT_PER_PORT) {
2109                 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
2110                        __func__, (uintmax_t)wwpn, iid, CTL_MAX_INIT_PER_PORT);
2111                 free(name, M_CTL);
2112                 return (-1);
2113         }
2114
2115         mtx_lock(&softc->ctl_lock);
2116
2117         if (iid < 0 && (wwpn != 0 || name != NULL)) {
2118                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2119                         if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
2120                                 iid = i;
2121                                 break;
2122                         }
2123                         if (name != NULL && port->wwpn_iid[i].name != NULL &&
2124                             strcmp(name, port->wwpn_iid[i].name) == 0) {
2125                                 iid = i;
2126                                 break;
2127                         }
2128                 }
2129         }
2130
2131         if (iid < 0) {
2132                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2133                         if (port->wwpn_iid[i].in_use == 0 &&
2134                             port->wwpn_iid[i].wwpn == 0 &&
2135                             port->wwpn_iid[i].name == NULL) {
2136                                 iid = i;
2137                                 break;
2138                         }
2139                 }
2140         }
2141
2142         if (iid < 0) {
2143                 best = -1;
2144                 best_time = INT32_MAX;
2145                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2146                         if (port->wwpn_iid[i].in_use == 0) {
2147                                 if (port->wwpn_iid[i].last_use < best_time) {
2148                                         best = i;
2149                                         best_time = port->wwpn_iid[i].last_use;
2150                                 }
2151                         }
2152                 }
2153                 iid = best;
2154         }
2155
2156         if (iid < 0) {
2157                 mtx_unlock(&softc->ctl_lock);
2158                 free(name, M_CTL);
2159                 return (-2);
2160         }
2161
2162         if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
2163                 /*
2164                  * This is not an error yet.
2165                  */
2166                 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
2167 #if 0
2168                         printf("%s: port %d iid %u WWPN %#jx arrived"
2169                             " again\n", __func__, port->targ_port,
2170                             iid, (uintmax_t)wwpn);
2171 #endif
2172                         goto take;
2173                 }
2174                 if (name != NULL && port->wwpn_iid[iid].name != NULL &&
2175                     strcmp(name, port->wwpn_iid[iid].name) == 0) {
2176 #if 0
2177                         printf("%s: port %d iid %u name '%s' arrived"
2178                             " again\n", __func__, port->targ_port,
2179                             iid, name);
2180 #endif
2181                         goto take;
2182                 }
2183
2184                 /*
2185                  * This is an error, but what do we do about it?  The
2186                  * driver is telling us we have a new WWPN for this
2187                  * initiator ID, so we pretty much need to use it.
2188                  */
2189                 printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
2190                     " but WWPN %#jx '%s' is still at that address\n",
2191                     __func__, port->targ_port, iid, (uintmax_t)wwpn, name,
2192                     (uintmax_t)port->wwpn_iid[iid].wwpn,
2193                     port->wwpn_iid[iid].name);
2194         }
2195 take:
2196         free(port->wwpn_iid[iid].name, M_CTL);
2197         port->wwpn_iid[iid].name = name;
2198         port->wwpn_iid[iid].wwpn = wwpn;
2199         port->wwpn_iid[iid].in_use++;
2200         mtx_unlock(&softc->ctl_lock);
2201         ctl_isc_announce_iid(port, iid);
2202
2203         return (iid);
2204 }
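
/*
 * Taken together, ctl_add_initiator() and ctl_remove_initiator() give a
 * frontend a reference-counted slot per initiator: add on login, remove
 * on logout.  A minimal sketch of a hypothetical frontend login path for
 * an initiator known only by WWPN; note that a name buffer passed to
 * ctl_add_initiator() is consumed (or freed) by that function itself.
 */
static int
ctl_frontend_login_sketch(struct ctl_port *port, uint64_t wwpn)
{
	int iid;

	/* iid < 0 asks CTL to find or allocate a slot for this WWPN. */
	iid = ctl_add_initiator(port, -1, wwpn, NULL);
	if (iid < 0)
		return (ENOSPC);	/* No free or reclaimable slot. */
	/* ... serve the session, then on logout: ... */
	ctl_remove_initiator(port, iid);
	return (0);
}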
2205
2206 static int
2207 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
2208 {
2209         int len;
2210
2211         switch (port->port_type) {
2212         case CTL_PORT_FC:
2213         {
2214                 struct scsi_transportid_fcp *id =
2215                     (struct scsi_transportid_fcp *)buf;
2216                 if (port->wwpn_iid[iid].wwpn == 0)
2217                         return (0);
2218                 memset(id, 0, sizeof(*id));
2219                 id->format_protocol = SCSI_PROTO_FC;
2220                 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
2221                 return (sizeof(*id));
2222         }
2223         case CTL_PORT_ISCSI:
2224         {
2225                 struct scsi_transportid_iscsi_port *id =
2226                     (struct scsi_transportid_iscsi_port *)buf;
2227                 if (port->wwpn_iid[iid].name == NULL)
2228                         return (0);
2229                 memset(id, 0, 256);	/* Max iSCSI TransportID size. */
2230                 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
2231                     SCSI_PROTO_ISCSI;
2232                 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
2233                 len = roundup2(min(len, 252), 4);
2234                 scsi_ulto2b(len, id->additional_length);
2235                 return (sizeof(*id) + len);
2236         }
2237         case CTL_PORT_SAS:
2238         {
2239                 struct scsi_transportid_sas *id =
2240                     (struct scsi_transportid_sas *)buf;
2241                 if (port->wwpn_iid[iid].wwpn == 0)
2242                         return (0);
2243                 memset(id, 0, sizeof(*id));
2244                 id->format_protocol = SCSI_PROTO_SAS;
2245                 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
2246                 return (sizeof(*id));
2247         }
2248         default:
2249         {
2250                 struct scsi_transportid_spi *id =
2251                     (struct scsi_transportid_spi *)buf;
2252                 memset(id, 0, sizeof(*id));
2253                 id->format_protocol = SCSI_PROTO_SPI;
2254                 scsi_ulto2b(iid, id->scsi_addr);
2255                 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
2256                 return (sizeof(*id));
2257         }
2258         }
2259 }
2260
2261 /*
2262  * Serialize a command that went down the "wrong" side, and so was sent to
2263  * this controller for execution.  The logic is a little different than the
2264  * standard case in ctl_scsiio_precheck().  Errors in this case need to get
2265  * sent back to the other side, but in the success case, we execute the
2266  * command on this side (XFER mode) or tell the other side to execute it
2267  * (SER_ONLY mode).
2268  */
2269 static void
2270 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
2271 {
2272         struct ctl_softc *softc = CTL_SOFTC(ctsio);
2273         struct ctl_port *port = CTL_PORT(ctsio);
2274         union ctl_ha_msg msg_info;
2275         struct ctl_lun *lun;
2276         const struct ctl_cmd_entry *entry;
2277         union ctl_io *bio;
2278         uint32_t targ_lun;
2279
2280         targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
2281
2282         /* Make sure that we know about this port. */
2283         if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
2284                 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2285                                          /*retry_count*/ 1);
2286                 goto badjuju;
2287         }
2288
2289         /* Make sure that we know about this LUN. */
2290         mtx_lock(&softc->ctl_lock);
2291         if (targ_lun >= ctl_max_luns ||
2292             (lun = softc->ctl_luns[targ_lun]) == NULL) {
2293                 mtx_unlock(&softc->ctl_lock);
2294
2295                 /*
2296                  * The other node would not send this request unless it had
2297                  * received an announcement that we are the primary node for
2298                  * this LUN.  If the LUN does not exist now, it is probably a
2299                  * race, so respond to the initiator in the most opaque way.
2300                  */
2301                 ctl_set_busy(ctsio);
2302                 goto badjuju;
2303         }
2304         mtx_lock(&lun->lun_lock);
2305         mtx_unlock(&softc->ctl_lock);
2306
2307         /*
2308          * If the LUN is invalid, pretend that it doesn't exist.
2309                  * It will go away as soon as all pending I/Os have completed.
2310          */
2311         if (lun->flags & CTL_LUN_DISABLED) {
2312                 mtx_unlock(&lun->lun_lock);
2313                 ctl_set_busy(ctsio);
2314                 goto badjuju;
2315         }
2316
2317         entry = ctl_get_cmd_entry(ctsio, NULL);
2318         if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
2319                 mtx_unlock(&lun->lun_lock);
2320                 goto badjuju;
2321         }
2322
2323         CTL_LUN(ctsio) = lun;
2324         CTL_BACKEND_LUN(ctsio) = lun->be_lun;
2325
2326         /*
2327          * Every I/O goes into the OOA queue for a
2328          * particular LUN, and stays there until completion.
2329          */
2330 #ifdef CTL_TIME_IO
2331         if (TAILQ_EMPTY(&lun->ooa_queue))
2332                 lun->idle_time += getsbinuptime() - lun->last_busy;
2333 #endif
2334         TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2335
2336         bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links);
2337         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
2338         case CTL_ACTION_BLOCK:
2339                 ctsio->io_hdr.blocker = bio;
2340                 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
2341                                   blocked_links);
2342                 mtx_unlock(&lun->lun_lock);
2343                 break;
2344         case CTL_ACTION_PASS:
2345         case CTL_ACTION_SKIP:
2346                 if (softc->ha_mode == CTL_HA_MODE_XFER) {
2347                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
2348                         ctl_enqueue_rtr((union ctl_io *)ctsio);
2349                         mtx_unlock(&lun->lun_lock);
2350                 } else {
2351                         ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
2352                         mtx_unlock(&lun->lun_lock);
2353
2354                         /* send msg back to other side */
2355                         msg_info.hdr.original_sc = ctsio->io_hdr.remote_io;
2356                         msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
2357                         msg_info.hdr.msg_type = CTL_MSG_R2R;
2358                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2359                             sizeof(msg_info.hdr), M_WAITOK);
2360                 }
2361                 break;
2362         case CTL_ACTION_OVERLAP:
2363                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2364                 mtx_unlock(&lun->lun_lock);
2365                 ctl_set_overlapped_cmd(ctsio);
2366                 goto badjuju;
2367         case CTL_ACTION_OVERLAP_TAG:
2368                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2369                 mtx_unlock(&lun->lun_lock);
2370                 ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
2371                 goto badjuju;
2372         case CTL_ACTION_ERROR:
2373         default:
2374                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2375                 mtx_unlock(&lun->lun_lock);
2376
2377                 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2378                                          /*retry_count*/ 0);
2379 badjuju:
2380                 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
2381                 msg_info.hdr.original_sc = ctsio->io_hdr.remote_io;
2382                 msg_info.hdr.serializing_sc = NULL;
2383                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
2384                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2385                     sizeof(msg_info.scsi), M_WAITOK);
2386                 ctl_free_io((union ctl_io *)ctsio);
2387                 break;
2388         }
2389 }
2390
2391 /*
2392  * Fill the caller's OOA (Order Of Arrival) entry buffer for one LUN.
2393  */
2394 static void
2395 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
2396                    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
2397 {
2398         union ctl_io *io;
2399
2400         mtx_lock(&lun->lun_lock);
2401         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
2402              (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2403              ooa_links)) {
2404                 struct ctl_ooa_entry *entry;
2405
2406                 /*
2407                  * If we've got more than we can fit, just count the
2408                  * remaining entries.
2409                  */
2410                 if (*cur_fill_num >= ooa_hdr->alloc_num)
2411                         continue;
2412
2413                 entry = &kern_entries[*cur_fill_num];
2414
2415                 entry->tag_num = io->scsiio.tag_num;
2416                 entry->lun_num = lun->lun;
2417 #ifdef CTL_TIME_IO
2418                 entry->start_bt = io->io_hdr.start_bt;
2419 #endif
2420                 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
2421                 entry->cdb_len = io->scsiio.cdb_len;
2422                 if (io->io_hdr.blocker != NULL)
2423                         entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
2424
2425                 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
2426                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
2427
2428                 if (io->io_hdr.flags & CTL_FLAG_ABORT)
2429                         entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
2430
2431                 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
2432                         entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
2433
2434                 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
2435                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
2436
2437                 if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED)
2438                         entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED;
2439
2440                 if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)
2441                         entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT;
2442         }
2443         mtx_unlock(&lun->lun_lock);
2444 }
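
/*
 * The consumer of this fill routine is the CTL_GET_OOA ioctl below.  A
 * minimal userland sketch, assuming only the struct ctl_ooa fields used
 * in this file and the /dev/cam/ctl node created in ctl_init():
 */
#if 0	/* Userland usage sketch, not kernel code. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <cam/ctl/ctl_ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
dump_ooa(void)
{
	struct ctl_ooa ooa;
	int fd;

	if ((fd = open("/dev/cam/ctl", O_RDWR)) == -1)
		return (-1);
	memset(&ooa, 0, sizeof(ooa));
	ooa.alloc_num = 256;
	ooa.alloc_len = ooa.alloc_num * sizeof(struct ctl_ooa_entry);
	if ((ooa.entries = malloc(ooa.alloc_len)) == NULL) {
		close(fd);
		return (-1);
	}
	ooa.flags = CTL_OOA_FLAG_ALL_LUNS;	/* Or set ooa.lun_num. */
	if (ioctl(fd, CTL_GET_OOA, &ooa) == 0)
		printf("%u entries, %u dropped\n", ooa.fill_num,
		    ooa.dropped_num);
	free(ooa.entries);
	close(fd);
	return (0);
}
#endif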
2445
2446 /*
2447  * Escape characters that are illegal or not recommended in XML.
2448  */
2449 int
2450 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
2451 {
2452         char *end = str + size;
2453         int retval;
2454
2455         retval = 0;
2456
2457         for (; str < end && *str; str++) {
2458                 switch (*str) {
2459                 case '&':
2460                         retval = sbuf_printf(sb, "&amp;");
2461                         break;
2462                 case '>':
2463                         retval = sbuf_printf(sb, "&gt;");
2464                         break;
2465                 case '<':
2466                         retval = sbuf_printf(sb, "&lt;");
2467                         break;
2468                 default:
2469                         retval = sbuf_putc(sb, *str);
2470                         break;
2471                 }
2472
2473                 if (retval != 0)
2474                         break;
2475
2476         }
2477
2478         return (retval);
2479 }
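
/*
 * A minimal usage sketch for the escaper above, assuming the standard
 * sbuf(9) interface already used by this file; "lun_name" and the
 * function name are hypothetical.
 */
static void
ctl_sbuf_esc_sketch(struct sbuf *sb, char *lun_name, int len)
{
	sbuf_printf(sb, "<name>");
	ctl_sbuf_printf_esc(sb, lun_name, len);	/* &, < and > get escaped. */
	sbuf_printf(sb, "</name>");
}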
2480
2481 static void
2482 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
2483 {
2484         struct scsi_vpd_id_descriptor *desc;
2485         int i;
2486
2487         if (id == NULL || id->len < 4)
2488                 return;
2489         desc = (struct scsi_vpd_id_descriptor *)id->data;
2490         switch (desc->id_type & SVPD_ID_TYPE_MASK) {
2491         case SVPD_ID_TYPE_T10:
2492                 sbuf_printf(sb, "t10.");
2493                 break;
2494         case SVPD_ID_TYPE_EUI64:
2495                 sbuf_printf(sb, "eui.");
2496                 break;
2497         case SVPD_ID_TYPE_NAA:
2498                 sbuf_printf(sb, "naa.");
2499                 break;
2500         case SVPD_ID_TYPE_SCSI_NAME:
2501                 break;
2502         }
2503         switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
2504         case SVPD_ID_CODESET_BINARY:
2505                 for (i = 0; i < desc->length; i++)
2506                         sbuf_printf(sb, "%02x", desc->identifier[i]);
2507                 break;
2508         case SVPD_ID_CODESET_ASCII:
2509                 sbuf_printf(sb, "%.*s", (int)desc->length,
2510                     (char *)desc->identifier);
2511                 break;
2512         case SVPD_ID_CODESET_UTF8:
2513                 sbuf_printf(sb, "%s", (char *)desc->identifier);
2514                 break;
2515         }
2516 }
2517
2518 static int
2519 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2520           struct thread *td)
2521 {
2522         struct ctl_softc *softc = dev->si_drv1;
2523         struct ctl_port *port;
2524         struct ctl_lun *lun;
2525         int retval;
2526
2527         retval = 0;
2528
2529         switch (cmd) {
2530         case CTL_IO:
2531                 retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
2532                 break;
2533         case CTL_ENABLE_PORT:
2534         case CTL_DISABLE_PORT:
2535         case CTL_SET_PORT_WWNS: {
2536                 struct ctl_port *port;
2537                 struct ctl_port_entry *entry;
2538
2539                 entry = (struct ctl_port_entry *)addr;
2540
2541                 mtx_lock(&softc->ctl_lock);
2542                 STAILQ_FOREACH(port, &softc->port_list, links) {
2543                         int action, done;
2544
2545                         if (port->targ_port < softc->port_min ||
2546                             port->targ_port >= softc->port_max)
2547                                 continue;
2548
2549                         action = 0;
2550                         done = 0;
2551                         if ((entry->port_type == CTL_PORT_NONE)
2552                          && (entry->targ_port == port->targ_port)) {
2553                                 /*
2554                                  * If the user only wants to enable or
2555                                  * disable or set WWNs on a specific port,
2556                                  * do the operation and we're done.
2557                                  */
2558                                 action = 1;
2559                                 done = 1;
2560                         } else if (entry->port_type & port->port_type) {
2561                                 /*
2562                                  * Compare the user's type mask with the
2563                                  * particular frontend type to see if we
2564                                  * have a match.
2565                                  */
2566                                 action = 1;
2567                                 done = 0;
2568
2569                                 /*
2570                                  * Make sure the user isn't trying to set
2571                                  * WWNs on multiple ports at the same time.
2572                                  */
2573                                 if (cmd == CTL_SET_PORT_WWNS) {
2574                                         printf("%s: Can't set WWNs on "
2575                                                "multiple ports\n", __func__);
2576                                         retval = EINVAL;
2577                                         break;
2578                                 }
2579                         }
2580                         if (action == 0)
2581                                 continue;
2582
2583                         /*
2584                          * XXX KDM we have to drop the lock here, because
2585                          * the online/offline operations can potentially
2586                          * block.  We need to reference count the frontends
2587                          * so they can't go away.
2588                          */
2589                         if (cmd == CTL_ENABLE_PORT) {
2590                                 mtx_unlock(&softc->ctl_lock);
2591                                 ctl_port_online(port);
2592                                 mtx_lock(&softc->ctl_lock);
2593                         } else if (cmd == CTL_DISABLE_PORT) {
2594                                 mtx_unlock(&softc->ctl_lock);
2595                                 ctl_port_offline(port);
2596                                 mtx_lock(&softc->ctl_lock);
2597                         } else if (cmd == CTL_SET_PORT_WWNS) {
2598                                 ctl_port_set_wwns(port,
2599                                     (entry->flags & CTL_PORT_WWNN_VALID) ?
2600                                     1 : 0, entry->wwnn,
2601                                     (entry->flags & CTL_PORT_WWPN_VALID) ?
2602                                     1 : 0, entry->wwpn);
2603                         }
2604                         if (done != 0)
2605                                 break;
2606                 }
2607                 mtx_unlock(&softc->ctl_lock);
2608                 break;
2609         }
2610         case CTL_GET_OOA: {
2611                 struct ctl_ooa *ooa_hdr;
2612                 struct ctl_ooa_entry *entries;
2613                 uint32_t cur_fill_num;
2614
2615                 ooa_hdr = (struct ctl_ooa *)addr;
2616
2617                 if ((ooa_hdr->alloc_len == 0)
2618                  || (ooa_hdr->alloc_num == 0)) {
2619                         printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2620                                "must be non-zero\n", __func__,
2621                                ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2622                         retval = EINVAL;
2623                         break;
2624                 }
2625
2626                 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2627                     sizeof(struct ctl_ooa_entry))) {
2628                         printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2629                                "num %u * sizeof(struct ctl_ooa_entry) %zu\n",
2630                                __func__, ooa_hdr->alloc_len,
2631                                ooa_hdr->alloc_num, sizeof(struct ctl_ooa_entry));
2632                         retval = EINVAL;
2633                         break;
2634                 }
2635
2636                 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2637                 if (entries == NULL) {
2638                         printf("%s: could not allocate %d bytes for OOA "
2639                                "dump\n", __func__, ooa_hdr->alloc_len);
2640                         retval = ENOMEM;
2641                         break;
2642                 }
2643
2644                 mtx_lock(&softc->ctl_lock);
2645                 if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
2646                     (ooa_hdr->lun_num >= ctl_max_luns ||
2647                      softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
2648                         mtx_unlock(&softc->ctl_lock);
2649                         free(entries, M_CTL);
2650                         printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2651                                __func__, (uintmax_t)ooa_hdr->lun_num);
2652                         retval = EINVAL;
2653                         break;
2654                 }
2655
2656                 cur_fill_num = 0;
2657
2658                 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2659                         STAILQ_FOREACH(lun, &softc->lun_list, links) {
2660                                 ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2661                                     ooa_hdr, entries);
2662                         }
2663                 } else {
2664                         lun = softc->ctl_luns[ooa_hdr->lun_num];
2665                         ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
2666                             entries);
2667                 }
2668                 mtx_unlock(&softc->ctl_lock);
2669
2670                 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2671                 ooa_hdr->fill_len = ooa_hdr->fill_num *
2672                         sizeof(struct ctl_ooa_entry);
2673                 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2674                 if (retval != 0) {
2675                         printf("%s: error copying out %d bytes for OOA dump\n", 
2676                                __func__, ooa_hdr->fill_len);
2677                 }
2678
2679                 getbinuptime(&ooa_hdr->cur_bt);
2680
2681                 if (cur_fill_num > ooa_hdr->alloc_num) {
2682                         ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
2683                         ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2684                 } else {
2685                         ooa_hdr->dropped_num = 0;
2686                         ooa_hdr->status = CTL_OOA_OK;
2687                 }
2688
2689                 free(entries, M_CTL);
2690                 break;
2691         }
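        /*
         * Usage sketch (illustrative, not part of the driver): a userland
         * consumer of CTL_GET_OOA sizes the entry array itself, assuming
         * fd is open on the CTL control device (conventionally
         * /dev/cam/ctl), and on CTL_OOA_NEED_MORE_SPACE grows the array
         * by dropped_num and retries:
         *
         *	struct ctl_ooa hdr;
         *
         *	memset(&hdr, 0, sizeof(hdr));
         *	hdr.alloc_num = 256;
         *	hdr.alloc_len = hdr.alloc_num * sizeof(struct ctl_ooa_entry);
         *	hdr.entries = calloc(hdr.alloc_num, sizeof(*hdr.entries));
         *	hdr.flags = CTL_OOA_FLAG_ALL_LUNS;
         *	if (ioctl(fd, CTL_GET_OOA, &hdr) == 0 &&
         *	    hdr.status == CTL_OOA_NEED_MORE_SPACE) {
         *		(grow by hdr.dropped_num and issue the ioctl again)
         *	}
         */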
2692         case CTL_DELAY_IO: {
2693                 struct ctl_io_delay_info *delay_info;
2694
2695                 delay_info = (struct ctl_io_delay_info *)addr;
2696
2697 #ifdef CTL_IO_DELAY
2698                 mtx_lock(&softc->ctl_lock);
2699                 if (delay_info->lun_id >= ctl_max_luns ||
2700                     (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
2701                         mtx_unlock(&softc->ctl_lock);
2702                         delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2703                         break;
2704                 }
2705                 mtx_lock(&lun->lun_lock);
2706                 mtx_unlock(&softc->ctl_lock);
2707                 delay_info->status = CTL_DELAY_STATUS_OK;
2708                 switch (delay_info->delay_type) {
2709                 case CTL_DELAY_TYPE_CONT:
2710                 case CTL_DELAY_TYPE_ONESHOT:
2711                         break;
2712                 default:
2713                         delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE;
2714                         break;
2715                 }
2716                 switch (delay_info->delay_loc) {
2717                 case CTL_DELAY_LOC_DATAMOVE:
2718                         lun->delay_info.datamove_type = delay_info->delay_type;
2719                         lun->delay_info.datamove_delay = delay_info->delay_secs;
2720                         break;
2721                 case CTL_DELAY_LOC_DONE:
2722                         lun->delay_info.done_type = delay_info->delay_type;
2723                         lun->delay_info.done_delay = delay_info->delay_secs;
2724                         break;
2725                 default:
2726                         delay_info->status = CTL_DELAY_STATUS_INVALID_LOC;
2727                         break;
2728                 }
2729                 mtx_unlock(&lun->lun_lock);
2730 #else
2731                 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2732 #endif /* CTL_IO_DELAY */
2733                 break;
2734         }
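        /*
         * Usage sketch (illustrative): arming a one-shot five second delay
         * on the done path of LUN 0, assuming fd is open on the CTL
         * control device and the kernel was built with options CTL_IO_DELAY:
         *
         *	struct ctl_io_delay_info di;
         *
         *	memset(&di, 0, sizeof(di));
         *	di.lun_id = 0;
         *	di.delay_type = CTL_DELAY_TYPE_ONESHOT;
         *	di.delay_loc = CTL_DELAY_LOC_DONE;
         *	di.delay_secs = 5;
         *	ioctl(fd, CTL_DELAY_IO, &di);
         *	(then check di.status == CTL_DELAY_STATUS_OK)
         */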
2735         case CTL_ERROR_INJECT: {
2736                 struct ctl_error_desc *err_desc, *new_err_desc;
2737
2738                 err_desc = (struct ctl_error_desc *)addr;
2739
2740                 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2741                                       M_WAITOK | M_ZERO);
2742                 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2743
2744                 mtx_lock(&softc->ctl_lock);
2745                 if (err_desc->lun_id >= ctl_max_luns ||
2746                     (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
2747                         mtx_unlock(&softc->ctl_lock);
2748                         free(new_err_desc, M_CTL);
2749                         printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2750                                __func__, (uintmax_t)err_desc->lun_id);
2751                         retval = EINVAL;
2752                         break;
2753                 }
2754                 mtx_lock(&lun->lun_lock);
2755                 mtx_unlock(&softc->ctl_lock);
2756
2757                 /*
2758                  * We could do some checking here to verify the validity
2759                  * of the request, but given the complexity of error
2760                  * injection requests, the checking logic would be fairly
2761                  * complex.
2762                  *
2763                  * For now, if the request is invalid, it just won't get
2764                  * executed and might get deleted.
2765                  */
2766                 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2767
2768                 /*
2769                  * XXX KDM check to make sure the serial number is unique,
2770                  * in case we somehow manage to wrap.  That shouldn't
2771                  * happen for a very long time, but it's the right thing to
2772                  * do.
2773                  */
2774                 new_err_desc->serial = lun->error_serial;
2775                 err_desc->serial = lun->error_serial;
2776                 lun->error_serial++;
2777
2778                 mtx_unlock(&lun->lun_lock);
2779                 break;
2780         }
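        /*
         * The serial number handed back in err_desc->serial above is the
         * caller's handle for removing the injected error again via the
         * delete handler below.  Illustrative flow (fields describing the
         * error itself are elided here):
         *
         *	struct ctl_error_desc desc;
         *
         *	memset(&desc, 0, sizeof(desc));
         *	desc.lun_id = 0;
         *	(describe the error to inject)
         *	ioctl(fd, CTL_ERROR_INJECT, &desc);
         *	...
         *	ioctl(fd, CTL_ERROR_INJECT_DELETE, &desc);
         */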
2781         case CTL_ERROR_INJECT_DELETE: {
2782                 struct ctl_error_desc *delete_desc, *desc, *desc2;
2783                 int delete_done;
2784
2785                 delete_desc = (struct ctl_error_desc *)addr;
2786                 delete_done = 0;
2787
2788                 mtx_lock(&softc->ctl_lock);
2789                 if (delete_desc->lun_id >= ctl_max_luns ||
2790                     (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
2791                         mtx_unlock(&softc->ctl_lock);
2792                         printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2793                                __func__, (uintmax_t)delete_desc->lun_id);
2794                         retval = EINVAL;
2795                         break;
2796                 }
2797                 mtx_lock(&lun->lun_lock);
2798                 mtx_unlock(&softc->ctl_lock);
2799                 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2800                         if (desc->serial != delete_desc->serial)
2801                                 continue;
2802
2803                         STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2804                                       links);
2805                         free(desc, M_CTL);
2806                         delete_done = 1;
2807                 }
2808                 mtx_unlock(&lun->lun_lock);
2809                 if (delete_done == 0) {
2810                         printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2811                                "error serial %ju on LUN %u\n", __func__,
2812                                (uintmax_t)delete_desc->serial, delete_desc->lun_id);
2813                         retval = EINVAL;
2814                         break;
2815                 }
2816                 break;
2817         }
2818         case CTL_DUMP_STRUCTS: {
2819                 int j, k;
2820                 struct ctl_port *port;
2821                 struct ctl_frontend *fe;
2822
2823                 mtx_lock(&softc->ctl_lock);
2824                 printf("CTL Persistent Reservation information start:\n");
2825                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2826                         mtx_lock(&lun->lun_lock);
2827                         if ((lun->flags & CTL_LUN_DISABLED) != 0) {
2828                                 mtx_unlock(&lun->lun_lock);
2829                                 continue;
2830                         }
2831
2832                         for (j = 0; j < ctl_max_ports; j++) {
2833                                 if (lun->pr_keys[j] == NULL)
2834                                         continue;
2835                                 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++) {
2836                                         if (lun->pr_keys[j][k] == 0)
2837                                                 continue;
2838                                         printf("  LUN %ju port %d iid %d key "
2839                                                "%#jx\n", lun->lun, j, k,
2840                                                (uintmax_t)lun->pr_keys[j][k]);
2841                                 }
2842                         }
2843                         mtx_unlock(&lun->lun_lock);
2844                 }
2845                 printf("CTL Persistent Reservation information end\n");
2846                 printf("CTL Ports:\n");
2847                 STAILQ_FOREACH(port, &softc->port_list, links) {
2848                         printf("  Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
2849                                "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
2850                                port->frontend->name, port->port_type,
2851                                port->physical_port, port->virtual_port,
2852                                (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
2853                         for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2854                                 if (port->wwpn_iid[j].in_use == 0 &&
2855                                     port->wwpn_iid[j].wwpn == 0 &&
2856                                     port->wwpn_iid[j].name == NULL)
2857                                         continue;
2858
2859                                 printf("    iid %u use %d WWPN %#jx '%s'\n",
2860                                     j, port->wwpn_iid[j].in_use,
2861                                     (uintmax_t)port->wwpn_iid[j].wwpn,
2862                                     port->wwpn_iid[j].name);
2863                         }
2864                 }
2865                 printf("CTL Port information end\n");
2866                 mtx_unlock(&softc->ctl_lock);
2867                 /*
2868                  * XXX KDM calling this without a lock.  We'd likely want
2869                  * to drop the lock before calling the frontend's dump
2870                  * routine anyway.
2871                  */
2872                 printf("CTL Frontends:\n");
2873                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2874                         printf("  Frontend '%s'\n", fe->name);
2875                         if (fe->fe_dump != NULL)
2876                                 fe->fe_dump();
2877                 }
2878                 printf("CTL Frontend information end\n");
2879                 break;
2880         }
2881         case CTL_LUN_REQ: {
2882                 struct ctl_lun_req *lun_req;
2883                 struct ctl_backend_driver *backend;
2884                 void *packed;
2885                 nvlist_t *tmp_args_nvl;
2886                 size_t packed_len;
2887
2888                 lun_req = (struct ctl_lun_req *)addr;
2889                 tmp_args_nvl = lun_req->args_nvl;
2890
2891                 backend = ctl_backend_find(lun_req->backend);
2892                 if (backend == NULL) {
2893                         lun_req->status = CTL_LUN_ERROR;
2894                         snprintf(lun_req->error_str,
2895                                  sizeof(lun_req->error_str),
2896                                  "Backend \"%s\" not found.",
2897                                  lun_req->backend);
2898                         break;
2899                 }
2900
2901                 if (lun_req->args != NULL) {
2902                         packed = malloc(lun_req->args_len, M_CTL, M_WAITOK);
2903                         if (copyin(lun_req->args, packed, lun_req->args_len) != 0) {
2904                                 free(packed, M_CTL);
2905                                 lun_req->status = CTL_LUN_ERROR;
2906                                 snprintf(lun_req->error_str, sizeof(lun_req->error_str),
2907                                     "Cannot copyin args.");
2908                                 break;
2909                         }
2910                         lun_req->args_nvl = nvlist_unpack(packed,
2911                             lun_req->args_len, 0);
2912                         free(packed, M_CTL);
2913
2914                         if (lun_req->args_nvl == NULL) {
2915                                 lun_req->status = CTL_LUN_ERROR;
2916                                 snprintf(lun_req->error_str, sizeof(lun_req->error_str),
2917                                     "Cannot unpack args nvlist.");
2918                                 break;
2919                         }
2920                 } else
2921                         lun_req->args_nvl = nvlist_create(0);
2922
2923                 retval = backend->ioctl(dev, cmd, addr, flag, td);
2924                 nvlist_destroy(lun_req->args_nvl);
2925                 lun_req->args_nvl = tmp_args_nvl;
2926
2927                 if (lun_req->result_nvl != NULL) {
2928                         if (lun_req->result != NULL) {
2929                                 packed = nvlist_pack(lun_req->result_nvl,
2930                                     &packed_len);
2931                                 if (packed == NULL) {
2932                                         lun_req->status = CTL_LUN_ERROR;
2933                                         snprintf(lun_req->error_str,
2934                                             sizeof(lun_req->error_str),
2935                                             "Cannot pack result nvlist.");
2936                                         break;
2937                                 }
2938
2939                                 if (packed_len > lun_req->result_len) {
2940                                         lun_req->status = CTL_LUN_ERROR;
2941                                         snprintf(lun_req->error_str,
2942                                             sizeof(lun_req->error_str),
2943                                             "Result nvlist too large.");
2944                                         free(packed, M_NVLIST);
2945                                         break;
2946                                 }
2947
2948                                 if (copyout(packed, lun_req->result, packed_len)) {
2949                                         lun_req->status = CTL_LUN_ERROR;
2950                                         snprintf(lun_req->error_str,
2951                                             sizeof(lun_req->error_str),
2952                                             "Cannot copyout() the result.");
2953                                         free(packed, M_NVLIST);
2954                                         break;
2955                                 }
2956
2957                                 lun_req->result_len = packed_len;
2958                                 free(packed, M_NVLIST);
2959                         }
2960
2961                         nvlist_destroy(lun_req->result_nvl);
2962                 }
2963                 break;
2964         }
2965         case CTL_LUN_LIST: {
2966                 struct sbuf *sb;
2967                 struct ctl_lun_list *list;
2968                 const char *name, *value;
2969                 void *cookie;
2970                 int type;
2971
2972                 list = (struct ctl_lun_list *)addr;
2973
2974                 /*
2975                  * Allocate a fixed length sbuf here, based on the length
2976                  * of the user's buffer.  We could allocate an auto-extending
2977                  * buffer, and then tell the user how much larger our
2978                  * amount of data is than his buffer, but that presents
2979                  * some problems:
2980                  *
2981                  * 1.  The sbuf(9) routines use a blocking malloc, and so
2982                  *     we can't hold a lock while calling them with an
2983                  *     auto-extending buffer.
2984                  *
2985                  * 2.  There is not currently a LUN reference counting
2986                  *     mechanism, outside of outstanding transactions on
2987                  *     the LUN's OOA queue.  So a LUN could go away on us
2988                  *     while we're getting the LUN number, backend-specific
2989                  *     information, etc.  Thus, given the way things
2990                  *     currently work, we need to hold the CTL lock while
2991                  *     grabbing LUN information.
2992                  *
2993                  * So, from the user's standpoint, the best thing to do is
2994                  * allocate what he thinks is a reasonable buffer length,
2995                  * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
2996                  * double the buffer length and try again.  (And repeat
2997                  * that until he succeeds.)
2998                  */
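                /*
                 * An illustrative userland sketch of that retry loop
                 * (error handling elided; fd is assumed open on the CTL
                 * control device):
                 *
                 *	struct ctl_lun_list list;
                 *	char *xml = NULL;
                 *	uint32_t len;
                 *
                 *	for (len = 4096;; len *= 2) {
                 *		xml = realloc(xml, len);
                 *		memset(&list, 0, sizeof(list));
                 *		list.alloc_len = len;
                 *		list.lun_xml = xml;
                 *		if (ioctl(fd, CTL_LUN_LIST, &list) != 0 ||
                 *		    list.status != CTL_LUN_LIST_NEED_MORE_SPACE)
                 *			break;
                 *	}
                 */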
2999                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3000                 if (sb == NULL) {
3001                         list->status = CTL_LUN_LIST_ERROR;
3002                         snprintf(list->error_str, sizeof(list->error_str),
3003                                  "Unable to allocate %d bytes for LUN list",
3004                                  list->alloc_len);
3005                         break;
3006                 }
3007
3008                 sbuf_printf(sb, "<ctllunlist>\n");
3009
3010                 mtx_lock(&softc->ctl_lock);
3011                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3012                         mtx_lock(&lun->lun_lock);
3013                         retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3014                                              (uintmax_t)lun->lun);
3015
3016                         /*
3017                          * Bail out as soon as we see that we've overfilled
3018                          * the buffer.
3019                          */
3020                         if (retval != 0)
3021                                 break;
3022
3023                         retval = sbuf_printf(sb, "\t<backend_type>%s"
3024                                              "</backend_type>\n",
3025                                              (lun->backend == NULL) ?  "none" :
3026                                              lun->backend->name);
3027
3028                         if (retval != 0)
3029                                 break;
3030
3031                         retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
3032                                              lun->be_lun->lun_type);
3033
3034                         if (retval != 0)
3035                                 break;
3036
3037                         if (lun->backend == NULL) {
3038                                 retval = sbuf_printf(sb, "</lun>\n");
3039                                 if (retval != 0)
3040                                         break;
                                     mtx_unlock(&lun->lun_lock);
3041                                 continue;
3042                         }
3043
3044                         retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
3045                                              (lun->be_lun->maxlba > 0) ?
3046                                              lun->be_lun->maxlba + 1 : 0);
3047
3048                         if (retval != 0)
3049                                 break;
3050
3051                         retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
3052                                              lun->be_lun->blocksize);
3053
3054                         if (retval != 0)
3055                                 break;
3056
3057                         retval = sbuf_printf(sb, "\t<serial_number>");
3058
3059                         if (retval != 0)
3060                                 break;
3061
3062                         retval = ctl_sbuf_printf_esc(sb,
3063                             lun->be_lun->serial_num,
3064                             sizeof(lun->be_lun->serial_num));
3065
3066                         if (retval != 0)
3067                                 break;
3068
3069                         retval = sbuf_printf(sb, "</serial_number>\n");
3070                 
3071                         if (retval != 0)
3072                                 break;
3073
3074                         retval = sbuf_printf(sb, "\t<device_id>");
3075
3076                         if (retval != 0)
3077                                 break;
3078
3079                         retval = ctl_sbuf_printf_esc(sb,
3080                             lun->be_lun->device_id,
3081                             sizeof(lun->be_lun->device_id));
3082
3083                         if (retval != 0)
3084                                 break;
3085
3086                         retval = sbuf_printf(sb, "</device_id>\n");
3087
3088                         if (retval != 0)
3089                                 break;
3090
3091                         if (lun->backend->lun_info != NULL) {
3092                                 retval = lun->backend->lun_info(lun->be_lun, sb);
3093                                 if (retval != 0)
3094                                         break;
3095                         }
3096
3097                         cookie = NULL;
3098                         while ((name = nvlist_next(lun->be_lun->options, &type,
3099                             &cookie)) != NULL) {
3100                                 sbuf_printf(sb, "\t<%s>", name);
3101
3102                                 if (type == NV_TYPE_STRING) {
3103                                         value = dnvlist_get_string(
3104                                             lun->be_lun->options, name, NULL);
3105                                         if (value != NULL)
3106                                                 sbuf_printf(sb, "%s", value);
3107                                 }
3108
3109                                 sbuf_printf(sb, "</%s>\n", name);
3110                         }
3111
3112                         retval = sbuf_printf(sb, "</lun>\n");
3113
3114                         if (retval != 0)
3115                                 break;
3116                         mtx_unlock(&lun->lun_lock);
3117                 }
3118                 if (lun != NULL)
3119                         mtx_unlock(&lun->lun_lock);
3120                 mtx_unlock(&softc->ctl_lock);
3121
3122                 if ((retval != 0)
3123                  || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3124                         retval = 0;
3125                         sbuf_delete(sb);
3126                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3127                         snprintf(list->error_str, sizeof(list->error_str),
3128                                  "Out of space, %d bytes is too small",
3129                                  list->alloc_len);
3130                         break;
3131                 }
3132
3133                 sbuf_finish(sb);
3134
3135                 retval = copyout(sbuf_data(sb), list->lun_xml,
3136                                  sbuf_len(sb) + 1);
3137
3138                 list->fill_len = sbuf_len(sb) + 1;
3139                 list->status = CTL_LUN_LIST_OK;
3140                 sbuf_delete(sb);
3141                 break;
3142         }
3143         case CTL_ISCSI: {
3144                 struct ctl_iscsi *ci;
3145                 struct ctl_frontend *fe;
3146
3147                 ci = (struct ctl_iscsi *)addr;
3148
3149                 fe = ctl_frontend_find("iscsi");
3150                 if (fe == NULL) {
3151                         ci->status = CTL_ISCSI_ERROR;
3152                         snprintf(ci->error_str, sizeof(ci->error_str),
3153                             "Frontend \"iscsi\" not found.");
3154                         break;
3155                 }
3156
3157                 retval = fe->ioctl(dev, cmd, addr, flag, td);
3158                 break;
3159         }
3160         case CTL_PORT_REQ: {
3161                 struct ctl_req *req;
3162                 struct ctl_frontend *fe;
3163                 void *packed;
3164                 nvlist_t *tmp_args_nvl;
3165                 size_t packed_len;
3166
3167                 req = (struct ctl_req *)addr;
3168                 tmp_args_nvl = req->args_nvl;
3169
3170                 fe = ctl_frontend_find(req->driver);
3171                 if (fe == NULL) {
3172                         req->status = CTL_LUN_ERROR;
3173                         snprintf(req->error_str, sizeof(req->error_str),
3174                             "Frontend \"%s\" not found.", req->driver);
3175                         break;
3176                 }
3177
3178                 if (req->args != NULL) {
3179                         packed = malloc(req->args_len, M_CTL, M_WAITOK);
3180                         if (copyin(req->args, packed, req->args_len) != 0) {
3181                                 free(packed, M_CTL);
3182                                 req->status = CTL_LUN_ERROR;
3183                                 snprintf(req->error_str, sizeof(req->error_str),
3184                                     "Cannot copyin args.");
3185                                 break;
3186                         }
3187                         req->args_nvl = nvlist_unpack(packed,
3188                             req->args_len, 0);
3189                         free(packed, M_CTL);
3190
3191                         if (req->args_nvl == NULL) {
3192                                 req->status = CTL_LUN_ERROR;
3193                                 snprintf(req->error_str, sizeof(req->error_str),
3194                                     "Cannot unpack args nvlist.");
3195                                 break;
3196                         }
3197                 } else
3198                         req->args_nvl = nvlist_create(0);
3199
3200                 if (fe->ioctl)
3201                         retval = fe->ioctl(dev, cmd, addr, flag, td);
3202                 else
3203                         retval = ENODEV;
3204
3205                 nvlist_destroy(req->args_nvl);
3206                 req->args_nvl = tmp_args_nvl;
3207
3208                 if (req->result_nvl != NULL) {
3209                         if (req->result != NULL) {
3210                                 packed = nvlist_pack(req->result_nvl,
3211                                     &packed_len);
3212                                 if (packed == NULL) {
3213                                         req->status = CTL_LUN_ERROR;
3214                                         snprintf(req->error_str,
3215                                             sizeof(req->error_str),
3216                                             "Cannot pack result nvlist.");
3217                                         break;
3218                                 }
3219
3220                                 if (packed_len > req->result_len) {
3221                                         req->status = CTL_LUN_ERROR;
3222                                         snprintf(req->error_str,
3223                                             sizeof(req->error_str),
3224                                             "Result nvlist too large.");
3225                                         free(packed, M_NVLIST);
3226                                         break;
3227                                 }
3228
3229                                 if (copyout(packed, req->result, packed_len)) {
3230                                         req->status = CTL_LUN_ERROR;
3231                                         snprintf(req->error_str,
3232                                             sizeof(req->error_str),
3233                                             "Cannot copyout() the result.");
3234                                         free(packed, M_NVLIST);
3235                                         break;
3236                                 }
3237
3238                                 req->result_len = packed_len;
3239                                 free(packed, M_NVLIST);
3240                         }
3241
3242                         nvlist_destroy(req->result_nvl);
3243                 }
3244                 break;
3245         }
3246         case CTL_PORT_LIST: {
3247                 struct sbuf *sb;
3248                 struct ctl_port *port;
3249                 struct ctl_lun_list *list;
3250                 const char *name, *value;
3251                 void *cookie;
3252                 int j, type;
3253                 uint32_t plun;
3254
3255                 list = (struct ctl_lun_list *)addr;
3256
3257                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3258                 if (sb == NULL) {
3259                         list->status = CTL_LUN_LIST_ERROR;
3260                         snprintf(list->error_str, sizeof(list->error_str),
3261                                  "Unable to allocate %d bytes for port list",
3262                                  list->alloc_len);
3263                         break;
3264                 }
3265
3266                 sbuf_printf(sb, "<ctlportlist>\n");
3267
3268                 mtx_lock(&softc->ctl_lock);
3269                 STAILQ_FOREACH(port, &softc->port_list, links) {
3270                         retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
3271                                              (uintmax_t)port->targ_port);
3272
3273                         /*
3274                          * Bail out as soon as we see that we've overfilled
3275                          * the buffer.
3276                          */
3277                         if (retval != 0)
3278                                 break;
3279
3280                         retval = sbuf_printf(sb, "\t<frontend_type>%s"
3281                             "</frontend_type>\n", port->frontend->name);
3282                         if (retval != 0)
3283                                 break;
3284
3285                         retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
3286                                              port->port_type);
3287                         if (retval != 0)
3288                                 break;
3289
3290                         retval = sbuf_printf(sb, "\t<online>%s</online>\n",
3291                             (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
3292                         if (retval != 0)
3293                                 break;
3294
3295                         retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
3296                             port->port_name);
3297                         if (retval != 0)
3298                                 break;
3299
3300                         retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
3301                             port->physical_port);
3302                         if (retval != 0)
3303                                 break;
3304
3305                         retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
3306                             port->virtual_port);
3307                         if (retval != 0)
3308                                 break;
3309
3310                         if (port->target_devid != NULL) {
3311                                 sbuf_printf(sb, "\t<target>");
3312                                 ctl_id_sbuf(port->target_devid, sb);
3313                                 sbuf_printf(sb, "</target>\n");
3314                         }
3315
3316                         if (port->port_devid != NULL) {
3317                                 sbuf_printf(sb, "\t<port>");
3318                                 ctl_id_sbuf(port->port_devid, sb);
3319                                 sbuf_printf(sb, "</port>\n");
3320                         }
3321
3322                         if (port->port_info != NULL) {
3323                                 retval = port->port_info(port->onoff_arg, sb);
3324                                 if (retval != 0)
3325                                         break;
3326                         }
3327
3328                         cookie = NULL;
3329                         while ((name = nvlist_next(port->options, &type,
3330                             &cookie)) != NULL) {
3331                                 sbuf_printf(sb, "\t<%s>", name);
3332
3333                                 if (type == NV_TYPE_STRING) {
3334                                         value = dnvlist_get_string(port->options,
3335                                             name, NULL);
3336                                         if (value != NULL)
3337                                                 sbuf_printf(sb, "%s", value);
3338                                 }
3339
3340                                 sbuf_printf(sb, "</%s>\n", name);
3341                         }
3342
3343                         if (port->lun_map != NULL) {
3344                                 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
3345                                 for (j = 0; j < port->lun_map_size; j++) {
3346                                         plun = ctl_lun_map_from_port(port, j);
3347                                         if (plun == UINT32_MAX)
3348                                                 continue;
3349                                         sbuf_printf(sb,
3350                                             "\t<lun id=\"%u\">%u</lun>\n",
3351                                             j, plun);
3352                                 }
3353                         }
3354
3355                         for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3356                                 if (port->wwpn_iid[j].in_use == 0 ||
3357                                     (port->wwpn_iid[j].wwpn == 0 &&
3358                                      port->wwpn_iid[j].name == NULL))
3359                                         continue;
3360
3361                                 if (port->wwpn_iid[j].name != NULL)
3362                                         retval = sbuf_printf(sb,
3363                                             "\t<initiator id=\"%u\">%s</initiator>\n",
3364                                             j, port->wwpn_iid[j].name);
3365                                 else
3366                                         retval = sbuf_printf(sb,
3367                                             "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
3368                                             j, port->wwpn_iid[j].wwpn);
3369                                 if (retval != 0)
3370                                         break;
3371                         }
3372                         if (retval != 0)
3373                                 break;
3374
3375                         retval = sbuf_printf(sb, "</targ_port>\n");
3376                         if (retval != 0)
3377                                 break;
3378                 }
3379                 mtx_unlock(&softc->ctl_lock);
3380
3381                 if ((retval != 0)
3382                  || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
3383                         retval = 0;
3384                         sbuf_delete(sb);
3385                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3386                         snprintf(list->error_str, sizeof(list->error_str),
3387                                  "Out of space, %d bytes is too small",
3388                                  list->alloc_len);
3389                         break;
3390                 }
3391
3392                 sbuf_finish(sb);
3393
3394                 retval = copyout(sbuf_data(sb), list->lun_xml,
3395                                  sbuf_len(sb) + 1);
3396
3397                 list->fill_len = sbuf_len(sb) + 1;
3398                 list->status = CTL_LUN_LIST_OK;
3399                 sbuf_delete(sb);
3400                 break;
3401         }
3402         case CTL_LUN_MAP: {
3403                 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr;
3404                 struct ctl_port *port;
3405
3406                 mtx_lock(&softc->ctl_lock);
3407                 if (lm->port < softc->port_min ||
3408                     lm->port >= softc->port_max ||
3409                     (port = softc->ctl_ports[lm->port]) == NULL) {
3410                         mtx_unlock(&softc->ctl_lock);
3411                         return (ENXIO);
3412                 }
3413                 if (port->status & CTL_PORT_STATUS_ONLINE) {
3414                         STAILQ_FOREACH(lun, &softc->lun_list, links) {
3415                                 if (ctl_lun_map_to_port(port, lun->lun) ==
3416                                     UINT32_MAX)
3417                                         continue;
3418                                 mtx_lock(&lun->lun_lock);
3419                                 ctl_est_ua_port(lun, lm->port, -1,
3420                                     CTL_UA_LUN_CHANGE);
3421                                 mtx_unlock(&lun->lun_lock);
3422                         }
3423                 }
3424                 mtx_unlock(&softc->ctl_lock);	/* XXX: port_enable sleeps */
3425                 if (lm->plun != UINT32_MAX) {
3426                         if (lm->lun == UINT32_MAX)
3427                                 retval = ctl_lun_map_unset(port, lm->plun);
3428                         else if (lm->lun < ctl_max_luns &&
3429                             softc->ctl_luns[lm->lun] != NULL)
3430                                 retval = ctl_lun_map_set(port, lm->plun, lm->lun);
3431                         else
3432                                 return (ENXIO);
3433                 } else {
3434                         if (lm->lun == UINT32_MAX)
3435                                 retval = ctl_lun_map_deinit(port);
3436                         else
3437                                 retval = ctl_lun_map_init(port);
3438                 }
3439                 if (port->status & CTL_PORT_STATUS_ONLINE)
3440                         ctl_isc_announce_port(port);
3441                 break;
3442         }
3443         case CTL_GET_LUN_STATS: {
3444                 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
3445                 int i;
3446
3447                 /*
3448                  * XXX KDM no locking here.  If the LUN list changes,
3449                  * things can blow up.
3450                  */
3451                 i = 0;
3452                 stats->status = CTL_SS_OK;
3453                 stats->fill_len = 0;
3454                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3455                         if (lun->lun < stats->first_item)
3456                                 continue;
3457                         if (stats->fill_len + sizeof(lun->stats) >
3458                             stats->alloc_len) {
3459                                 stats->status = CTL_SS_NEED_MORE_SPACE;
3460                                 break;
3461                         }
3462                         retval = copyout(&lun->stats, &stats->stats[i++],
3463                                          sizeof(lun->stats));
3464                         if (retval != 0)
3465                                 break;
3466                         stats->fill_len += sizeof(lun->stats);
3467                 }
3468                 stats->num_items = softc->num_luns;
3469                 stats->flags = CTL_STATS_FLAG_NONE;
3470 #ifdef CTL_TIME_IO
3471                 stats->flags |= CTL_STATS_FLAG_TIME_VALID;
3472 #endif
3473                 getnanouptime(&stats->timestamp);
3474                 break;
3475         }
3476         case CTL_GET_PORT_STATS: {
3477                 struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr;
3478                 int i;
3479
3480                 /*
3481                  * XXX KDM no locking here.  If the LUN list changes,
3482                  * things can blow up.
3483                  */
3484                 i = 0;
3485                 stats->status = CTL_SS_OK;
3486                 stats->fill_len = 0;
3487                 STAILQ_FOREACH(port, &softc->port_list, links) {
3488                         if (port->targ_port < stats->first_item)
3489                                 continue;
3490                         if (stats->fill_len + sizeof(port->stats) >
3491                             stats->alloc_len) {
3492                                 stats->status = CTL_SS_NEED_MORE_SPACE;
3493                                 break;
3494                         }
3495                         retval = copyout(&port->stats, &stats->stats[i++],
3496                                          sizeof(port->stats));
3497                         if (retval != 0)
3498                                 break;
3499                         stats->fill_len += sizeof(port->stats);
3500                 }
3501                 stats->num_items = softc->num_ports;
3502                 stats->flags = CTL_STATS_FLAG_NONE;
3503 #ifdef CTL_TIME_IO
3504                 stats->flags |= CTL_STATS_FLAG_TIME_VALID;
3505 #endif
3506                 getnanouptime(&stats->timestamp);
3507                 break;
3508         }
3509         default: {
3510                 /* XXX KDM should we fix this? */
3511 #if 0
3512                 struct ctl_backend_driver *backend;
3513                 unsigned int type;
3514                 int found;
3515
3516                 found = 0;
3517
3518                 /*
3519                  * We encode the backend type as the ioctl type for backend
3520                  * ioctls.  So parse it out here, and then search for a
3521                  * backend of this type.
3522                  */
3523                 type = _IOC_TYPE(cmd);
3524
3525                 STAILQ_FOREACH(backend, &softc->be_list, links) {
3526                         if (backend->type == type) {
3527                                 found = 1;
3528                                 break;
3529                         }
3530                 }
3531                 if (found == 0) {
3532                         printf("ctl: unknown ioctl command %#lx or backend "
3533                                "%d\n", cmd, type);
3534                         retval = EINVAL;
3535                         break;
3536                 }
3537                 retval = backend->ioctl(dev, cmd, addr, flag, td);
3538 #endif
3539                 retval = ENOTTY;
3540                 break;
3541         }
3542         }
3543         return (retval);
3544 }
3545
3546 uint32_t
3547 ctl_get_initindex(struct ctl_nexus *nexus)
3548 {
3549         return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3550 }
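
/*
 * For example (illustrative): with CTL_MAX_INIT_PER_PORT initiators per
 * port, initiator 3 on target port 2 maps to the flat index
 * 3 + 2 * CTL_MAX_INIT_PER_PORT, which is the same flat indexing the
 * pr_keys helpers further below decompose again with / and %.
 */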
3551
3552 int
3553 ctl_lun_map_init(struct ctl_port *port)
3554 {
3555         struct ctl_softc *softc = port->ctl_softc;
3556         struct ctl_lun *lun;
3557         int size = ctl_lun_map_size;
3558         uint32_t i;
3559
3560         if (port->lun_map == NULL || port->lun_map_size < size) {
3561                 port->lun_map_size = 0;
3562                 free(port->lun_map, M_CTL);
3563                 port->lun_map = malloc(size * sizeof(uint32_t),
3564                     M_CTL, M_NOWAIT);
3565         }
3566         if (port->lun_map == NULL)
3567                 return (ENOMEM);
3568         for (i = 0; i < size; i++)
3569                 port->lun_map[i] = UINT32_MAX;
3570         port->lun_map_size = size;
3571         if (port->status & CTL_PORT_STATUS_ONLINE) {
3572                 if (port->lun_disable != NULL) {
3573                         STAILQ_FOREACH(lun, &softc->lun_list, links)
3574                                 port->lun_disable(port->targ_lun_arg, lun->lun);
3575                 }
3576                 ctl_isc_announce_port(port);
3577         }
3578         return (0);
3579 }
3580
3581 int
3582 ctl_lun_map_deinit(struct ctl_port *port)
3583 {
3584         struct ctl_softc *softc = port->ctl_softc;
3585         struct ctl_lun *lun;
3586
3587         if (port->lun_map == NULL)
3588                 return (0);
3589         port->lun_map_size = 0;
3590         free(port->lun_map, M_CTL);
3591         port->lun_map = NULL;
3592         if (port->status & CTL_PORT_STATUS_ONLINE) {
3593                 if (port->lun_enable != NULL) {
3594                         STAILQ_FOREACH(lun, &softc->lun_list, links)
3595                                 port->lun_enable(port->targ_lun_arg, lun->lun);
3596                 }
3597                 ctl_isc_announce_port(port);
3598         }
3599         return (0);
3600 }
3601
3602 int
3603 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
3604 {
3605         int status;
3606         uint32_t old;
3607
3608         if (port->lun_map == NULL) {
3609                 status = ctl_lun_map_init(port);
3610                 if (status != 0)
3611                         return (status);
3612         }
3613         if (plun >= port->lun_map_size)
3614                 return (EINVAL);
3615         old = port->lun_map[plun];
3616         port->lun_map[plun] = glun;
3617         if ((port->status & CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
3618                 if (port->lun_enable != NULL)
3619                         port->lun_enable(port->targ_lun_arg, plun);
3620                 ctl_isc_announce_port(port);
3621         }
3622         return (0);
3623 }
3624
3625 int
3626 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
3627 {
3628         uint32_t old;
3629
3630         if (port->lun_map == NULL || plun >= port->lun_map_size)
3631                 return (0);
3632         old = port->lun_map[plun];
3633         port->lun_map[plun] = UINT32_MAX;
3634         if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
3635                 if (port->lun_disable != NULL)
3636                         port->lun_disable(port->targ_lun_arg, plun);
3637                 ctl_isc_announce_port(port);
3638         }
3639         return (0);
3640 }
3641
3642 uint32_t
3643 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
3644 {
3645
3646         if (port == NULL)
3647                 return (UINT32_MAX);
3648         if (port->lun_map == NULL)
3649                 return (lun_id);
3650         if (lun_id >= port->lun_map_size)
3651                 return (UINT32_MAX);
3652         return (port->lun_map[lun_id]);
3653 }
3654
3655 uint32_t
3656 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
3657 {
3658         uint32_t i;
3659
3660         if (port == NULL)
3661                 return (UINT32_MAX);
3662         if (port->lun_map == NULL)
3663                 return (lun_id);
3664         for (i = 0; i < port->lun_map_size; i++) {
3665                 if (port->lun_map[i] == lun_id)
3666                         return (i);
3667         }
3668         return (UINT32_MAX);
3669 }
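
/*
 * The map is indexed by port-visible LUN: lun_map[plun] holds the global
 * CTL LUN backing that slot, or UINT32_MAX for an empty slot.  For
 * example (illustrative), after
 *
 *	ctl_lun_map_init(port);
 *	ctl_lun_map_set(port, 0, 5);
 *
 * ctl_lun_map_from_port(port, 0) returns 5 by direct lookup, while
 * ctl_lun_map_to_port(port, 5) finds 0 by linear search.  With no map
 * allocated at all, both directions are the identity mapping.
 */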
3670
3671 uint32_t
3672 ctl_decode_lun(uint64_t encoded)
3673 {
3674         uint8_t lun[8];
3675         uint32_t result = 0xffffffff;
3676
3677         be64enc(lun, encoded);
3678         switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
3679         case RPL_LUNDATA_ATYP_PERIPH:
3680                 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
3681                     lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
3682                         result = lun[1];
3683                 break;
3684         case RPL_LUNDATA_ATYP_FLAT:
3685                 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
3686                     lun[6] == 0 && lun[7] == 0)
3687                         result = ((lun[0] & 0x3f) << 8) + lun[1];
3688                 break;
3689         case RPL_LUNDATA_ATYP_EXTLUN:
3690                 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
3691                 case 0x02:
3692                         switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
3693                         case 0x00:
3694                                 result = lun[1];
3695                                 break;
3696                         case 0x10:
3697                                 result = (lun[1] << 16) + (lun[2] << 8) +
3698                                     lun[3];
3699                                 break;
3700                         case 0x20:
3701                                 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
3702                                         result = (lun[2] << 24) +
3703                                             (lun[3] << 16) + (lun[4] << 8) +
3704                                             lun[5];
3705                                 break;
3706                         }
3707                         break;
3708                 case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
3709                         result = 0xffffffff;
3710                         break;
3711                 }
3712                 break;
3713         }
3714         return (result);
3715 }
3716
3717 uint64_t
3718 ctl_encode_lun(uint32_t decoded)
3719 {
3720         uint64_t l = decoded;
3721
3722         if (l <= 0xff)
3723                 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
3724         if (l <= 0x3fff)
3725                 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
3726         if (l <= 0xffffff)
3727                 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
3728                     (l << 32));
3729         return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
3730 }
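
/*
 * Worked example of the round trip above: ctl_encode_lun(5) selects the
 * peripheral format, 5 << 48 == 0x0005000000000000; be64enc() in
 * ctl_decode_lun() then yields lun[1] == 0x05 with the remaining bytes
 * zero, decoding back to 5.  A LUN of 0x1234 takes the flat format
 * instead: 0x4000000000000000 | (0x1234 << 48) == 0x5234000000000000,
 * which decodes as ((0x52 & 0x3f) << 8) + 0x34 == 0x1234.
 */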
3731
3732 int
3733 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
3734 {
3735         int i;
3736
3737         for (i = first; i < last; i++) {
3738                 if ((mask[i / 32] & (1 << (i % 32))) == 0)
3739                         return (i);
3740         }
3741         return (-1);
3742 }
3743
3744 int
3745 ctl_set_mask(uint32_t *mask, uint32_t bit)
3746 {
3747         uint32_t chunk, piece;
3748
3749         chunk = bit >> 5;
3750         piece = bit % (sizeof(uint32_t) * 8);
3751
3752         if ((mask[chunk] & (1 << piece)) != 0)
3753                 return (-1);
3754         else
3755                 mask[chunk] |= (1 << piece);
3756
3757         return (0);
3758 }
3759
3760 int
3761 ctl_clear_mask(uint32_t *mask, uint32_t bit)
3762 {
3763         uint32_t chunk, piece;
3764
3765         chunk = bit >> 5;
3766         piece = bit % (sizeof(uint32_t) * 8);
3767
3768         if ((mask[chunk] & (1 << piece)) == 0)
3769                 return (-1);
3770         else
3771                 mask[chunk] &= ~(1 << piece);
3772
3773         return (0);
3774 }
3775
3776 int
3777 ctl_is_set(uint32_t *mask, uint32_t bit)
3778 {
3779         uint32_t chunk, piece;
3780
3781         chunk = bit >> 5;
3782         piece = bit % (sizeof(uint32_t) * 8);
3783
3784         if ((mask[chunk] & (1 << piece)) == 0)
3785                 return (0);
3786         else
3787                 return (1);
3788 }
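
/*
 * These helpers implement a simple allocation bitmap.  A typical
 * (illustrative) pattern for handing out the lowest free index in a
 * 256-bit mask:
 *
 *	uint32_t mask[256 / 32] = { 0 };
 *	int idx;
 *
 *	idx = ctl_ffz(mask, 0, 256);	(first clear bit, or -1 if full)
 *	if (idx != -1)
 *		ctl_set_mask(mask, idx);
 */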
3789
3790 static uint64_t
3791 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
3792 {
3793         uint64_t *t;
3794
3795         t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3796         if (t == NULL)
3797                 return (0);
3798         return (t[residx % CTL_MAX_INIT_PER_PORT]);
3799 }
3800
3801 static void
3802 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
3803 {
3804         uint64_t *t;
3805
3806         t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3807         if (t == NULL)
3808                 return;
3809         t[residx % CTL_MAX_INIT_PER_PORT] = 0;
3810 }
3811
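/*
 * Lazily allocate the key array for the port owning residx.  Since the
 * M_WAITOK allocation may sleep, the LUN lock is dropped around malloc()
 * and the slot re-checked afterwards; if another thread raced in and
 * installed an array while the lock was released, the spare copy is
 * simply freed.
 */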
3812 static void
3813 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
3814 {
3815         uint64_t *p;
3816         u_int i;
3817
3818         i = residx/CTL_MAX_INIT_PER_PORT;
3819         if (lun->pr_keys[i] != NULL)
3820                 return;
3821         mtx_unlock(&lun->lun_lock);
3822         p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
3823             M_WAITOK | M_ZERO);
3824         mtx_lock(&lun->lun_lock);
3825         if (lun->pr_keys[i] == NULL)
3826                 lun->pr_keys[i] = p;
3827         else
3828                 free(p, M_CTL);
3829 }
3830
3831 static void
3832 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
3833 {
3834         uint64_t *t;
3835
3836         t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3837         KASSERT(t != NULL, ("prkey %d is not allocated", residx));
3838         t[residx % CTL_MAX_INIT_PER_PORT] = key;
3839 }
3840
3841 /*
3842  * ctl_softc, pool_name, total_ctl_io are passed in.
3843  * npool is passed out.
3844  */
3845 int
3846 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
3847                 uint32_t total_ctl_io, void **npool)
3848 {
3849         struct ctl_io_pool *pool;
3850
3851         pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3852                                             M_NOWAIT | M_ZERO);
3853         if (pool == NULL)
3854                 return (ENOMEM);
3855
3856         snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
3857         pool->ctl_softc = ctl_softc;
3858 #ifdef IO_POOLS
3859         pool->zone = uma_zsecond_create(pool->name, NULL,
3860             NULL, NULL, NULL, ctl_softc->io_zone);
3861         /* uma_prealloc(pool->zone, total_ctl_io); */
3862 #else
3863         pool->zone = ctl_softc->io_zone;
3864 #endif
3865
3866         *npool = pool;
3867         return (0);
3868 }
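
/*
 * With IO_POOLS defined, each pool gets its own secondary UMA zone
 * layered above the softc-wide io_zone; otherwise every pool shares that
 * zone directly and the pool object mostly serves to tag each allocated
 * ctl_io with its owner (see ctl_alloc_io() below).
 */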
3869
3870 void
3871 ctl_pool_free(struct ctl_io_pool *pool)
3872 {
3873
3874         if (pool == NULL)
3875                 return;
3876
3877 #ifdef IO_POOLS
3878         uma_zdestroy(pool->zone);
3879 #endif
3880         free(pool, M_CTL);
3881 }
3882
3883 union ctl_io *
3884 ctl_alloc_io(void *pool_ref)
3885 {
3886         struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3887         union ctl_io *io;
3888
3889         io = uma_zalloc(pool->zone, M_WAITOK);
3890         if (io != NULL) {
3891                 io->io_hdr.pool = pool_ref;
3892                 CTL_SOFTC(io) = pool->ctl_softc;
3893                 TAILQ_INIT(&io->io_hdr.blocked_queue);
3894         }
3895         return (io);
3896 }
3897
3898 union ctl_io *
3899 ctl_alloc_io_nowait(void *pool_ref)
3900 {
3901         struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3902         union ctl_io *io;
3903
3904         io = uma_zalloc(pool->zone, M_NOWAIT);
3905         if (io != NULL) {
3906                 io->io_hdr.pool = pool_ref;
3907                 CTL_SOFTC(io) = pool->ctl_softc;
3908                 TAILQ_INIT(&io->io_hdr.blocked_queue);
3909         }
3910         return (io);
3911 }
3912
3913 void
3914 ctl_free_io(union ctl_io *io)
3915 {
3916         struct ctl_io_pool *pool;
3917
3918         if (io == NULL)
3919                 return;
3920
3921         pool = (struct ctl_io_pool *)io->io_hdr.pool;
3922         uma_zfree(pool->zone, io);
3923 }
3924
3925 void
3926 ctl_zero_io(union ctl_io *io)
3927 {
3928         struct ctl_io_pool *pool;
3929
3930         if (io == NULL)
3931                 return;
3932
3933         /*
3934          * May need to preserve linked list pointers at some point too.
3935          */
3936         pool = io->io_hdr.pool;
3937         memset(io, 0, sizeof(*io));
3938         io->io_hdr.pool = pool;
3939         CTL_SOFTC(io) = pool->ctl_softc;
3940         TAILQ_INIT(&io->io_hdr.blocked_queue);
3941 }
3942
3943 int
3944 ctl_expand_number(const char *buf, uint64_t *num)
3945 {
3946         char *endptr;
3947         uint64_t number;
3948         unsigned shift;
3949
3950         number = strtoq(buf, &endptr, 0);
3951
3952         switch (tolower((unsigned char)*endptr)) {
3953         case 'e':
3954                 shift = 60;
3955                 break;
3956         case 'p':
3957                 shift = 50;
3958                 break;
3959         case 't':
3960                 shift = 40;
3961                 break;
3962         case 'g':
3963                 shift = 30;
3964                 break;
3965         case 'm':
3966                 shift = 20;
3967                 break;
3968         case 'k':
3969                 shift = 10;
3970                 break;
3971         case 'b':
3972         case '\0': /* No unit. */
3973                 *num = number;
3974                 return (0);
3975         default:
3976                 /* Unrecognized unit. */
3977                 return (-1);
3978         }
3979
3980         if ((number << shift) >> shift != number) {
3981                 /* Overflow */
3982                 return (-1);
3983         }
3984         *num = number << shift;
3985         return (0);
3986 }
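/*
 * Worked examples (illustrative): ctl_expand_number("4k", &num) stores
 * 4096; "3G" stores 3 << 30; "512b" and "512" both store 512.  Units
 * are case-insensitive, so "4K" behaves like "4k".  "16E" fails with
 * -1 because 16 << 60 overflows a uint64_t; "4q" fails with -1 as an
 * unrecognized unit.
 */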
3987
3988
3989 /*
3990  * This routine could be used in the future to load default and/or saved
3991  * mode page parameters for a particular LUN.
3992  */
3993 static int
3994 ctl_init_page_index(struct ctl_lun *lun)
3995 {
3996         int i, page_code;
3997         struct ctl_page_index *page_index;
3998         const char *value;
3999         uint64_t ival;
4000
4001         memcpy(&lun->mode_pages.index, page_index_template,
4002                sizeof(page_index_template));
4003
4004         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
4005
4006                 page_index = &lun->mode_pages.index[i];
4007                 if (lun->be_lun->lun_type == T_DIRECT &&
4008                     (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
4009                         continue;
4010                 if (lun->be_lun->lun_type == T_PROCESSOR &&
4011                     (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
4012                         continue;
4013                 if (lun->be_lun->lun_type == T_CDROM &&
4014                     (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
4015                         continue;
4016
4017                 page_code = page_index->page_code & SMPH_PC_MASK;
4018                 switch (page_code) {
4019                 case SMS_RW_ERROR_RECOVERY_PAGE: {
4020                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4021                             ("subpage %#x for page %#x is incorrect!",
4022                             page_index->subpage, page_code));
4023                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
4024                                &rw_er_page_default,
4025                                sizeof(rw_er_page_default));
4026                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
4027                                &rw_er_page_changeable,
4028                                sizeof(rw_er_page_changeable));
4029                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
4030                                &rw_er_page_default,
4031                                sizeof(rw_er_page_default));
4032                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
4033                                &rw_er_page_default,
4034                                sizeof(rw_er_page_default));
4035                         page_index->page_data =
4036                                 (uint8_t *)lun->mode_pages.rw_er_page;
4037                         break;
4038                 }
4039                 case SMS_FORMAT_DEVICE_PAGE: {
4040                         struct scsi_format_page *format_page;
4041
4042                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4043                             ("subpage %#x for page %#x is incorrect!",
4044                             page_index->subpage, page_code));
4045
4046                         /*
4047                          * Sectors per track are set above.  Bytes per
4048                          * sector need to be set here on a per-LUN basis.
4049                          */
4050                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
4051                                &format_page_default,
4052                                sizeof(format_page_default));
4053                         memcpy(&lun->mode_pages.format_page[
4054                                CTL_PAGE_CHANGEABLE], &format_page_changeable,
4055                                sizeof(format_page_changeable));
4056                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
4057                                &format_page_default,
4058                                sizeof(format_page_default));
4059                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
4060                                &format_page_default,
4061                                sizeof(format_page_default));
4062
4063                         format_page = &lun->mode_pages.format_page[
4064                                 CTL_PAGE_CURRENT];
4065                         scsi_ulto2b(lun->be_lun->blocksize,
4066                                     format_page->bytes_per_sector);
4067
4068                         format_page = &lun->mode_pages.format_page[
4069                                 CTL_PAGE_DEFAULT];
4070                         scsi_ulto2b(lun->be_lun->blocksize,
4071                                     format_page->bytes_per_sector);
4072
4073                         format_page = &lun->mode_pages.format_page[
4074                                 CTL_PAGE_SAVED];
4075                         scsi_ulto2b(lun->be_lun->blocksize,
4076                                     format_page->bytes_per_sector);
4077
4078                         page_index->page_data =
4079                                 (uint8_t *)lun->mode_pages.format_page;
4080                         break;
4081                 }
4082                 case SMS_RIGID_DISK_PAGE: {
4083                         struct scsi_rigid_disk_page *rigid_disk_page;
4084                         uint32_t sectors_per_cylinder;
4085                         uint64_t cylinders;
4086 #ifndef __XSCALE__
4087                         int shift;
4088 #endif /* !__XSCALE__ */
4089
4090                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4091                             ("subpage %#x for page %#x is incorrect!",
4092                             page_index->subpage, page_code));
4093
4094                         /*
4095                          * Rotation rate and sectors per track are set
4096                          * above.  We calculate the cylinders here based on
4097                          * capacity.  Due to the number of heads and
4098                          * sectors per track we're using, smaller arrays
4099                          * may turn out to have 0 cylinders.  Linux and
4100                          * FreeBSD don't pay attention to these mode pages
4101                          * to figure out capacity, but Solaris does.  It
4102                          * seems to deal with 0 cylinders just fine, and
4103                          * works out a fake geometry based on the capacity.
4104                          */
4105                         memcpy(&lun->mode_pages.rigid_disk_page[
4106                                CTL_PAGE_DEFAULT], &rigid_disk_page_default,
4107                                sizeof(rigid_disk_page_default));
4108                         memcpy(&lun->mode_pages.rigid_disk_page[
4109                CTL_PAGE_CHANGEABLE], &rigid_disk_page_changeable,
4110                                sizeof(rigid_disk_page_changeable));
4111
4112                         sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
4113                                 CTL_DEFAULT_HEADS;
4114
4115                         /*
4116                          * The divide method here gives an exact result,
4117                          * but a 64-bit divide is done in software on
4118                          * i386 via the helper __udivdi3() (an integer
4119                          * routine, not floating point), as it is on
4120                          * the XScale.
4121                          *
4122                          * The shift method for cylinder calculation is
4123                          * exact if sectors_per_cylinder is a power of
4124                          * 2.  Otherwise it divides by the largest power
4125                          * of 2 that fits, overestimating the cylinder count.
4126                          */
4127 #ifdef  __XSCALE__
4128                         cylinders = (lun->be_lun->maxlba + 1) /
4129                                 sectors_per_cylinder;
4130 #else
4131                         for (shift = 31; shift > 0; shift--) {
4132                                 if (sectors_per_cylinder & (1 << shift))
4133                                         break;
4134                         }
4135                         cylinders = (lun->be_lun->maxlba + 1) >> shift;
4136 #endif
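                        /*
                         * Illustrative numbers: if sectors_per_cylinder were
                         * 32130 (not a power of 2), the loop above would pick
                         * shift == 14, so we would divide by 16384 rather than
                         * 32130 and overestimate cylinders by almost 2x.  A
                         * power-of-2 value such as 32768 is exact.
                         */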
4137
4138                         /*
4139                          * We've basically got 3 bytes, or 24 bits for the
4140                          * cylinder size in the mode page.  If we're over,
4141                          * just clamp to the 24-bit maximum (0xffffff).
4142                          */
4143                         if (cylinders > 0xffffff)
4144                                 cylinders = 0xffffff;
4145
4146                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4147                                 CTL_PAGE_DEFAULT];
4148                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4149
4150                         if ((value = dnvlist_get_string(lun->be_lun->options,
4151                             "rpm", NULL)) != NULL) {
4152                                 scsi_ulto2b(strtol(value, NULL, 0),
4153                                      rigid_disk_page->rotation_rate);
4154                         }
4155
4156                         memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
4157                                &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4158                                sizeof(rigid_disk_page_default));
4159                         memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
4160                                &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4161                                sizeof(rigid_disk_page_default));
4162
4163                         page_index->page_data =
4164                                 (uint8_t *)lun->mode_pages.rigid_disk_page;
4165                         break;
4166                 }
4167                 case SMS_VERIFY_ERROR_RECOVERY_PAGE: {
4168                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4169                             ("subpage %#x for page %#x is incorrect!",
4170                             page_index->subpage, page_code));
4171                         memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT],
4172                                &verify_er_page_default,
4173                                sizeof(verify_er_page_default));
4174                         memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE],
4175                                &verify_er_page_changeable,
4176                                sizeof(verify_er_page_changeable));
4177                         memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT],
4178                                &verify_er_page_default,
4179                                sizeof(verify_er_page_default));
4180                         memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED],
4181                                &verify_er_page_default,
4182                                sizeof(verify_er_page_default));
4183                         page_index->page_data =
4184                                 (uint8_t *)lun->mode_pages.verify_er_page;
4185                         break;
4186                 }
4187                 case SMS_CACHING_PAGE: {
4188                         struct scsi_caching_page *caching_page;
4189
4190                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4191                             ("subpage %#x for page %#x is incorrect!",
4192                             page_index->subpage, page_code));
4193                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4194                                &caching_page_default,
4195                                sizeof(caching_page_default));
4196                         memcpy(&lun->mode_pages.caching_page[
4197                                CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4198                                sizeof(caching_page_changeable));
4199                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4200                                &caching_page_default,
4201                                sizeof(caching_page_default));
4202                         caching_page = &lun->mode_pages.caching_page[
4203                             CTL_PAGE_SAVED];
4204                         value = dnvlist_get_string(lun->be_lun->options,
4205                             "writecache", NULL);
4206                         if (value != NULL && strcmp(value, "off") == 0)
4207                                 caching_page->flags1 &= ~SCP_WCE;
4208                         value = dnvlist_get_string(lun->be_lun->options,
4209                             "readcache", NULL);
4210                         if (value != NULL && strcmp(value, "off") == 0)
4211                                 caching_page->flags1 |= SCP_RCD;
4212                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4213                                &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4214                                sizeof(caching_page_default));
4215                         page_index->page_data =
4216                                 (uint8_t *)lun->mode_pages.caching_page;
4217                         break;
4218                 }
4219                 case SMS_CONTROL_MODE_PAGE: {
4220                         switch (page_index->subpage) {
4221                         case SMS_SUBPAGE_PAGE_0: {
4222                                 struct scsi_control_page *control_page;
4223
4224                                 memcpy(&lun->mode_pages.control_page[
4225                                     CTL_PAGE_DEFAULT],
4226                                        &control_page_default,
4227                                        sizeof(control_page_default));
4228                                 memcpy(&lun->mode_pages.control_page[
4229                                     CTL_PAGE_CHANGEABLE],
4230                                        &control_page_changeable,
4231                                        sizeof(control_page_changeable));
4232                                 memcpy(&lun->mode_pages.control_page[
4233                                     CTL_PAGE_SAVED],
4234                                        &control_page_default,
4235                                        sizeof(control_page_default));
4236                                 control_page = &lun->mode_pages.control_page[
4237                                     CTL_PAGE_SAVED];
4238                                 value = dnvlist_get_string(lun->be_lun->options,
4239                                     "reordering", NULL);
4240                                 if (value != NULL &&
4241                                     strcmp(value, "unrestricted") == 0) {
4242                                         control_page->queue_flags &=
4243                                             ~SCP_QUEUE_ALG_MASK;
4244                                         control_page->queue_flags |=
4245                                             SCP_QUEUE_ALG_UNRESTRICTED;
4246                                 }
4247                                 memcpy(&lun->mode_pages.control_page[
4248                                     CTL_PAGE_CURRENT],
4249                                        &lun->mode_pages.control_page[
4250                                     CTL_PAGE_SAVED],
4251                                        sizeof(control_page_default));
4252                                 page_index->page_data =
4253                                     (uint8_t *)lun->mode_pages.control_page;
4254                                 break;
4255                         }
4256                         case 0x01:
4257                                 memcpy(&lun->mode_pages.control_ext_page[
4258                                     CTL_PAGE_DEFAULT],
4259                                        &control_ext_page_default,
4260                                        sizeof(control_ext_page_default));
4261                                 memcpy(&lun->mode_pages.control_ext_page[
4262                                     CTL_PAGE_CHANGEABLE],
4263                                        &control_ext_page_changeable,
4264                                        sizeof(control_ext_page_changeable));
4265                                 memcpy(&lun->mode_pages.control_ext_page[
4266                                     CTL_PAGE_SAVED],
4267                                        &control_ext_page_default,
4268                                        sizeof(control_ext_page_default));
4269                                 memcpy(&lun->mode_pages.control_ext_page[
4270                                     CTL_PAGE_CURRENT],
4271                                        &lun->mode_pages.control_ext_page[
4272                                     CTL_PAGE_SAVED],
4273                                        sizeof(control_ext_page_default));
4274                                 page_index->page_data =
4275                                     (uint8_t *)lun->mode_pages.control_ext_page;
4276                                 break;
4277                         default:
4278                                 panic("subpage %#x for page %#x is incorrect!",
4279                                       page_index->subpage, page_code);
4280                         }
4281                         break;
4282                 }
4283                 case SMS_INFO_EXCEPTIONS_PAGE: {
4284                         switch (page_index->subpage) {
4285                         case SMS_SUBPAGE_PAGE_0:
4286                                 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
4287                                        &ie_page_default,
4288                                        sizeof(ie_page_default));
4289                                 memcpy(&lun->mode_pages.ie_page[
4290                                        CTL_PAGE_CHANGEABLE], &ie_page_changeable,
4291                                        sizeof(ie_page_changeable));
4292                                 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
4293                                        &ie_page_default,
4294                                        sizeof(ie_page_default));
4295                                 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
4296                                        &ie_page_default,
4297                                        sizeof(ie_page_default));
4298                                 page_index->page_data =
4299                                         (uint8_t *)lun->mode_pages.ie_page;
4300                                 break;
4301                         case 0x02: {
4302                                 struct ctl_logical_block_provisioning_page *page;
4303
4304                                 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
4305                                        &lbp_page_default,
4306                                        sizeof(lbp_page_default));
4307                                 memcpy(&lun->mode_pages.lbp_page[
4308                                        CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
4309                                        sizeof(lbp_page_changeable));
4310                                 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4311                                        &lbp_page_default,
4312                                        sizeof(lbp_page_default));
4313                                 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
4314                                 value = dnvlist_get_string(lun->be_lun->options,
4315                                     "avail-threshold", NULL);
4316                                 if (value != NULL &&
4317                                     ctl_expand_number(value, &ival) == 0) {
4318                                         page->descr[0].flags |= SLBPPD_ENABLED |
4319                                             SLBPPD_ARMING_DEC;
4320                                         if (lun->be_lun->blocksize)
4321                                                 ival /= lun->be_lun->blocksize;
4322                                         else
4323                                                 ival /= 512;
4324                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4325                                             page->descr[0].count);
4326                                 }
4327                                 value = dnvlist_get_string(lun->be_lun->options,
4328                                     "used-threshold", NULL);
4329                                 if (value != NULL &&
4330                                     ctl_expand_number(value, &ival) == 0) {
4331                                         page->descr[1].flags |= SLBPPD_ENABLED |
4332                                             SLBPPD_ARMING_INC;
4333                                         if (lun->be_lun->blocksize)
4334                                                 ival /= lun->be_lun->blocksize;
4335                                         else
4336                                                 ival /= 512;
4337                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4338                                             page->descr[1].count);
4339                                 }
4340                                 value = dnvlist_get_string(lun->be_lun->options,
4341                                     "pool-avail-threshold", NULL);
4342                                 if (value != NULL &&
4343                                     ctl_expand_number(value, &ival) == 0) {
4344                                         page->descr[2].flags |= SLBPPD_ENABLED |
4345                                             SLBPPD_ARMING_DEC;
4346                                         if (lun->be_lun->blocksize)
4347                                                 ival /= lun->be_lun->blocksize;
4348                                         else
4349                                                 ival /= 512;
4350                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4351                                             page->descr[2].count);
4352                                 }
4353                                 value = dnvlist_get_string(lun->be_lun->options,
4354                                     "pool-used-threshold", NULL);
4355                                 if (value != NULL &&
4356                                     ctl_expand_number(value, &ival) == 0) {
4357                                         page->descr[3].flags |= SLBPPD_ENABLED |
4358                                             SLBPPD_ARMING_INC;
4359                                         if (lun->be_lun->blocksize)
4360                                                 ival /= lun->be_lun->blocksize;
4361                                         else
4362                                                 ival /= 512;
4363                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4364                                             page->descr[3].count);
4365                                 }
4366                                 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
4367                                        &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4368                                        sizeof(lbp_page_default));
4369                                 page_index->page_data =
4370                                         (uint8_t *)lun->mode_pages.lbp_page;
4371                                 break;
4372                         }
4373                         default:
4374                                 panic("subpage %#x for page %#x is incorrect!",
4375                                       page_index->subpage, page_code);
4376                         }
4377                         break;
4378                 }
4379                 case SMS_CDDVD_CAPS_PAGE: {
4380                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4381                             ("subpage %#x for page %#x is incorrect!",
4382                             page_index->subpage, page_code));
4383                         memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT],
4384                                &cddvd_page_default,
4385                                sizeof(cddvd_page_default));
4386                         memcpy(&lun->mode_pages.cddvd_page[
4387                                CTL_PAGE_CHANGEABLE], &cddvd_page_changeable,
4388                                sizeof(cddvd_page_changeable));
4389                         memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
4390                                &cddvd_page_default,
4391                                sizeof(cddvd_page_default));
4392                         memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT],
4393                                &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
4394                                sizeof(cddvd_page_default));
4395                         page_index->page_data =
4396                                 (uint8_t *)lun->mode_pages.cddvd_page;
4397                         break;
4398                 }
4399                 default:
4400                         panic("invalid page code value %#x", page_code);
4401                 }
4402         }
4403
4404         return (CTL_RETVAL_COMPLETE);
4405 }
4406
4407 static int
4408 ctl_init_log_page_index(struct ctl_lun *lun)
4409 {
4410         struct ctl_page_index *page_index;
4411         int i, j, k, prev;
4412
4413         memcpy(&lun->log_pages.index, log_page_index_template,
4414                sizeof(log_page_index_template));
4415
4416         prev = -1;
4417         for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
4418
4419                 page_index = &lun->log_pages.index[i];
4420                 if (lun->be_lun->lun_type == T_DIRECT &&
4421                     (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
4422                         continue;
4423                 if (lun->be_lun->lun_type == T_PROCESSOR &&
4424                     (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
4425                         continue;
4426                 if (lun->be_lun->lun_type == T_CDROM &&
4427                     (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
4428                         continue;
4429
4430                 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
4431                     lun->backend->lun_attr == NULL)
4432                         continue;
4433
4434                 if (page_index->page_code != prev) {
4435                         lun->log_pages.pages_page[j] = page_index->page_code;
4436                         prev = page_index->page_code;
4437                         j++;
4438                 }
4439                 lun->log_pages.subpages_page[k*2] = page_index->page_code;
4440                 lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
4441                 k++;
4442         }
4443         lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
4444         lun->log_pages.index[0].page_len = j;
4445         lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
4446         lun->log_pages.index[1].page_len = k * 2;
4447         lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page;
4448         lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page);
4449         lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0];
4450         lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS;
4451         lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.stat_page;
4452         lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page);
4453         lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page;
4454         lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page);
4455
4456         return (CTL_RETVAL_COMPLETE);
4457 }
4458
4459 static int
4460 hex2bin(const char *str, uint8_t *buf, int buf_size)
4461 {
4462         int i;
4463         u_char c;
4464
4465         memset(buf, 0, buf_size);
4466         while (isspace((unsigned char)str[0]))
4467                 str++;
4468         if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
4469                 str += 2;
4470         buf_size *= 2;
4471         for (i = 0; str[i] != 0 && i < buf_size; i++) {
4472                 while (str[i] == '-')   /* Skip dashes in UUIDs. */
4473                         str++;
4474                 c = str[i];
4475                 if (isdigit(c))
4476                         c -= '0';
4477                 else if (isalpha(c))
4478                         c -= isupper(c) ? 'A' - 10 : 'a' - 10;
4479                 else
4480                         break;
4481                 if (c >= 16)
4482                         break;
4483                 if ((i & 1) == 0)
4484                         buf[i / 2] |= (c << 4);
4485                 else
4486                         buf[i / 2] |= c;
4487         }
4488         return ((i + 1) / 2);
4489 }
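/*
 * For example, hex2bin("0x0123ab", buf, 4) stores {0x01, 0x23, 0xab,
 * 0x00} in buf and returns 3, the number of bytes decoded.  Dashes are
 * skipped, so a UUID decodes identically with or without them.
 */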
4490
4491 /*
4492  * Add LUN.
4493  *
4494  * Returns 0 for success, non-zero (errno) for failure.
4495  */
4496 int
4497 ctl_add_lun(struct ctl_be_lun *be_lun)
4498 {
4499         struct ctl_softc *ctl_softc = control_softc;
4500         struct ctl_lun *nlun, *lun;
4501         struct scsi_vpd_id_descriptor *desc;
4502         struct scsi_vpd_id_t10 *t10id;
4503         const char *eui, *naa, *scsiname, *uuid, *vendor, *value;
4504         int lun_number;
4505         int devidlen, idlen1, idlen2 = 0, len;
4506
4507         /*
4508          * We support only Direct Access, CD-ROM or Processor LUN types.
4509          */
4510         switch (be_lun->lun_type) {
4511         case T_DIRECT:
4512         case T_PROCESSOR:
4513         case T_CDROM:
4514                 break;
4515         case T_SEQUENTIAL:
4516         case T_CHANGER:
4517         default:
4518                 return (EINVAL);
4519         }
4520         lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO);
4521
4522         lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) *
4523             ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO);
4524         lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports,
4525             M_DEVBUF, M_WAITOK | M_ZERO);
4526         lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports,
4527             M_DEVBUF, M_WAITOK | M_ZERO);
4528
4529         /* Generate LUN ID. */
4530         devidlen = max(CTL_DEVID_MIN_LEN,
4531             strnlen(be_lun->device_id, CTL_DEVID_LEN));
4532         idlen1 = sizeof(*t10id) + devidlen;
4533         len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
4534         scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL);
4535         if (scsiname != NULL) {
4536                 idlen2 = roundup2(strlen(scsiname) + 1, 4);
4537                 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
4538         }
4539         eui = dnvlist_get_string(be_lun->options, "eui", NULL);
4540         if (eui != NULL) {
4541                 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4542         }
4543         naa = dnvlist_get_string(be_lun->options, "naa", NULL);
4544         if (naa != NULL) {
4545                 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4546         }
4547         uuid = dnvlist_get_string(be_lun->options, "uuid", NULL);
4548         if (uuid != NULL) {
4549                 len += sizeof(struct scsi_vpd_id_descriptor) + 18;
4550         }
4551         lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
4552             M_CTL, M_WAITOK | M_ZERO);
4553         desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
4554         desc->proto_codeset = SVPD_ID_CODESET_ASCII;
4555         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
4556         desc->length = idlen1;
4557         t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
4558         memset(t10id->vendor, ' ', sizeof(t10id->vendor));
4559         if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) {
4560                 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
4561         } else {
4562                 strncpy(t10id->vendor, vendor,
4563                     min(sizeof(t10id->vendor), strlen(vendor)));
4564         }
4565         strncpy((char *)t10id->vendor_spec_id,
4566             (char *)be_lun->device_id, devidlen);
4567         if (scsiname != NULL) {
4568                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4569                     desc->length);
4570                 desc->proto_codeset = SVPD_ID_CODESET_UTF8;
4571                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4572                     SVPD_ID_TYPE_SCSI_NAME;
4573                 desc->length = idlen2;
4574                 strlcpy(desc->identifier, scsiname, idlen2);
4575         }
4576         if (eui != NULL) {
4577                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4578                     desc->length);
4579                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4580                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4581                     SVPD_ID_TYPE_EUI64;
4582                 desc->length = hex2bin(eui, desc->identifier, 16);
4583                 desc->length = desc->length > 12 ? 16 :
4584                     (desc->length > 8 ? 12 : 8);
4585                 len -= 16 - desc->length;
4586         }
4587         if (naa != NULL) {
4588                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4589                     desc->length);
4590                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4591                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4592                     SVPD_ID_TYPE_NAA;
4593                 desc->length = hex2bin(naa, desc->identifier, 16);
4594                 desc->length = desc->length > 8 ? 16 : 8;
4595                 len -= 16 - desc->length;
4596         }
4597         if (uuid != NULL) {
4598                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4599                     desc->length);
4600                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4601                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4602                     SVPD_ID_TYPE_UUID;
4603                 desc->identifier[0] = 0x10;
4604                 hex2bin(uuid, &desc->identifier[2], 16);
4605                 desc->length = 18;
4606         }
4607         lun->lun_devid->len = len;
4608
4609         mtx_lock(&ctl_softc->ctl_lock);
4610         /*
4611          * See if the caller requested a particular LUN number.  If so, see
4612          * if it is available.  Otherwise, allocate the first available LUN.
4613          */
4614         if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4615                 if ((be_lun->req_lun_id > (ctl_max_luns - 1))
4616                  || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4617                         mtx_unlock(&ctl_softc->ctl_lock);
4618                         if (be_lun->req_lun_id > (ctl_max_luns - 1)) {
4619                                 printf("ctl: requested LUN ID %d is higher "
4620                                        "than ctl_max_luns - 1 (%d)\n",
4621                                        be_lun->req_lun_id, ctl_max_luns - 1);
4622                         } else {
4623                                 /*
4624                                  * XXX KDM return an error, or just assign
4625                                  * another LUN ID in this case??
4626                                  */
4627                                 printf("ctl: requested LUN ID %d is already "
4628                                        "in use\n", be_lun->req_lun_id);
4629                         }
4630 fail:
4631                         free(lun->lun_devid, M_CTL);
4632                         free(lun, M_CTL);
4633                         return (ENOSPC);
4634                 }
4635                 lun_number = be_lun->req_lun_id;
4636         } else {
4637                 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns);
4638                 if (lun_number == -1) {
4639                         mtx_unlock(&ctl_softc->ctl_lock);
4640                         printf("ctl: can't allocate LUN, out of LUNs\n");
4641                         goto fail;
4642                 }
4643         }
4644         ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4645         mtx_unlock(&ctl_softc->ctl_lock);
4646
4647         mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
4648         lun->lun = lun_number;
4649         lun->be_lun = be_lun;
4650         /*
4651          * Every LUN comes on line disabled; ctl_enable_lun() is called
4652          * at the end of this routine once setup has completed.
4653          */
4654         lun->flags |= CTL_LUN_DISABLED;
4655         lun->backend = be_lun->be;
4656         be_lun->ctl_lun = lun;
4657         be_lun->lun_id = lun_number;
4658         if (be_lun->flags & CTL_LUN_FLAG_EJECTED)
4659                 lun->flags |= CTL_LUN_EJECTED;
4660         if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA)
4661                 lun->flags |= CTL_LUN_NO_MEDIA;
4662         if (be_lun->flags & CTL_LUN_FLAG_STOPPED)
4663                 lun->flags |= CTL_LUN_STOPPED;
4664
4665         if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4666                 lun->flags |= CTL_LUN_PRIMARY_SC;
4667
4668         value = dnvlist_get_string(be_lun->options, "removable", NULL);
4669         if (value != NULL) {
4670                 if (strcmp(value, "on") == 0)
4671                         lun->flags |= CTL_LUN_REMOVABLE;
4672         } else if (be_lun->lun_type == T_CDROM)
4673                 lun->flags |= CTL_LUN_REMOVABLE;
4674
4675         lun->ctl_softc = ctl_softc;
4676 #ifdef CTL_TIME_IO
4677         lun->last_busy = getsbinuptime();
4678 #endif
4679         TAILQ_INIT(&lun->ooa_queue);
4680         STAILQ_INIT(&lun->error_list);
4681         lun->ie_reported = 1;
4682         callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0);
4683         ctl_tpc_lun_init(lun);
4684         if (lun->flags & CTL_LUN_REMOVABLE) {
4685                 lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4,
4686                     M_CTL, M_WAITOK);
4687         }
4688
4689         /*
4690          * Initialize the mode and log page index.
4691          */
4692         ctl_init_page_index(lun);
4693         ctl_init_log_page_index(lun);
4694
4695         /* Set up statistics gathering. */
4696         lun->stats.item = lun_number;
4697
4698         /*
4699          * Now, before we insert this lun on the lun list, set the lun
4700          * inventory changed UA for all other luns.
4701          */
4702         mtx_lock(&ctl_softc->ctl_lock);
4703         STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4704                 mtx_lock(&nlun->lun_lock);
4705                 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4706                 mtx_unlock(&nlun->lun_lock);
4707         }
4708         STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4709         ctl_softc->ctl_luns[lun_number] = lun;
4710         ctl_softc->num_luns++;
4711         mtx_unlock(&ctl_softc->ctl_lock);
4712
4713         /*
4714          * We successfully added the LUN, attempt to enable it.
4715          */
4716         if (ctl_enable_lun(lun) != 0) {
4717                 printf("%s: ctl_enable_lun() failed!\n", __func__);
4718                 mtx_lock(&ctl_softc->ctl_lock);
4719                 STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links);
4720                 ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number);
4721                 ctl_softc->ctl_luns[lun_number] = NULL;
4722                 ctl_softc->num_luns--;
4723                 mtx_unlock(&ctl_softc->ctl_lock);
4724                 free(lun->lun_devid, M_CTL);
4725                 free(lun, M_CTL);
4726                 return (EIO);
4727         }
4728
4729         return (0);
4730 }
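/*
 * A minimal backend-side sketch (not compiled) of the ctl_add_lun()
 * contract above, assuming device_id, options, and the backend pointer
 * are already filled in; the example_* name and the size values are
 * hypothetical.
 */
#if 0
static int
example_create_lun(struct ctl_be_lun *be_lun)
{
	be_lun->lun_type = T_DIRECT;
	be_lun->blocksize = 512;
	be_lun->maxlba = (1024 * 1024) - 1;	/* 512MB of 512-byte LBAs */
	be_lun->flags |= CTL_LUN_FLAG_ID_REQ;	/* request a specific ID */
	be_lun->req_lun_id = 0;

	/* On success be_lun->ctl_lun and be_lun->lun_id are filled in. */
	return (ctl_add_lun(be_lun));
}
#endif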
4731
4732 /*
4733  * Free LUN that has no active requests.
4734  */
4735 static int
4736 ctl_free_lun(struct ctl_lun *lun)
4737 {
4738         struct ctl_softc *softc = lun->ctl_softc;
4739         struct ctl_lun *nlun;
4740         int i;
4741
4742         KASSERT(TAILQ_EMPTY(&lun->ooa_queue),
4743             ("Freeing a LUN %p with outstanding I/O!\n", lun));
4744
4745         mtx_lock(&softc->ctl_lock);
4746         STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4747         ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4748         softc->ctl_luns[lun->lun] = NULL;
4749         softc->num_luns--;
4750         STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4751                 mtx_lock(&nlun->lun_lock);
4752                 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4753                 mtx_unlock(&nlun->lun_lock);
4754         }
4755         mtx_unlock(&softc->ctl_lock);
4756
4757         /*
4758          * Tell the backend to free resources, if this LUN has a backend.
4759          */
4760         lun->be_lun->lun_shutdown(lun->be_lun);
4761
4762         lun->ie_reportcnt = UINT32_MAX;
4763         callout_drain(&lun->ie_callout);
4764         ctl_tpc_lun_shutdown(lun);
4765         mtx_destroy(&lun->lun_lock);
4766         free(lun->lun_devid, M_CTL);
4767         for (i = 0; i < ctl_max_ports; i++)
4768                 free(lun->pending_ua[i], M_CTL);
4769         free(lun->pending_ua, M_DEVBUF);
4770         for (i = 0; i < ctl_max_ports; i++)
4771                 free(lun->pr_keys[i], M_CTL);
4772         free(lun->pr_keys, M_DEVBUF);
4773         free(lun->write_buffer, M_CTL);
4774         free(lun->prevent, M_CTL);
4775         free(lun, M_CTL);
4776
4777         return (0);
4778 }
4779
4780 static int
4781 ctl_enable_lun(struct ctl_lun *lun)
4782 {
4783         struct ctl_softc *softc;
4784         struct ctl_port *port, *nport;
4785         int retval;
4786
4787         softc = lun->ctl_softc;
4788
4789         mtx_lock(&softc->ctl_lock);
4790         mtx_lock(&lun->lun_lock);
4791         KASSERT((lun->flags & CTL_LUN_DISABLED) != 0,
4792             ("%s: LUN not disabled", __func__));
4793         lun->flags &= ~CTL_LUN_DISABLED;
4794         mtx_unlock(&lun->lun_lock);
4795
4796         STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) {
4797                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4798                     port->lun_map != NULL || port->lun_enable == NULL)
4799                         continue;
4800
4801                 /*
4802                  * Drop the lock while we call the FETD's enable routine.
4803                  * This can lead to a callback into CTL (at least in the
4804                  * case of the internal initiator frontend).
4805                  */
4806                 mtx_unlock(&softc->ctl_lock);
4807                 retval = port->lun_enable(port->targ_lun_arg, lun->lun);
4808                 mtx_lock(&softc->ctl_lock);
4809                 if (retval != 0) {
4810                         printf("%s: FETD %s port %d returned error "
4811                                "%d for lun_enable on lun %jd\n",
4812                                __func__, port->port_name, port->targ_port,
4813                                retval, (intmax_t)lun->lun);
4814                 }
4815         }
4816
4817         mtx_unlock(&softc->ctl_lock);
4818         ctl_isc_announce_lun(lun);
4819
4820         return (0);
4821 }
4822
4823 static int
4824 ctl_disable_lun(struct ctl_lun *lun)
4825 {
4826         struct ctl_softc *softc;
4827         struct ctl_port *port;
4828         int retval;
4829
4830         softc = lun->ctl_softc;
4831
4832         mtx_lock(&softc->ctl_lock);
4833         mtx_lock(&lun->lun_lock);
4834         KASSERT((lun->flags & CTL_LUN_DISABLED) == 0,
4835             ("%s: LUN not enabled", __func__));
4836         lun->flags |= CTL_LUN_DISABLED;
4837         mtx_unlock(&lun->lun_lock);
4838
4839         STAILQ_FOREACH(port, &softc->port_list, links) {
4840                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4841                     port->lun_map != NULL || port->lun_disable == NULL)
4842                         continue;
4843
4844                 /*
4845                  * Drop the lock before we call the frontend's disable
4846                  * routine, to avoid lock order reversals.
4847                  *
4848                  * XXX KDM what happens if the frontend list changes while
4849                  * we're traversing it?  It's unlikely, but should be handled.
4850                  */
4851                 mtx_unlock(&softc->ctl_lock);
4852                 retval = port->lun_disable(port->targ_lun_arg, lun->lun);
4853                 mtx_lock(&softc->ctl_lock);
4854                 if (retval != 0) {
4855                         printf("%s: FETD %s port %d returned error "
4856                                "%d for lun_disable on lun %jd\n",
4857                                __func__, port->port_name, port->targ_port,
4858                                retval, (intmax_t)lun->lun);
4859                 }
4860         }
4861
4862         mtx_unlock(&softc->ctl_lock);
4863         ctl_isc_announce_lun(lun);
4864
4865         return (0);
4866 }
4867
4868 int
4869 ctl_start_lun(struct ctl_be_lun *be_lun)
4870 {
4871         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4872
4873         mtx_lock(&lun->lun_lock);
4874         lun->flags &= ~CTL_LUN_STOPPED;
4875         mtx_unlock(&lun->lun_lock);
4876         return (0);
4877 }
4878
4879 int
4880 ctl_stop_lun(struct ctl_be_lun *be_lun)
4881 {
4882         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4883
4884         mtx_lock(&lun->lun_lock);
4885         lun->flags |= CTL_LUN_STOPPED;
4886         mtx_unlock(&lun->lun_lock);
4887         return (0);
4888 }
4889
4890 int
4891 ctl_lun_no_media(struct ctl_be_lun *be_lun)
4892 {
4893         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4894
4895         mtx_lock(&lun->lun_lock);
4896         lun->flags |= CTL_LUN_NO_MEDIA;
4897         mtx_unlock(&lun->lun_lock);
4898         return (0);
4899 }
4900
4901 int
4902 ctl_lun_has_media(struct ctl_be_lun *be_lun)
4903 {
4904         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4905         union ctl_ha_msg msg;
4906
4907         mtx_lock(&lun->lun_lock);
4908         lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED);
4909         if (lun->flags & CTL_LUN_REMOVABLE)
4910                 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE);
4911         mtx_unlock(&lun->lun_lock);
4912         if ((lun->flags & CTL_LUN_REMOVABLE) &&
4913             lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
4914                 bzero(&msg.ua, sizeof(msg.ua));
4915                 msg.hdr.msg_type = CTL_MSG_UA;
4916                 msg.hdr.nexus.initid = -1;
4917                 msg.hdr.nexus.targ_port = -1;
4918                 msg.hdr.nexus.targ_lun = lun->lun;
4919                 msg.hdr.nexus.targ_mapped_lun = lun->lun;
4920                 msg.ua.ua_all = 1;
4921                 msg.ua.ua_set = 1;
4922                 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE;
4923                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
4924                     M_WAITOK);
4925         }
4926         return (0);
4927 }
4928
4929 int
4930 ctl_lun_ejected(struct ctl_be_lun *be_lun)
4931 {
4932         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4933
4934         mtx_lock(&lun->lun_lock);
4935         lun->flags |= CTL_LUN_EJECTED;
4936         mtx_unlock(&lun->lun_lock);
4937         return (0);
4938 }
4939
4940 int
4941 ctl_lun_primary(struct ctl_be_lun *be_lun)
4942 {
4943         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4944
4945         mtx_lock(&lun->lun_lock);
4946         lun->flags |= CTL_LUN_PRIMARY_SC;
4947         ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4948         mtx_unlock(&lun->lun_lock);
4949         ctl_isc_announce_lun(lun);
4950         return (0);
4951 }
4952
4953 int
4954 ctl_lun_secondary(struct ctl_be_lun *be_lun)
4955 {
4956         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4957
4958         mtx_lock(&lun->lun_lock);
4959         lun->flags &= ~CTL_LUN_PRIMARY_SC;
4960         ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4961         mtx_unlock(&lun->lun_lock);
4962         ctl_isc_announce_lun(lun);
4963         return (0);
4964 }
4965
4966 /*
4967  * Remove LUN.  If there are active requests, the LUN is freed when the last one completes.
4968  *
4969  * Returns 0 for success, non-zero (errno) for failure.
4970  * Completion is reported to the backend via the lun_shutdown() method.
4971  */
4972 int
4973 ctl_remove_lun(struct ctl_be_lun *be_lun)
4974 {
4975         struct ctl_lun *lun;
4976
4977         lun = (struct ctl_lun *)be_lun->ctl_lun;
4978
4979         ctl_disable_lun(lun);
4980
4981         mtx_lock(&lun->lun_lock);
4982         lun->flags |= CTL_LUN_INVALID;
4983
4984         /*
4985          * If there is nothing in the OOA queue, go ahead and free the LUN.
4986          * If we have something in the OOA queue, we'll free it when the
4987          * last I/O completes.
4988          */
4989         if (TAILQ_EMPTY(&lun->ooa_queue)) {
4990                 mtx_unlock(&lun->lun_lock);
4991                 ctl_free_lun(lun);
4992         } else
4993                 mtx_unlock(&lun->lun_lock);
4994
4995         return (0);
4996 }
4997
4998 void
4999 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
5000 {
5001         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
5002         union ctl_ha_msg msg;
5003
5004         mtx_lock(&lun->lun_lock);
5005         ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
5006         mtx_unlock(&lun->lun_lock);
5007         if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
5008                 /* Send msg to other side. */
5009                 bzero(&msg.ua, sizeof(msg.ua));
5010                 msg.hdr.msg_type = CTL_MSG_UA;
5011                 msg.hdr.nexus.initid = -1;
5012                 msg.hdr.nexus.targ_port = -1;
5013                 msg.hdr.nexus.targ_lun = lun->lun;
5014                 msg.hdr.nexus.targ_mapped_lun = lun->lun;
5015                 msg.ua.ua_all = 1;
5016                 msg.ua.ua_set = 1;
5017                 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
5018                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
5019                     M_WAITOK);
5020         }
5021 }
5022
5023 /*
5024  * Backend "memory move is complete" callback for requests that never
5025  * make it down to, say, RAIDCore's configuration code.
5026  */
5027 int
5028 ctl_config_move_done(union ctl_io *io)
5029 {
5030         int retval;
5031
5032         CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
5033         KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
5034             ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));
5035
5036         if ((io->io_hdr.port_status != 0) &&
5037             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5038              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5039                 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
5040                     /*retry_count*/ io->io_hdr.port_status);
5041         } else if (io->scsiio.kern_data_resid != 0 &&
5042             (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
5043             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5044              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5045                 ctl_set_invalid_field_ciu(&io->scsiio);
5046         }
5047
5048         if (ctl_debug & CTL_DEBUG_CDB_DATA)
5049                 ctl_data_print(io);
5050         if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
5051             ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5052              (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
5053             ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
5054                 /*
5055                  * XXX KDM just assuming a single pointer here, and not a
5056                  * S/G list.  If we start using S/G lists for config data,
5057                  * we'll need to know how to clean them up here as well.
5058                  */
5059                 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5060                         free(io->scsiio.kern_data_ptr, M_CTL);
5061                 ctl_done(io);
5062                 retval = CTL_RETVAL_COMPLETE;
5063         } else {
5064                 /*
5065                  * XXX KDM now we need to continue data movement.  Some
5066                  * options:
5067                  * - call ctl_scsiio() again?  We don't do this for data
5068                  *   writes, because for those at least we know ahead of
5069                  *   time where the write will go and how long it is.  For
5070                  *   config writes, though, that information is largely
5071                  *   contained within the write itself, thus we need to
5072                  *   parse out the data again.
5073                  *
5074                  * - Call some other function once the data is in?
5075                  */
5076
5077                 /*
5078                  * XXX KDM call ctl_scsiio() again for now, and check flag
5079                  * bits to see whether we're allocated or not.
5080                  */
5081                 retval = ctl_scsiio(&io->scsiio);
5082         }
5083         return (retval);
5084 }
5085
5086 /*
5087  * This gets called by a backend driver when it is done with a
5088  * data_submit method.
5089  */
5090 void
5091 ctl_data_submit_done(union ctl_io *io)
5092 {
5093         /*
5094          * If the IO_CONT flag is set, we need to call the supplied
5095          * function to continue processing the I/O, instead of completing
5096          * the I/O just yet.
5097          *
5098          * If there is an error, though, we don't want to keep processing.
5099          * Instead, just send status back to the initiator.
5100          */
5101         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5102             (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5103             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5104              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5105                 io->scsiio.io_cont(io);
5106                 return;
5107         }
5108         ctl_done(io);
5109 }
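/*
 * Sketch of the CTL_FLAG_IO_CONT convention handled above, assuming a
 * handler has a continuation function example_continue (hypothetical
 * name) of the io_cont callback type:
 */
#if 0
	io->scsiio.io_cont = example_continue;
	io->io_hdr.flags |= CTL_FLAG_IO_CONT;
	/*
	 * ctl_data_submit_done() and the ctl_config_*_done() routines
	 * will then call the continuation after a successful, non-aborted
	 * data move; on error or abort they complete the io themselves.
	 */
#endif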
5110
5111 /*
5112  * This gets called by a backend driver when it is done with a
5113  * configuration write.
5114  */
5115 void
5116 ctl_config_write_done(union ctl_io *io)
5117 {
5118         uint8_t *buf;
5119
5120         /*
5121          * If the IO_CONT flag is set, we need to call the supplied
5122          * function to continue processing the I/O, instead of completing
5123          * the I/O just yet.
5124          *
5125          * If there is an error, though, we don't want to keep processing.
5126          * Instead, just send status back to the initiator.
5127          */
5128         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5129             (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5130             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5131              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5132                 io->scsiio.io_cont(io);
5133                 return;
5134         }
5135         /*
5136          * Since a configuration write can be done for commands that actually
5137          * have data allocated, like write buffer, and commands that have
5138          * no data, like start/stop unit, we need to check here.
5139          */
5140         if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5141                 buf = io->scsiio.kern_data_ptr;
5142         else
5143                 buf = NULL;
5144         ctl_done(io);
5145         if (buf)
5146                 free(buf, M_CTL);
5147 }
5148
5149 void
5150 ctl_config_read_done(union ctl_io *io)
5151 {
5152         uint8_t *buf;
5153
5154         /*
5155          * If there was an error, we are done; skip the data transfer.
5156          */
5157         if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
5158             ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5159              (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
5160                 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5161                         buf = io->scsiio.kern_data_ptr;
5162                 else
5163                         buf = NULL;
5164                 ctl_done(io);
5165                 if (buf)
5166                         free(buf, M_CTL);
5167                 return;
5168         }
5169
5170         /*
5171          * If the IO_CONT flag is set, we need to call the supplied
5172          * function to continue processing the I/O, instead of completing
5173          * the I/O just yet.
5174          */
5175         if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
5176                 io->scsiio.io_cont(io);
5177                 return;
5178         }
5179
5180         ctl_datamove(io);
5181 }
5182
5183 /*
5184  * SCSI release command.
5185  */
5186 int
5187 ctl_scsi_release(struct ctl_scsiio *ctsio)
5188 {
5189         struct ctl_lun *lun = CTL_LUN(ctsio);
5190         uint32_t residx;
5191
5192         CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5193
5194         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5195
5196         /*
5197          * XXX KDM right now, we only support LUN reservation.  We don't
5198          * support 3rd party reservations, or extent reservations, which
5199          * might actually need the parameter list.  If we've gotten this
5200          * far, we've got a LUN reservation.  Anything else got kicked out
5201          * above.  So, according to SPC, ignore the length.
5202          */
5203
5204         mtx_lock(&lun->lun_lock);
5205
5206         /*
5207          * According to SPC, it is not an error for an initiator to attempt
5208          * to release a reservation on a LUN that isn't reserved, or that
5209          * is reserved by another initiator.  The reservation can only be
5210          * released, though, by the initiator who made it or by one of
5211          * several reset type events.
5212          */
5213         if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
5214                 lun->flags &= ~CTL_LUN_RESERVED;
5215
5216         mtx_unlock(&lun->lun_lock);
5217
5218         ctl_set_success(ctsio);
5219         ctl_done((union ctl_io *)ctsio);
5220         return (CTL_RETVAL_COMPLETE);
5221 }
5222
5223 int
5224 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5225 {
5226         struct ctl_lun *lun = CTL_LUN(ctsio);
5227         uint32_t residx;
5228
5229         CTL_DEBUG_PRINT(("ctl_scsi_reserve\n"));
5230
5231         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5232
5233         /*
5234          * XXX KDM right now, we only support LUN reservation.  We don't
5235          * support 3rd party reservations, or extent reservations, which
5236          * might actually need the parameter list.  If we've gotten this
5237          * far, we've got a LUN reservation.  Anything else got kicked out
5238          * above.  So, according to SPC, ignore the length.
5239          */
5240
5241         mtx_lock(&lun->lun_lock);
5242         if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
5243                 ctl_set_reservation_conflict(ctsio);
5244                 goto bailout;
5245         }
5246
5247         /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */
5248         if (lun->flags & CTL_LUN_PR_RESERVED) {
5249                 ctl_set_success(ctsio);
5250                 goto bailout;
5251         }
5252
5253         lun->flags |= CTL_LUN_RESERVED;
5254         lun->res_idx = residx;
5255         ctl_set_success(ctsio);
5256
5257 bailout:
5258         mtx_unlock(&lun->lun_lock);
5259         ctl_done((union ctl_io *)ctsio);
5260         return (CTL_RETVAL_COMPLETE);
5261 }
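
/*
 * A self-contained toy model (userland C, not CTL code) of the SPC-2
 * semantics implemented by the two handlers above: RESERVE conflicts
 * only when the LUN is held by a different initiator, and RELEASE by a
 * non-owner succeeds without clearing the reservation.
 */
#if 0	/* illustrative only */
#include <stdbool.h>
#include <stdio.h>

struct toy_lun {
	bool		reserved;
	unsigned	owner;
};

static bool
toy_reserve(struct toy_lun *l, unsigned init)
{
	if (l->reserved && l->owner != init)
		return (false);			/* RESERVATION CONFLICT */
	l->reserved = true;
	l->owner = init;
	return (true);
}

static void
toy_release(struct toy_lun *l, unsigned init)
{
	/* Not an error if unreserved or held by someone else. */
	if (l->reserved && l->owner == init)
		l->reserved = false;
}

int
main(void)
{
	struct toy_lun l = { false, 0 };

	printf("init 1 RESERVE: %d\n", toy_reserve(&l, 1));	/* 1 */
	printf("init 2 RESERVE: %d\n", toy_reserve(&l, 2));	/* 0 */
	toy_release(&l, 2);		/* ignored; initiator 1 still owns */
	printf("still reserved: %d\n", (int)l.reserved);	/* 1 */
	return (0);
}
#endif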
5262
5263 int
5264 ctl_start_stop(struct ctl_scsiio *ctsio)
5265 {
5266         struct ctl_lun *lun = CTL_LUN(ctsio);
5267         struct scsi_start_stop_unit *cdb;
5268         int retval;
5269
5270         CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5271
5272         cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5273
5274         if ((cdb->how & SSS_PC_MASK) == 0) {
5275                 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
5276                     (cdb->how & SSS_START) == 0) {
5277                         uint32_t residx;
5278
5279                         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5280                         if (ctl_get_prkey(lun, residx) == 0 ||
5281                             (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {
5283                                 ctl_set_reservation_conflict(ctsio);
5284                                 ctl_done((union ctl_io *)ctsio);
5285                                 return (CTL_RETVAL_COMPLETE);
5286                         }
5287                 }
5288
5289                 if ((cdb->how & SSS_LOEJ) &&
5290                     (lun->flags & CTL_LUN_REMOVABLE) == 0) {
5291                         ctl_set_invalid_field(ctsio,
5292                                               /*sks_valid*/ 1,
5293                                               /*command*/ 1,
5294                                               /*field*/ 4,
5295                                               /*bit_valid*/ 1,
5296                                               /*bit*/ 1);
5297                         ctl_done((union ctl_io *)ctsio);
5298                         return (CTL_RETVAL_COMPLETE);
5299                 }
5300
5301                 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
5302                     lun->prevent_count > 0) {
5303                         /* "Medium removal prevented" */
5304                         ctl_set_sense(ctsio, /*current_error*/ 1,
5305                             /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
5306                              SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST,
5307                             /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE);
5308                         ctl_done((union ctl_io *)ctsio);
5309                         return (CTL_RETVAL_COMPLETE);
5310                 }
5311         }
5312
5313         retval = lun->backend->config_write((union ctl_io *)ctsio);
5314         return (retval);
5315 }
5316
5317 int
5318 ctl_prevent_allow(struct ctl_scsiio *ctsio)
5319 {
5320         struct ctl_lun *lun = CTL_LUN(ctsio);
5321         struct scsi_prevent *cdb;
5322         int retval;
5323         uint32_t initidx;
5324
5325         CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
5326
5327         cdb = (struct scsi_prevent *)ctsio->cdb;
5328
5329         if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) {
5330                 ctl_set_invalid_opcode(ctsio);
5331                 ctl_done((union ctl_io *)ctsio);
5332                 return (CTL_RETVAL_COMPLETE);
5333         }
5334
5335         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5336         mtx_lock(&lun->lun_lock);
5337         if ((cdb->how & PR_PREVENT) &&
5338             ctl_is_set(lun->prevent, initidx) == 0) {
5339                 ctl_set_mask(lun->prevent, initidx);
5340                 lun->prevent_count++;
5341         } else if ((cdb->how & PR_PREVENT) == 0 &&
5342             ctl_is_set(lun->prevent, initidx)) {
5343                 ctl_clear_mask(lun->prevent, initidx);
5344                 lun->prevent_count--;
5345         }
5346         mtx_unlock(&lun->lun_lock);
5347         retval = lun->backend->config_write((union ctl_io *)ctsio);
5348         return (retval);
5349 }
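
/*
 * Worked example of the accounting above, reusing the ctl_set_mask()/
 * ctl_is_set() helpers from earlier in this file: prevent_count counts
 * distinct initiators that currently have medium removal prevented, and
 * the ctl_is_set() guards make repeated PREVENTs from one initiator
 * idempotent.
 */
#if 0	/* illustrative only */
static void
example_prevent_twice(void)
{
	uint32_t prevent[8] = { 0 };	/* stand-in for lun->prevent */
	int prevent_count = 0;

	/* First PREVENT from initiator 5: bit clear, count 0 -> 1. */
	if (ctl_is_set(prevent, 5) == 0) {
		ctl_set_mask(prevent, 5);
		prevent_count++;
	}
	/* Second PREVENT from initiator 5: bit set, count stays 1. */
	if (ctl_is_set(prevent, 5) == 0) {
		ctl_set_mask(prevent, 5);
		prevent_count++;
	}
}
#endif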
5350
5351 /*
5352  * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5353  * we don't really do anything with the LBA and length fields if the user
5354  * passes them in.  Instead we'll just flush out the cache for the entire
5355  * LUN.
5356  */
5357 int
5358 ctl_sync_cache(struct ctl_scsiio *ctsio)
5359 {
5360         struct ctl_lun *lun = CTL_LUN(ctsio);
5361         struct ctl_lba_len_flags *lbalen;
5362         uint64_t starting_lba;
5363         uint32_t block_count;
5364         int retval;
5365         uint8_t byte2;
5366
5367         CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5368
5369         retval = 0;
5370
5371         switch (ctsio->cdb[0]) {
5372         case SYNCHRONIZE_CACHE: {
5373                 struct scsi_sync_cache *cdb;
5374                 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5375
5376                 starting_lba = scsi_4btoul(cdb->begin_lba);
5377                 block_count = scsi_2btoul(cdb->lb_count);
5378                 byte2 = cdb->byte2;
5379                 break;
5380         }
5381         case SYNCHRONIZE_CACHE_16: {
5382                 struct scsi_sync_cache_16 *cdb;
5383                 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5384
5385                 starting_lba = scsi_8btou64(cdb->begin_lba);
5386                 block_count = scsi_4btoul(cdb->lb_count);
5387                 byte2 = cdb->byte2;
5388                 break;
5389         }
5390         default:
5391                 ctl_set_invalid_opcode(ctsio);
5392                 ctl_done((union ctl_io *)ctsio);
5393                 goto bailout;
5394                 break; /* NOTREACHED */
5395         }
5396
5397         /*
5398          * We check the LBA and length, but don't do anything with them.
5399          * A SYNCHRONIZE CACHE will cause the entire cache for this LUN to
5400          * get flushed.  This check just helps satisfy anyone who wants to
5401          * see an error for an out-of-range LBA.
5402          */
5403         if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5404                 ctl_set_lba_out_of_range(ctsio,
5405                     MAX(starting_lba, lun->be_lun->maxlba + 1));
5406                 ctl_done((union ctl_io *)ctsio);
5407                 goto bailout;
5408         }
5409
5410         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5411         lbalen->lba = starting_lba;
5412         lbalen->len = block_count;
5413         lbalen->flags = byte2;
5414         retval = lun->backend->config_write((union ctl_io *)ctsio);
5415
5416 bailout:
5417         return (retval);
5418 }
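
/*
 * Worked example of the range check above: on a LUN with maxlba = 999
 * (1000 blocks), a SYNCHRONIZE CACHE with starting_lba = 900 and
 * block_count = 200 gives 900 + 200 = 1100 > 1000, so the command fails
 * with LBA OUT OF RANGE reporting MAX(900, 1000) = 1000, the first
 * invalid LBA.  An in-range request still flushes the whole LUN, since
 * the backend ignores the range.
 */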
5419
5420 int
5421 ctl_format(struct ctl_scsiio *ctsio)
5422 {
5423         struct scsi_format *cdb;
5424         int length, defect_list_len;
5425
5426         CTL_DEBUG_PRINT(("ctl_format\n"));
5427
5428         cdb = (struct scsi_format *)ctsio->cdb;
5429
5430         length = 0;
5431         if (cdb->byte2 & SF_FMTDATA) {
5432                 if (cdb->byte2 & SF_LONGLIST)
5433                         length = sizeof(struct scsi_format_header_long);
5434                 else
5435                         length = sizeof(struct scsi_format_header_short);
5436         }
5437
5438         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5439          && (length > 0)) {
5440                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5441                 ctsio->kern_data_len = length;
5442                 ctsio->kern_total_len = length;
5443                 ctsio->kern_rel_offset = 0;
5444                 ctsio->kern_sg_entries = 0;
5445                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5446                 ctsio->be_move_done = ctl_config_move_done;
5447                 ctl_datamove((union ctl_io *)ctsio);
5448
5449                 return (CTL_RETVAL_COMPLETE);
5450         }
5451
5452         defect_list_len = 0;
5453
5454         if (cdb->byte2 & SF_FMTDATA) {
5455                 if (cdb->byte2 & SF_LONGLIST) {
5456                         struct scsi_format_header_long *header;
5457
5458                         header = (struct scsi_format_header_long *)
5459                                 ctsio->kern_data_ptr;
5460
5461                         defect_list_len = scsi_4btoul(header->defect_list_len);
5462                         if (defect_list_len != 0) {
5463                                 ctl_set_invalid_field(ctsio,
5464                                                       /*sks_valid*/ 1,
5465                                                       /*command*/ 0,
5466                                                       /*field*/ 2,
5467                                                       /*bit_valid*/ 0,
5468                                                       /*bit*/ 0);
5469                                 goto bailout;
5470                         }
5471                 } else {
5472                         struct scsi_format_header_short *header;
5473
5474                         header = (struct scsi_format_header_short *)
5475                                 ctsio->kern_data_ptr;
5476
5477                         defect_list_len = scsi_2btoul(header->defect_list_len);
5478                         if (defect_list_len != 0) {
5479                                 ctl_set_invalid_field(ctsio,
5480                                                       /*sks_valid*/ 1,
5481                                                       /*command*/ 0,
5482                                                       /*field*/ 2,
5483                                                       /*bit_valid*/ 0,
5484                                                       /*bit*/ 0);
5485                                 goto bailout;
5486                         }
5487                 }
5488         }
5489
5490         ctl_set_success(ctsio);
5491 bailout:
5492
5493         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5494                 free(ctsio->kern_data_ptr, M_CTL);
5495                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5496         }
5497
5498         ctl_done((union ctl_io *)ctsio);
5499         return (CTL_RETVAL_COMPLETE);
5500 }
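
/*
 * The function above is an instance of the two-phase pattern shared by
 * the CTL configuration commands that carry a parameter list (FORMAT
 * UNIT, WRITE BUFFER, MODE SELECT, UNMAP): the first pass allocates the
 * buffer and starts the data transfer, and ctl_config_move_done()
 * re-dispatches the command once the data has arrived.  A hedged
 * skeleton of the pattern, with illustrative names:
 */
#if 0	/* illustrative only */
static int
example_config_cmd(struct ctl_scsiio *ctsio)
{
	int len = 512;		/* in a real handler, derived from the CDB */

	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		/* Pass 1: allocate and fetch the parameter data. */
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Pass 2: the parameter data is now in kern_data_ptr. */
	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
#endif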
5501
5502 int
5503 ctl_read_buffer(struct ctl_scsiio *ctsio)
5504 {
5505         struct ctl_lun *lun = CTL_LUN(ctsio);
5506         uint64_t buffer_offset;
5507         uint32_t len;
5508         uint8_t byte2;
5509         static uint8_t descr[4];
5510         static uint8_t echo_descr[4] = { 0 };
5511
5512         CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5513
5514         switch (ctsio->cdb[0]) {
5515         case READ_BUFFER: {
5516                 struct scsi_read_buffer *cdb;
5517
5518                 cdb = (struct scsi_read_buffer *)ctsio->cdb;
5519                 buffer_offset = scsi_3btoul(cdb->offset);
5520                 len = scsi_3btoul(cdb->length);
5521                 byte2 = cdb->byte2;
5522                 break;
5523         }
5524         case READ_BUFFER_16: {
5525                 struct scsi_read_buffer_16 *cdb;
5526
5527                 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb;
5528                 buffer_offset = scsi_8btou64(cdb->offset);
5529                 len = scsi_4btoul(cdb->length);
5530                 byte2 = cdb->byte2;
5531                 break;
5532         }
5533         default: /* This shouldn't happen. */
5534                 ctl_set_invalid_opcode(ctsio);
5535                 ctl_done((union ctl_io *)ctsio);
5536                 return (CTL_RETVAL_COMPLETE);
5537         }
5538
5539         if (buffer_offset > CTL_WRITE_BUFFER_SIZE ||
5540             buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5541                 ctl_set_invalid_field(ctsio,
5542                                       /*sks_valid*/ 1,
5543                                       /*command*/ 1,
5544                                       /*field*/ 6,
5545                                       /*bit_valid*/ 0,
5546                                       /*bit*/ 0);
5547                 ctl_done((union ctl_io *)ctsio);
5548                 return (CTL_RETVAL_COMPLETE);
5549         }
5550
5551         if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) {
5552                 descr[0] = 0;
5553                 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
5554                 ctsio->kern_data_ptr = descr;
5555                 len = min(len, sizeof(descr));
5556         } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
5557                 ctsio->kern_data_ptr = echo_descr;
5558                 len = min(len, sizeof(echo_descr));
5559         } else {
5560                 if (lun->write_buffer == NULL) {
5561                         lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5562                             M_CTL, M_WAITOK);
5563                 }
5564                 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5565         }
5566         ctsio->kern_data_len = len;
5567         ctsio->kern_total_len = len;
5568         ctsio->kern_rel_offset = 0;
5569         ctsio->kern_sg_entries = 0;
5570         ctl_set_success(ctsio);
5571         ctsio->be_move_done = ctl_config_move_done;
5572         ctl_datamove((union ctl_io *)ctsio);
5573         return (CTL_RETVAL_COMPLETE);
5574 }
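
/*
 * Worked example of the descriptor mode above, assuming
 * CTL_WRITE_BUFFER_SIZE is 262144 (256 KiB): the 4-byte descriptor
 * returned is { 0x00, 0x04, 0x00, 0x00 } -- an offset boundary of 0
 * followed by the buffer capacity 0x040000 as a 3-byte big-endian
 * value, which is what scsi_ulto3b() stores at &descr[1].
 */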
5575
5576 int
5577 ctl_write_buffer(struct ctl_scsiio *ctsio)
5578 {
5579         struct ctl_lun *lun = CTL_LUN(ctsio);
5580         struct scsi_write_buffer *cdb;
5581         int buffer_offset, len;
5582
5583         CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5584
5585         cdb = (struct scsi_write_buffer *)ctsio->cdb;
5586
5587         len = scsi_3btoul(cdb->length);
5588         buffer_offset = scsi_3btoul(cdb->offset);
5589
5590         if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5591                 ctl_set_invalid_field(ctsio,
5592                                       /*sks_valid*/ 1,
5593                                       /*command*/ 1,
5594                                       /*field*/ 6,
5595                                       /*bit_valid*/ 0,
5596                                       /*bit*/ 0);
5597                 ctl_done((union ctl_io *)ctsio);
5598                 return (CTL_RETVAL_COMPLETE);
5599         }
5600
5601         /*
5602          * If we've got a kernel request that hasn't been malloced yet,
5603          * malloc it and tell the caller the data buffer is here.
5604          */
5605         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5606                 if (lun->write_buffer == NULL) {
5607                         lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5608                             M_CTL, M_WAITOK);
5609                 }
5610                 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5611                 ctsio->kern_data_len = len;
5612                 ctsio->kern_total_len = len;
5613                 ctsio->kern_rel_offset = 0;
5614                 ctsio->kern_sg_entries = 0;
5615                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5616                 ctsio->be_move_done = ctl_config_move_done;
5617                 ctl_datamove((union ctl_io *)ctsio);
5618
5619                 return (CTL_RETVAL_COMPLETE);
5620         }
5621
5622         ctl_set_success(ctsio);
5623         ctl_done((union ctl_io *)ctsio);
5624         return (CTL_RETVAL_COMPLETE);
5625 }
5626
5627 static int
5628 ctl_write_same_cont(union ctl_io *io)
5629 {
5630         struct ctl_lun *lun = CTL_LUN(io);
5631         struct ctl_scsiio *ctsio;
5632         struct ctl_lba_len_flags *lbalen;
5633         int retval;
5634
5635         ctsio = &io->scsiio;
5636         ctsio->io_hdr.status = CTL_STATUS_NONE;
5637         lbalen = (struct ctl_lba_len_flags *)
5638             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5639         lbalen->lba += lbalen->len;
5640         if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) {
5641                 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
5642                 lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba;
5643         }
5644
5645         CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n"));
5646         retval = lun->backend->config_write((union ctl_io *)ctsio);
5647         return (retval);
5648 }
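
/*
 * Worked example of the chunking above: for a WRITE SAME covering 2^33
 * blocks starting at LBA 0, ctl_write_same() below issues the first
 * 2^31-block pass with CTL_FLAG_IO_CONT set.  Each re-entry here
 * advances lba by the previous len; after pass one, 2^33 - 2^31 blocks
 * remain (still > UINT32_MAX), so another 2^31-block pass runs.  Once
 * the remainder fits in 32 bits, IO_CONT is cleared and the final pass
 * writes exactly the blocks left.
 */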
5649
5650 int
5651 ctl_write_same(struct ctl_scsiio *ctsio)
5652 {
5653         struct ctl_lun *lun = CTL_LUN(ctsio);
5654         struct ctl_lba_len_flags *lbalen;
5655         const char *val;
5656         uint64_t lba, ival;
5657         uint32_t num_blocks;
5658         int len, retval;
5659         uint8_t byte2;
5660
5661         CTL_DEBUG_PRINT(("ctl_write_same\n"));
5662
5663         switch (ctsio->cdb[0]) {
5664         case WRITE_SAME_10: {
5665                 struct scsi_write_same_10 *cdb;
5666
5667                 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5668
5669                 lba = scsi_4btoul(cdb->addr);
5670                 num_blocks = scsi_2btoul(cdb->length);
5671                 byte2 = cdb->byte2;
5672                 break;
5673         }
5674         case WRITE_SAME_16: {
5675                 struct scsi_write_same_16 *cdb;
5676
5677                 cdb = (struct scsi_write_same_16 *)ctsio->cdb;
5678
5679                 lba = scsi_8btou64(cdb->addr);
5680                 num_blocks = scsi_4btoul(cdb->length);
5681                 byte2 = cdb->byte2;
5682                 break;
5683         }
5684         default:
5685                 /*
5686                  * We got a command we don't support.  This shouldn't
5687                  * happen, commands should be filtered out above us.
5688                  */
5689                 ctl_set_invalid_opcode(ctsio);
5690                 ctl_done((union ctl_io *)ctsio);
5691
5692                 return (CTL_RETVAL_COMPLETE);
5693                 break; /* NOTREACHED */
5694         }
5695
5696         /* ANCHOR flag can be used only together with UNMAP */
5697         if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) {
5698                 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
5699                     /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
5700                 ctl_done((union ctl_io *)ctsio);
5701                 return (CTL_RETVAL_COMPLETE);
5702         }
5703
5704         /*
5705          * The first check is to make sure we're in bounds, the second
5706          * check is to catch wrap-around problems.  If the lba + num blocks
5707          * is less than the lba, then we've wrapped around and the block
5708          * range is invalid anyway.
5709          */
5710         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5711          || ((lba + num_blocks) < lba)) {
5712                 ctl_set_lba_out_of_range(ctsio,
5713                     MAX(lba, lun->be_lun->maxlba + 1));
5714                 ctl_done((union ctl_io *)ctsio);
5715                 return (CTL_RETVAL_COMPLETE);
5716         }
5717
5718         /* Zero number of blocks means "to the last logical block" */
5719         if (num_blocks == 0) {
5720                 ival = UINT64_MAX;
5721                 val = dnvlist_get_string(lun->be_lun->options,
5722                     "write_same_max_lba", NULL);
5723                 if (val != NULL)
5724                         ctl_expand_number(val, &ival);
5725                 if ((lun->be_lun->maxlba + 1) - lba > ival) {
5726                         ctl_set_invalid_field(ctsio,
5727                             /*sks_valid*/ 1, /*command*/ 1,
5728                             /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 7 : 10,
5729                             /*bit_valid*/ 0, /*bit*/ 0);
5730                         ctl_done((union ctl_io *)ctsio);
5731                         return (CTL_RETVAL_COMPLETE);
5732                 }
5733                 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
5734                         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
5735                         ctsio->io_cont = ctl_write_same_cont;
5736                         num_blocks = 1U << 31;
5737                 } else
5738                         num_blocks = (lun->be_lun->maxlba + 1) - lba;
5739         }
5740
5741         len = lun->be_lun->blocksize;
5742
5743         /*
5744          * If we've got a kernel request that hasn't been malloced yet,
5745          * malloc it and tell the caller the data buffer is here.
5746          */
5747         if ((byte2 & SWS_NDOB) == 0 &&
5748             (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5749                 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5750                 ctsio->kern_data_len = len;
5751                 ctsio->kern_total_len = len;
5752                 ctsio->kern_rel_offset = 0;
5753                 ctsio->kern_sg_entries = 0;
5754                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5755                 ctsio->be_move_done = ctl_config_move_done;
5756                 ctl_datamove((union ctl_io *)ctsio);
5757
5758                 return (CTL_RETVAL_COMPLETE);
5759         }
5760
5761         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5762         lbalen->lba = lba;
5763         lbalen->len = num_blocks;
5764         lbalen->flags = byte2;
5765         retval = lun->backend->config_write((union ctl_io *)ctsio);
5766
5767         return (retval);
5768 }
5769
5770 int
5771 ctl_unmap(struct ctl_scsiio *ctsio)
5772 {
5773         struct ctl_lun *lun = CTL_LUN(ctsio);
5774         struct scsi_unmap *cdb;
5775         struct ctl_ptr_len_flags *ptrlen;
5776         struct scsi_unmap_header *hdr;
5777         struct scsi_unmap_desc *buf, *end, *endnz, *range;
5778         uint64_t lba;
5779         uint32_t num_blocks;
5780         int len, retval;
5781         uint8_t byte2;
5782
5783         CTL_DEBUG_PRINT(("ctl_unmap\n"));
5784
5785         cdb = (struct scsi_unmap *)ctsio->cdb;
5786         len = scsi_2btoul(cdb->length);
5787         byte2 = cdb->byte2;
5788
5789         /*
5790          * If we've got a kernel request that hasn't been malloced yet,
5791          * malloc it and tell the caller the data buffer is here.
5792          */
5793         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5794                 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5795                 ctsio->kern_data_len = len;
5796                 ctsio->kern_total_len = len;
5797                 ctsio->kern_rel_offset = 0;
5798                 ctsio->kern_sg_entries = 0;
5799                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5800                 ctsio->be_move_done = ctl_config_move_done;
5801                 ctl_datamove((union ctl_io *)ctsio);
5802
5803                 return (CTL_RETVAL_COMPLETE);
5804         }
5805
5806         len = ctsio->kern_total_len - ctsio->kern_data_resid;
5807         hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
5808         if (len < sizeof (*hdr) ||
5809             len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
5810             len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
5811             scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
5812                 ctl_set_invalid_field(ctsio,
5813                                       /*sks_valid*/ 0,
5814                                       /*command*/ 0,
5815                                       /*field*/ 0,
5816                                       /*bit_valid*/ 0,
5817                                       /*bit*/ 0);
5818                 goto done;
5819         }
5820         len = scsi_2btoul(hdr->desc_length);
5821         buf = (struct scsi_unmap_desc *)(hdr + 1);
5822         end = buf + len / sizeof(*buf);
5823
5824         endnz = buf;
5825         for (range = buf; range < end; range++) {
5826                 lba = scsi_8btou64(range->lba);
5827                 num_blocks = scsi_4btoul(range->length);
5828                 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5829                  || ((lba + num_blocks) < lba)) {
5830                         ctl_set_lba_out_of_range(ctsio,
5831                             MAX(lba, lun->be_lun->maxlba + 1));
5832                         ctl_done((union ctl_io *)ctsio);
5833                         return (CTL_RETVAL_COMPLETE);
5834                 }
5835                 if (num_blocks != 0)
5836                         endnz = range + 1;
5837         }
5838
5839         /*
5840          * The block backend cannot handle a trailing zero-length range.
5841          * Trim it off and return if there is nothing left.
5842          */
5843         len = (uint8_t *)endnz - (uint8_t *)buf;
5844         if (len == 0) {
5845                 ctl_set_success(ctsio);
5846                 goto done;
5847         }
5848
5849         mtx_lock(&lun->lun_lock);
5850         ptrlen = (struct ctl_ptr_len_flags *)
5851             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5852         ptrlen->ptr = (void *)buf;
5853         ptrlen->len = len;
5854         ptrlen->flags = byte2;
5855         ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE);
5856         mtx_unlock(&lun->lun_lock);
5857
5858         retval = lun->backend->config_write((union ctl_io *)ctsio);
5859         return (retval);
5860
5861 done:
5862         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5863                 free(ctsio->kern_data_ptr, M_CTL);
5864                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5865         }
5866         ctl_done((union ctl_io *)ctsio);
5867         return (CTL_RETVAL_COMPLETE);
5868 }
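
/*
 * A minimal sketch of the UNMAP parameter list parsed above (layout per
 * SBC-3): an 8-byte header followed by 16-byte block descriptors.  The
 * helper below builds a single-range list; the function name is
 * illustrative, not CTL API.
 */
#if 0	/* illustrative only */
static int
example_build_unmap_list(uint8_t *buf, uint64_t lba, uint32_t nblocks)
{
	struct scsi_unmap_header *hdr = (struct scsi_unmap_header *)buf;
	struct scsi_unmap_desc *d = (struct scsi_unmap_desc *)(hdr + 1);
	int len = sizeof(*hdr) + sizeof(*d);

	memset(buf, 0, len);
	/* The DATA LENGTH field excludes its own two bytes. */
	scsi_ulto2b(len - sizeof(hdr->length), hdr->length);
	scsi_ulto2b(sizeof(*d), hdr->desc_length);
	scsi_u64to8b(lba, d->lba);
	scsi_ulto4b(nblocks, d->length);
	return (len);
}
#endif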
5869
5870 int
5871 ctl_default_page_handler(struct ctl_scsiio *ctsio,
5872                          struct ctl_page_index *page_index, uint8_t *page_ptr)
5873 {
5874         struct ctl_lun *lun = CTL_LUN(ctsio);
5875         uint8_t *current_cp;
5876         int set_ua;
5877         uint32_t initidx;
5878
5879         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5880         set_ua = 0;
5881
5882         current_cp = (page_index->page_data + (page_index->page_len *
5883             CTL_PAGE_CURRENT));
5884
5885         mtx_lock(&lun->lun_lock);
5886         if (memcmp(current_cp, page_ptr, page_index->page_len)) {
5887                 memcpy(current_cp, page_ptr, page_index->page_len);
5888                 set_ua = 1;
5889         }
5890         if (set_ua != 0)
5891                 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5892         mtx_unlock(&lun->lun_lock);
5893         if (set_ua) {
5894                 ctl_isc_announce_mode(lun,
5895                     ctl_get_initindex(&ctsio->io_hdr.nexus),
5896                     page_index->page_code, page_index->subpage);
5897         }
5898         return (CTL_RETVAL_COMPLETE);
5899 }
5900
5901 static void
5902 ctl_ie_timer(void *arg)
5903 {
5904         struct ctl_lun *lun = arg;
5905         uint64_t t;
5906
5907         if (lun->ie_asc == 0)
5908                 return;
5909
5910         if (lun->MODE_IE.mrie == SIEP_MRIE_UA)
5911                 ctl_est_ua_all(lun, -1, CTL_UA_IE);
5912         else
5913                 lun->ie_reported = 0;
5914
5915         if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) {
5916                 lun->ie_reportcnt++;
5917                 t = scsi_4btoul(lun->MODE_IE.interval_timer);
5918                 if (t == 0 || t == UINT32_MAX)
5919                         t = 3000;  /* 5 min */
5920                 callout_schedule(&lun->ie_callout, t * hz / 10);
5921         }
5922 }
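
/*
 * Worked example of the interval arithmetic above: the mode page's
 * INTERVAL TIMER field is in 100 ms units, and callout_schedule() takes
 * ticks, hence t * hz / 10 (t * hz/10 ticks = t * 100 ms).  The
 * fallback t = 3000 therefore yields 3000 * 100 ms = 300 s, the five
 * minutes noted in the comment.
 */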
5923
5924 int
5925 ctl_ie_page_handler(struct ctl_scsiio *ctsio,
5926                          struct ctl_page_index *page_index, uint8_t *page_ptr)
5927 {
5928         struct ctl_lun *lun = CTL_LUN(ctsio);
5929         struct scsi_info_exceptions_page *pg;
5930         uint64_t t;
5931
5932         (void)ctl_default_page_handler(ctsio, page_index, page_ptr);
5933
5934         pg = (struct scsi_info_exceptions_page *)page_ptr;
5935         mtx_lock(&lun->lun_lock);
5936         if (pg->info_flags & SIEP_FLAGS_TEST) {
5937                 lun->ie_asc = 0x5d;
5938                 lun->ie_ascq = 0xff;
5939                 if (pg->mrie == SIEP_MRIE_UA) {
5940                         ctl_est_ua_all(lun, -1, CTL_UA_IE);
5941                         lun->ie_reported = 1;
5942                 } else {
5943                         ctl_clr_ua_all(lun, -1, CTL_UA_IE);
5944                         lun->ie_reported = -1;
5945                 }
5946                 lun->ie_reportcnt = 1;
5947                 if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) {
5948                         lun->ie_reportcnt++;
5949                         t = scsi_4btoul(pg->interval_timer);
5950                         if (t == 0 || t == UINT32_MAX)
5951                                 t = 3000;  /* 5 min */
5952                         callout_reset(&lun->ie_callout, t * hz / 10,
5953                             ctl_ie_timer, lun);
5954                 }
5955         } else {
5956                 lun->ie_asc = 0;
5957                 lun->ie_ascq = 0;
5958                 lun->ie_reported = 1;
5959                 ctl_clr_ua_all(lun, -1, CTL_UA_IE);
5960                 lun->ie_reportcnt = UINT32_MAX;
5961                 callout_stop(&lun->ie_callout);
5962         }
5963         mtx_unlock(&lun->lun_lock);
5964         return (CTL_RETVAL_COMPLETE);
5965 }
5966
5967 static int
5968 ctl_do_mode_select(union ctl_io *io)
5969 {
5970         struct ctl_lun *lun = CTL_LUN(io);
5971         struct scsi_mode_page_header *page_header;
5972         struct ctl_page_index *page_index;
5973         struct ctl_scsiio *ctsio;
5974         int page_len, page_len_offset, page_len_size;
5975         union ctl_modepage_info *modepage_info;
5976         uint16_t *len_left, *len_used;
5977         int retval, i;
5978
5979         ctsio = &io->scsiio;
5980         page_index = NULL;
5981         page_len = 0;
5982
5983         modepage_info = (union ctl_modepage_info *)
5984                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
5985         len_left = &modepage_info->header.len_left;
5986         len_used = &modepage_info->header.len_used;
5987
5988 do_next_page:
5989
5990         page_header = (struct scsi_mode_page_header *)
5991                 (ctsio->kern_data_ptr + *len_used);
5992
5993         if (*len_left == 0) {
5994                 free(ctsio->kern_data_ptr, M_CTL);
5995                 ctl_set_success(ctsio);
5996                 ctl_done((union ctl_io *)ctsio);
5997                 return (CTL_RETVAL_COMPLETE);
5998         } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6000                 free(ctsio->kern_data_ptr, M_CTL);
6001                 ctl_set_param_len_error(ctsio);
6002                 ctl_done((union ctl_io *)ctsio);
6003                 return (CTL_RETVAL_COMPLETE);
6004
6005         } else if ((page_header->page_code & SMPH_SPF)
6006                 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6008                 free(ctsio->kern_data_ptr, M_CTL);
6009                 ctl_set_param_len_error(ctsio);
6010                 ctl_done((union ctl_io *)ctsio);
6011                 return (CTL_RETVAL_COMPLETE);
6012         }
6013
6015         /*
6016          * XXX KDM should we do something with the block descriptor?
6017          */
6018         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6019                 page_index = &lun->mode_pages.index[i];
6020                 if (lun->be_lun->lun_type == T_DIRECT &&
6021                     (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6022                         continue;
6023                 if (lun->be_lun->lun_type == T_PROCESSOR &&
6024                     (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6025                         continue;
6026                 if (lun->be_lun->lun_type == T_CDROM &&
6027                     (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6028                         continue;
6029
6030                 if ((page_index->page_code & SMPH_PC_MASK) !=
6031                     (page_header->page_code & SMPH_PC_MASK))
6032                         continue;
6033
6034                 /*
6035                  * If neither page has a subpage code, then we've got a
6036                  * match.
6037                  */
6038                 if (((page_index->page_code & SMPH_SPF) == 0)
6039                  && ((page_header->page_code & SMPH_SPF) == 0)) {
6040                         page_len = page_header->page_length;
6041                         break;
6042                 }
6043
6044                 /*
6045                  * If both pages have subpages, then the subpage numbers
6046                  * have to match.
6047                  */
6048                 if ((page_index->page_code & SMPH_SPF)
6049                   && (page_header->page_code & SMPH_SPF)) {
6050                         struct scsi_mode_page_header_sp *sph;
6051
6052                         sph = (struct scsi_mode_page_header_sp *)page_header;
6053                         if (page_index->subpage == sph->subpage) {
6054                                 page_len = scsi_2btoul(sph->page_length);
6055                                 break;
6056                         }
6057                 }
6058         }
6059
6060         /*
6061          * If we couldn't find the page, or if we don't have a mode select
6062          * handler for it, send back an error to the user.
6063          */
6064         if ((i >= CTL_NUM_MODE_PAGES)
6065          || (page_index->select_handler == NULL)) {
6066                 ctl_set_invalid_field(ctsio,
6067                                       /*sks_valid*/ 1,
6068                                       /*command*/ 0,
6069                                       /*field*/ *len_used,
6070                                       /*bit_valid*/ 0,
6071                                       /*bit*/ 0);
6072                 free(ctsio->kern_data_ptr, M_CTL);
6073                 ctl_done((union ctl_io *)ctsio);
6074                 return (CTL_RETVAL_COMPLETE);
6075         }
6076
6077         if (page_index->page_code & SMPH_SPF) {
6078                 page_len_offset = 2;
6079                 page_len_size = 2;
6080         } else {
6081                 page_len_size = 1;
6082                 page_len_offset = 1;
6083         }
6084
6085         /*
6086          * If the length the initiator gives us isn't the one we specify in
6087          * the mode page header, or if they didn't specify enough data in
6088          * the CDB to avoid truncating this page, kick out the request.
6089          */
6090         if (page_len != page_index->page_len - page_len_offset - page_len_size) {
6091                 ctl_set_invalid_field(ctsio,
6092                                       /*sks_valid*/ 1,
6093                                       /*command*/ 0,
6094                                       /*field*/ *len_used + page_len_offset,
6095                                       /*bit_valid*/ 0,
6096                                       /*bit*/ 0);
6097                 free(ctsio->kern_data_ptr, M_CTL);
6098                 ctl_done((union ctl_io *)ctsio);
6099                 return (CTL_RETVAL_COMPLETE);
6100         }
6101         if (*len_left < page_index->page_len) {
6102                 free(ctsio->kern_data_ptr, M_CTL);
6103                 ctl_set_param_len_error(ctsio);
6104                 ctl_done((union ctl_io *)ctsio);
6105                 return (CTL_RETVAL_COMPLETE);
6106         }
6107
6108         /*
6109          * Run through the mode page, checking to make sure that the bits
6110          * the user changed are actually legal to change.
6111          */
6112         for (i = 0; i < page_index->page_len; i++) {
6113                 uint8_t *user_byte, *change_mask, *current_byte;
6114                 int bad_bit;
6115                 int j;
6116
6117                 user_byte = (uint8_t *)page_header + i;
6118                 change_mask = page_index->page_data +
6119                               (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6120                 current_byte = page_index->page_data +
6121                                (page_index->page_len * CTL_PAGE_CURRENT) + i;
6122
6123                 /*
6124                  * Check to see whether the user set any bits in this byte
6125                  * that they are not allowed to set.
6126                  */
6127                 if ((*user_byte & ~(*change_mask)) ==
6128                     (*current_byte & ~(*change_mask)))
6129                         continue;
6130
6131                 /*
6132                  * Go through bit by bit to determine which one is illegal.
6133                  */
6134                 bad_bit = 0;
6135                 for (j = 7; j >= 0; j--) {
6136                         if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6137                             (((1 << j) & ~(*change_mask)) & *current_byte)) {
6138                                 bad_bit = j;
6139                                 break;
6140                         }
6141                 }
6142                 ctl_set_invalid_field(ctsio,
6143                                       /*sks_valid*/ 1,
6144                                       /*command*/ 0,
6145                                       /*field*/ *len_used + i,
6146                                       /*bit_valid*/ 1,
6147                                       /*bit*/ bad_bit);
6148                 free(ctsio->kern_data_ptr, M_CTL);
6149                 ctl_done((union ctl_io *)ctsio);
6150                 return (CTL_RETVAL_COMPLETE);
6151         }
6152
6153         /*
6154          * Decrement these before we call the page handler, since we may
6155          * end up getting called back one way or another before the handler
6156          * returns to this context.
6157          */
6158         *len_left -= page_index->page_len;
6159         *len_used += page_index->page_len;
6160
6161         retval = page_index->select_handler(ctsio, page_index,
6162                                             (uint8_t *)page_header);
6163
6164         /*
6165          * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6166          * wait until this queued command completes to finish processing
6167          * the mode page.  If it returns anything other than
6168          * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6169          * already set the sense information, freed the data pointer, and
6170          * completed the io for us.
6171          */
6172         if (retval != CTL_RETVAL_COMPLETE)
6173                 goto bailout_no_done;
6174
6175         /*
6176          * If the initiator sent us more than one page, parse the next one.
6177          */
6178         if (*len_left > 0)
6179                 goto do_next_page;
6180
6181         ctl_set_success(ctsio);
6182         free(ctsio->kern_data_ptr, M_CTL);
6183         ctl_done((union ctl_io *)ctsio);
6184
6185 bailout_no_done:
6186
6187         return (CTL_RETVAL_COMPLETE);
6189 }
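
/*
 * A self-contained toy (userland C, not CTL code) of the changeable-
 * mask check above: bits where the changeable mask is 0 must match the
 * current value, and the first mismatching bit, scanning from bit 7
 * down, is reported as the FIELD POINTER bit.
 */
#if 0	/* illustrative only */
#include <stdio.h>
#include <stdint.h>

static int
first_bad_bit(uint8_t user, uint8_t cur, uint8_t mask)
{
	int j;

	if ((user & ~mask) == (cur & ~mask))
		return (-1);	/* all non-changeable bits match */
	for (j = 7; j >= 0; j--)
		if (((1 << j) & ~mask & user) != ((1 << j) & ~mask & cur))
			return (j);
	return (0);
}

int
main(void)
{
	/* Only bit 2 changeable; user flips bit 6 -> bad bit is 6. */
	printf("%d\n", first_bad_bit(0x44, 0x00, 0x04));
	return (0);
}
#endif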
6190
6191 int
6192 ctl_mode_select(struct ctl_scsiio *ctsio)
6193 {
6194         struct ctl_lun *lun = CTL_LUN(ctsio);
6195         union ctl_modepage_info *modepage_info;
6196         int bd_len, i, header_size, param_len, rtd;
6197         uint32_t initidx;
6198
6199         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6200         switch (ctsio->cdb[0]) {
6201         case MODE_SELECT_6: {
6202                 struct scsi_mode_select_6 *cdb;
6203
6204                 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6205
6206                 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
6207                 param_len = cdb->length;
6208                 header_size = sizeof(struct scsi_mode_header_6);
6209                 break;
6210         }
6211         case MODE_SELECT_10: {
6212                 struct scsi_mode_select_10 *cdb;
6213
6214                 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6215
6216                 rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
6217                 param_len = scsi_2btoul(cdb->length);
6218                 header_size = sizeof(struct scsi_mode_header_10);
6219                 break;
6220         }
6221         default:
6222                 ctl_set_invalid_opcode(ctsio);
6223                 ctl_done((union ctl_io *)ctsio);
6224                 return (CTL_RETVAL_COMPLETE);
6225         }
6226
6227         if (rtd) {
6228                 if (param_len != 0) {
6229                         ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
6230                             /*command*/ 1, /*field*/ 0,
6231                             /*bit_valid*/ 0, /*bit*/ 0);
6232                         ctl_done((union ctl_io *)ctsio);
6233                         return (CTL_RETVAL_COMPLETE);
6234                 }
6235
6236                 /* Revert to defaults. */
6237                 ctl_init_page_index(lun);
6238                 mtx_lock(&lun->lun_lock);
6239                 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
6240                 mtx_unlock(&lun->lun_lock);
6241                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6242                         ctl_isc_announce_mode(lun, -1,
6243                             lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
6244                             lun->mode_pages.index[i].subpage);
6245                 }
6246                 ctl_set_success(ctsio);
6247                 ctl_done((union ctl_io *)ctsio);
6248                 return (CTL_RETVAL_COMPLETE);
6249         }
6250
6251         /*
6252          * From SPC-3:
6253          * "A parameter list length of zero indicates that the Data-Out Buffer
6254          * shall be empty. This condition shall not be considered as an error."
6255          */
6256         if (param_len == 0) {
6257                 ctl_set_success(ctsio);
6258                 ctl_done((union ctl_io *)ctsio);
6259                 return (CTL_RETVAL_COMPLETE);
6260         }
6261
6262         /*
6263          * Since we'll hit this the first time through, prior to
6264          * allocation, we don't need to free a data buffer here.
6265          */
6266         if (param_len < header_size) {
6267                 ctl_set_param_len_error(ctsio);
6268                 ctl_done((union ctl_io *)ctsio);
6269                 return (CTL_RETVAL_COMPLETE);
6270         }
6271
6272         /*
6273          * Allocate the data buffer and grab the user's data.  In theory,
6274          * we shouldn't have to sanity check the parameter list length here
6275          * because the maximum size is 64K.  We should be able to malloc
6276          * that much without too many problems.
6277          */
6278         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6279                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6280                 ctsio->kern_data_len = param_len;
6281                 ctsio->kern_total_len = param_len;
6282                 ctsio->kern_rel_offset = 0;
6283                 ctsio->kern_sg_entries = 0;
6284                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6285                 ctsio->be_move_done = ctl_config_move_done;
6286                 ctl_datamove((union ctl_io *)ctsio);
6287
6288                 return (CTL_RETVAL_COMPLETE);
6289         }
6290
6291         switch (ctsio->cdb[0]) {
6292         case MODE_SELECT_6: {
6293                 struct scsi_mode_header_6 *mh6;
6294
6295                 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6296                 bd_len = mh6->blk_desc_len;
6297                 break;
6298         }
6299         case MODE_SELECT_10: {
6300                 struct scsi_mode_header_10 *mh10;
6301
6302                 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6303                 bd_len = scsi_2btoul(mh10->blk_desc_len);
6304                 break;
6305         }
6306         default:
6307                 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
6308         }
6309
6310         if (param_len < (header_size + bd_len)) {
6311                 free(ctsio->kern_data_ptr, M_CTL);
6312                 ctl_set_param_len_error(ctsio);
6313                 ctl_done((union ctl_io *)ctsio);
6314                 return (CTL_RETVAL_COMPLETE);
6315         }
6316
6317         /*
6318          * Set the IO_CONT flag, so that if this I/O gets passed to
6319          * ctl_config_write_done(), it'll get passed back to
6320          * ctl_do_mode_select() for further processing, or completion if
6321          * we're all done.
6322          */
6323         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6324         ctsio->io_cont = ctl_do_mode_select;
6325
6326         modepage_info = (union ctl_modepage_info *)
6327                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6328         memset(modepage_info, 0, sizeof(*modepage_info));
6329         modepage_info->header.len_left = param_len - header_size - bd_len;
6330         modepage_info->header.len_used = header_size + bd_len;
6331
6332         return (ctl_do_mode_select((union ctl_io *)ctsio));
6333 }
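
/*
 * Worked example of the bookkeeping above: a MODE SELECT(6) carrying a
 * 4-byte header, an 8-byte block descriptor, and one 12-byte mode page
 * has param_len = 24, so ctl_do_mode_select() starts with
 * len_used = 12 and len_left = 12.  After the page handler runs,
 * len_left drops to 0 and the command completes; if a second page were
 * appended, the do_next_page loop would parse it starting at
 * kern_data_ptr + len_used.
 */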
6334
6335 int
6336 ctl_mode_sense(struct ctl_scsiio *ctsio)
6337 {
6338         struct ctl_lun *lun = CTL_LUN(ctsio);
6339         int pc, page_code, llba, subpage;
6340         int alloc_len, page_len, header_len, bd_len, total_len;
6341         void *block_desc;
6342         struct ctl_page_index *page_index;
6343
6344         llba = 0;
6345
6346         CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6347
6348         switch (ctsio->cdb[0]) {
6349         case MODE_SENSE_6: {
6350                 struct scsi_mode_sense_6 *cdb;
6351
6352                 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6353
6354                 header_len = sizeof(struct scsi_mode_hdr_6);
6355                 if (cdb->byte2 & SMS_DBD)
6356                         bd_len = 0;
6357                 else
6358                         bd_len = sizeof(struct scsi_mode_block_descr);
6359                 header_len += bd_len;
6360
6361                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6362                 page_code = cdb->page & SMS_PAGE_CODE;
6363                 subpage = cdb->subpage;
6364                 alloc_len = cdb->length;
6365                 break;
6366         }
6367         case MODE_SENSE_10: {
6368                 struct scsi_mode_sense_10 *cdb;
6369
6370                 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6371
6372                 header_len = sizeof(struct scsi_mode_hdr_10);
6373                 if (cdb->byte2 & SMS_DBD) {
6374                         bd_len = 0;
6375                 } else if (lun->be_lun->lun_type == T_DIRECT) {
6376                         if (cdb->byte2 & SMS10_LLBAA) {
6377                                 llba = 1;
6378                                 bd_len = sizeof(struct scsi_mode_block_descr_dlong);
6379                         } else
6380                                 bd_len = sizeof(struct scsi_mode_block_descr_dshort);
6381                 } else
6382                         bd_len = sizeof(struct scsi_mode_block_descr);
6383                 header_len += bd_len;
6384
6385                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6386                 page_code = cdb->page & SMS_PAGE_CODE;
6387                 subpage = cdb->subpage;
6388                 alloc_len = scsi_2btoul(cdb->length);
6389                 break;
6390         }
6391         default:
6392                 ctl_set_invalid_opcode(ctsio);
6393                 ctl_done((union ctl_io *)ctsio);
6394                 return (CTL_RETVAL_COMPLETE);
6395                 break; /* NOTREACHED */
6396         }
6397
6398         /*
6399          * We have to make a first pass through to calculate the size of
6400          * the pages that match the user's query.  Then we allocate enough
6401          * memory to hold it, and actually copy the data into the buffer.
6402          */
6403         switch (page_code) {
6404         case SMS_ALL_PAGES_PAGE: {
6405                 u_int i;
6406
6407                 page_len = 0;
6408
6409                 /*
6410                  * At the moment, values other than 0 and 0xff here are
6411                  * reserved according to SPC-3.
6412                  */
6413                 if ((subpage != SMS_SUBPAGE_PAGE_0)
6414                  && (subpage != SMS_SUBPAGE_ALL)) {
6415                         ctl_set_invalid_field(ctsio,
6416                                               /*sks_valid*/ 1,
6417                                               /*command*/ 1,
6418                                               /*field*/ 3,
6419                                               /*bit_valid*/ 0,
6420                                               /*bit*/ 0);
6421                         ctl_done((union ctl_io *)ctsio);
6422                         return (CTL_RETVAL_COMPLETE);
6423                 }
6424
6425                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6426                         page_index = &lun->mode_pages.index[i];
6427
6428                         /* Make sure the page is supported for this dev type */
6429                         if (lun->be_lun->lun_type == T_DIRECT &&
6430                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6431                                 continue;
6432                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6433                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6434                                 continue;
6435                         if (lun->be_lun->lun_type == T_CDROM &&
6436                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6437                                 continue;
6438
6439                         /*
6440                          * We don't use this subpage if the user didn't
6441                          * request all subpages.
6442                          */
6443                         if ((page_index->subpage != 0)
6444                          && (subpage == SMS_SUBPAGE_PAGE_0))
6445                                 continue;
6446
6447                         page_len += page_index->page_len;
6448                 }
6449                 break;
6450         }
6451         default: {
6452                 u_int i;
6453
6454                 page_len = 0;
6455
6456                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6457                         page_index = &lun->mode_pages.index[i];
6458
6459                         /* Make sure the page is supported for this dev type */
6460                         if (lun->be_lun->lun_type == T_DIRECT &&
6461                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6462                                 continue;
6463                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6464                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6465                                 continue;
6466                         if (lun->be_lun->lun_type == T_CDROM &&
6467                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6468                                 continue;
6469
6470                         /* Look for the right page code */
6471                         if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6472                                 continue;
6473
6474                         /* Look for the right subpage or the subpage wildcard */
6475                         if ((page_index->subpage != subpage)
6476                          && (subpage != SMS_SUBPAGE_ALL))
6477                                 continue;
6478
6479                         page_len += page_index->page_len;
6480                 }
6481
6482                 if (page_len == 0) {
6483                         ctl_set_invalid_field(ctsio,
6484                                               /*sks_valid*/ 1,
6485                                               /*command*/ 1,
6486                                               /*field*/ 2,
6487                                               /*bit_valid*/ 1,
6488                                               /*bit*/ 5);
6489                         ctl_done((union ctl_io *)ctsio);
6490                         return (CTL_RETVAL_COMPLETE);
6491                 }
6492                 break;
6493         }
6494         }
6495
6496         total_len = header_len + page_len;
6497
6498         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6499         ctsio->kern_sg_entries = 0;
6500         ctsio->kern_rel_offset = 0;
6501         ctsio->kern_data_len = min(total_len, alloc_len);
6502         ctsio->kern_total_len = ctsio->kern_data_len;
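
        /*
         * A note on the data-in idiom used throughout this file: the
         * complete response is always built in a total_len buffer, but
         * only min(total_len, alloc_len) bytes are shipped, honoring the
         * SPC rule that a device server must never transfer more than the
         * CDB's ALLOCATION LENGTH.  In sketch form:
         *
         *      buf = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
         *      ctsio->kern_data_len = min(total_len, alloc_len);
         *      ctsio->kern_total_len = ctsio->kern_data_len;
         *
         * Length fields inside the payload still describe the untruncated
         * data, so an initiator can detect truncation and retry with a
         * larger allocation length.
         */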
6503
6504         switch (ctsio->cdb[0]) {
6505         case MODE_SENSE_6: {
6506                 struct scsi_mode_hdr_6 *header;
6507
6508                 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6509
6510                 header->datalen = MIN(total_len - 1, 254);
6511                 if (lun->be_lun->lun_type == T_DIRECT) {
6512                         header->dev_specific = 0x10; /* DPOFUA */
6513                         if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6514                             (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0)
6515                                 header->dev_specific |= 0x80; /* WP */
6516                 }
6517                 header->block_descr_len = bd_len;
6518                 block_desc = &header[1];
6519                 break;
6520         }
6521         case MODE_SENSE_10: {
6522                 struct scsi_mode_hdr_10 *header;
6523                 int datalen;
6524
6525                 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6526
6527                 datalen = MIN(total_len - 2, 65533);
6528                 scsi_ulto2b(datalen, header->datalen);
6529                 if (lun->be_lun->lun_type == T_DIRECT) {
6530                         header->dev_specific = 0x10; /* DPOFUA */
6531                         if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6532                             (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0)
6533                                 header->dev_specific |= 0x80; /* WP */
6534                 }
6535                 if (llba)
6536                         header->flags |= SMH_LONGLBA;
6537                 scsi_ulto2b(bd_len, header->block_descr_len);
6538                 block_desc = &header[1];
6539                 break;
6540         }
6541         default:
6542                 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
6543         }
6544
6545         /*
6546          * If we've got a disk, use its blocksize in the block
6547          * descriptor.  Otherwise, just set it to 0.
6548          */
6549         if (bd_len > 0) {
6550                 if (lun->be_lun->lun_type == T_DIRECT) {
6551                         if (llba) {
6552                                 struct scsi_mode_block_descr_dlong *bd = block_desc;
6553                                 if (lun->be_lun->maxlba != 0)
6554                                         scsi_u64to8b(lun->be_lun->maxlba + 1,
6555                                             bd->num_blocks);
6556                                 scsi_ulto4b(lun->be_lun->blocksize,
6557                                     bd->block_len);
6558                         } else {
6559                                 struct scsi_mode_block_descr_dshort *bd = block_desc;
6560                                 if (lun->be_lun->maxlba != 0)
6561                                         scsi_ulto4b(MIN(lun->be_lun->maxlba+1,
6562                                             UINT32_MAX), bd->num_blocks);
6563                                 scsi_ulto3b(lun->be_lun->blocksize,
6564                                     bd->block_len);
6565                         }
6566                 } else {
6567                         struct scsi_mode_block_descr *bd = block_desc;
6568                         scsi_ulto3b(0, bd->block_len);
6569                 }
6570         }
6571
6572         switch (page_code) {
6573         case SMS_ALL_PAGES_PAGE: {
6574                 int i, data_used;
6575
6576                 data_used = header_len;
6577                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6578                         struct ctl_page_index *page_index;
6579
6580                         page_index = &lun->mode_pages.index[i];
6581                         if (lun->be_lun->lun_type == T_DIRECT &&
6582                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6583                                 continue;
6584                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6585                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6586                                 continue;
6587                         if (lun->be_lun->lun_type == T_CDROM &&
6588                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6589                                 continue;
6590
6591                         /*
6592                          * We don't use this subpage if the user didn't
6593                          * request all subpages.  We already checked (above)
6594                          * to make sure the user only specified a subpage
6595                          * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6596                          */
6597                         if ((page_index->subpage != 0)
6598                          && (subpage == SMS_SUBPAGE_PAGE_0))
6599                                 continue;
6600
6601                         /*
6602                          * Call the handler, if it exists, to update the
6603                          * page to the latest values.
6604                          */
6605                         if (page_index->sense_handler != NULL)
6606                                 page_index->sense_handler(ctsio, page_index, pc);
6607
6608                         memcpy(ctsio->kern_data_ptr + data_used,
6609                                page_index->page_data +
6610                                (page_index->page_len * pc),
6611                                page_index->page_len);
6612                         data_used += page_index->page_len;
6613                 }
6614                 break;
6615         }
6616         default: {
6617                 int i, data_used;
6618
6619                 data_used = header_len;
6620
6621                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6622                         struct ctl_page_index *page_index;
6623
6624                         page_index = &lun->mode_pages.index[i];
6625
6626                         /* Look for the right page code */
6627                         if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6628                                 continue;
6629
6630                         /* Look for the right subpage or the subpage wildcard */
6631                         if ((page_index->subpage != subpage)
6632                          && (subpage != SMS_SUBPAGE_ALL))
6633                                 continue;
6634
6635                         /* Make sure the page is supported for this dev type */
6636                         if (lun->be_lun->lun_type == T_DIRECT &&
6637                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6638                                 continue;
6639                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6640                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6641                                 continue;
6642                         if (lun->be_lun->lun_type == T_CDROM &&
6643                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6644                                 continue;
6645
6646                         /*
6647                          * Call the handler, if it exists, to update the
6648                          * page to the latest values.
6649                          */
6650                         if (page_index->sense_handler != NULL)
6651                                 page_index->sense_handler(ctsio, page_index, pc);
6652
6653                         memcpy(ctsio->kern_data_ptr + data_used,
6654                                page_index->page_data +
6655                                (page_index->page_len * pc),
6656                                page_index->page_len);
6657                         data_used += page_index->page_len;
6658                 }
6659                 break;
6660         }
6661         }
6662
6663         ctl_set_success(ctsio);
6664         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6665         ctsio->be_move_done = ctl_config_move_done;
6666         ctl_datamove((union ctl_io *)ctsio);
6667         return (CTL_RETVAL_COMPLETE);
6668 }
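
/*
 * For reference, a minimal initiator-side sketch of walking the MODE
 * SENSE(6) payload built above (assuming the data has been read into buf[]
 * and ignoring sub-page (SPF) formats; structures from scsi_all.h):
 *
 *      struct scsi_mode_hdr_6 *hdr = (struct scsi_mode_hdr_6 *)buf;
 *      uint8_t *page = buf + sizeof(*hdr) + hdr->block_descr_len;
 *      uint8_t *end = buf + hdr->datalen + 1;  // datalen excludes itself
 *
 *      while (page < end) {
 *              uint8_t code = page[0] & SMPH_PC_MASK;
 *              uint8_t len = page[1];          // page length, sans header
 *              // ... interpret page 'code' ...
 *              page += len + 2;
 *      }
 */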
6669
6670 int
6671 ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio,
6672                                struct ctl_page_index *page_index,
6673                                int pc)
6674 {
6675         struct ctl_lun *lun = CTL_LUN(ctsio);
6676         struct scsi_log_temperature *data;
6677         const char *value;
6678
6679         data = (struct scsi_log_temperature *)page_index->page_data;
6680
6681         scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code);
6682         data->hdr.param_control = SLP_LBIN;
6683         data->hdr.param_len = sizeof(struct scsi_log_temperature) -
6684             sizeof(struct scsi_log_param_header);
6685         if ((value = dnvlist_get_string(lun->be_lun->options, "temperature",
6686             NULL)) != NULL)
6687                 data->temperature = strtol(value, NULL, 0);
6688         else
6689                 data->temperature = 0xff;
6690         data++;
6691
6692         scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code);
6693         data->hdr.param_control = SLP_LBIN;
6694         data->hdr.param_len = sizeof(struct scsi_log_temperature) -
6695             sizeof(struct scsi_log_param_header);
6696         if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature",
6697             NULL)) != NULL)
6698                 data->temperature = strtol(value, NULL, 0);
6699         else
6700                 data->temperature = 0xff;
6701         return (0);
6702 }
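
/*
 * The "temperature" and "reftemperature" values reported above come from
 * LUN options.  Assuming the stock ctl.conf(5) syntax, they can be
 * provisioned per LUN roughly like this:
 *
 *      lun 0 {
 *              backend block
 *              path /dev/zvol/tank/vol0
 *              option temperature 35
 *              option reftemperature 50
 *      }
 *
 * Values are parsed with strtol(..., 0), so plain decimal and 0x-prefixed
 * hex both work; 0xff (no valid reading available) is reported when an
 * option is absent.
 */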
6703
6704 int
6705 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
6706                                struct ctl_page_index *page_index,
6707                                int pc)
6708 {
6709         struct ctl_lun *lun = CTL_LUN(ctsio);
6710         struct scsi_log_param_header *phdr;
6711         uint8_t *data;
6712         uint64_t val;
6713
6714         data = page_index->page_data;
6715
6716         if (lun->backend->lun_attr != NULL &&
6717             (val = lun->backend->lun_attr(lun->be_lun, "blocksavail"))
6718              != UINT64_MAX) {
6719                 phdr = (struct scsi_log_param_header *)data;
6720                 scsi_ulto2b(0x0001, phdr->param_code);
6721                 phdr->param_control = SLP_LBIN | SLP_LP;
6722                 phdr->param_len = 8;
6723                 data = (uint8_t *)(phdr + 1);
6724                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6725                 data[4] = 0x02; /* per-pool */
6726                 data += phdr->param_len;
6727         }
6728
6729         if (lun->backend->lun_attr != NULL &&
6730             (val = lun->backend->lun_attr(lun->be_lun, "blocksused"))
6731              != UINT64_MAX) {
6732                 phdr = (struct scsi_log_param_header *)data;
6733                 scsi_ulto2b(0x0002, phdr->param_code);
6734                 phdr->param_control = SLP_LBIN | SLP_LP;
6735                 phdr->param_len = 8;
6736                 data = (uint8_t *)(phdr + 1);
6737                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6738                 data[4] = 0x01; /* per-LUN */
6739                 data += phdr->param_len;
6740         }
6741
6742         if (lun->backend->lun_attr != NULL &&
6743             (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail"))
6744              != UINT64_MAX) {
6745                 phdr = (struct scsi_log_param_header *)data;
6746                 scsi_ulto2b(0x00f1, phdr->param_code);
6747                 phdr->param_control = SLP_LBIN | SLP_LP;
6748                 phdr->param_len = 8;
6749                 data = (uint8_t *)(phdr + 1);
6750                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6751                 data[4] = 0x02; /* per-pool */
6752                 data += phdr->param_len;
6753         }
6754
6755         if (lun->backend->lun_attr != NULL &&
6756             (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused"))
6757              != UINT64_MAX) {
6758                 phdr = (struct scsi_log_param_header *)data;
6759                 scsi_ulto2b(0x00f2, phdr->param_code);
6760                 phdr->param_control = SLP_LBIN | SLP_LP;
6761                 phdr->param_len = 8;
6762                 data = (uint8_t *)(phdr + 1);
6763                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6764                 data[4] = 0x02; /* per-pool */
6765                 data += phdr->param_len;
6766         }
6767
6768         page_index->page_len = data - page_index->page_data;
6769         return (0);
6770 }
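
/*
 * Each provisioning parameter emitted above is 12 bytes on the wire: a
 * 4-byte parameter header followed by an 8-byte body.  The layout, as
 * built by the code:
 *
 *      bytes 0-1   parameter code (0x0001 avail, 0x0002 used,
 *                  0x00f1/0x00f2 pool avail/used)
 *      byte  2     control (SLP_LBIN | SLP_LP)
 *      byte  3     length (8)
 *      bytes 4-7   resource count, big-endian, in 2^CTL_LBP_EXPONENT
 *                  block units
 *      byte  8     scale: 0x01 per-LUN, 0x02 per-pool
 *      bytes 9-11  reserved
 */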
6771
6772 int
6773 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6774                                struct ctl_page_index *page_index,
6775                                int pc)
6776 {
6777         struct ctl_lun *lun = CTL_LUN(ctsio);
6778         struct stat_page *data;
6779         struct bintime *t;
6780
6781         data = (struct stat_page *)page_index->page_data;
6782
6783         scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
6784         data->sap.hdr.param_control = SLP_LBIN;
6785         data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
6786             sizeof(struct scsi_log_param_header);
6787         scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
6788             data->sap.read_num);
6789         scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
6790             data->sap.write_num);
6791         if (lun->be_lun->blocksize > 0) {
6792                 scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
6793                     lun->be_lun->blocksize, data->sap.recvieved_lba);
6794                 scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
6795                     lun->be_lun->blocksize, data->sap.transmitted_lba);
6796         }
6797         t = &lun->stats.time[CTL_STATS_READ];
6798         scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
6799             data->sap.read_int);
6800         t = &lun->stats.time[CTL_STATS_WRITE];
6801         scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
6802             data->sap.write_int);
6803         scsi_u64to8b(0, data->sap.weighted_num);
6804         scsi_u64to8b(0, data->sap.weighted_int);
6805         scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
6806         data->it.hdr.param_control = SLP_LBIN;
6807         data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
6808             sizeof(struct scsi_log_param_header);
6809 #ifdef CTL_TIME_IO
6810         scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
6811 #endif
6812         scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
6813         data->ti.hdr.param_control = SLP_LBIN;
6814         data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
6815             sizeof(struct scsi_log_param_header);
6816         scsi_ulto4b(3, data->ti.exponent);
6817         scsi_ulto4b(1, data->ti.integer);
6818         return (0);
6819 }
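
/*
 * The interval fields above convert a struct bintime to milliseconds
 * without 128-bit math: bt->frac counts 2^-64 second units, so
 *
 *      ms = bt->sec * 1000 + bt->frac / (2^64 / 1000)
 *
 * with 2^64 / 1000 approximated by UINT64_MAX / 1000.  The resulting
 * error is a small fraction of a millisecond per conversion.
 */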
6820
6821 int
6822 ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
6823                                struct ctl_page_index *page_index,
6824                                int pc)
6825 {
6826         struct ctl_lun *lun = CTL_LUN(ctsio);
6827         struct scsi_log_informational_exceptions *data;
6828         const char *value;
6829
6830         data = (struct scsi_log_informational_exceptions *)page_index->page_data;
6831
6832         scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
6833         data->hdr.param_control = SLP_LBIN;
6834         data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
6835             sizeof(struct scsi_log_param_header);
6836         data->ie_asc = lun->ie_asc;
6837         data->ie_ascq = lun->ie_ascq;
6838         if ((value = dnvlist_get_string(lun->be_lun->options, "temperature",
6839             NULL)) != NULL)
6840                 data->temperature = strtol(value, NULL, 0);
6841         else
6842                 data->temperature = 0xff;
6843         return (0);
6844 }
6845
6846 int
6847 ctl_log_sense(struct ctl_scsiio *ctsio)
6848 {
6849         struct ctl_lun *lun = CTL_LUN(ctsio);
6850         int i, pc, page_code, subpage;
6851         int alloc_len, total_len;
6852         struct ctl_page_index *page_index;
6853         struct scsi_log_sense *cdb;
6854         struct scsi_log_header *header;
6855
6856         CTL_DEBUG_PRINT(("ctl_log_sense\n"));
6857
6858         cdb = (struct scsi_log_sense *)ctsio->cdb;
6859         pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
6860         page_code = cdb->page & SLS_PAGE_CODE;
6861         subpage = cdb->subpage;
6862         alloc_len = scsi_2btoul(cdb->length);
6863
6864         page_index = NULL;
6865         for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
6866                 page_index = &lun->log_pages.index[i];
6867
6868                 /* Look for the right page code */
6869                 if ((page_index->page_code & SL_PAGE_CODE) != page_code)
6870                         continue;
6871
6872                 /* Look for the right subpage */
6873                 if (page_index->subpage != subpage)
6874                         continue;
6875
6876                 break;
6877         }
6878         if (i >= CTL_NUM_LOG_PAGES) {
6879                 ctl_set_invalid_field(ctsio,
6880                                       /*sks_valid*/ 1,
6881                                       /*command*/ 1,
6882                                       /*field*/ 2,
6883                                       /*bit_valid*/ 0,
6884                                       /*bit*/ 0);
6885                 ctl_done((union ctl_io *)ctsio);
6886                 return (CTL_RETVAL_COMPLETE);
6887         }
6888
6889         total_len = sizeof(struct scsi_log_header) + page_index->page_len;
6890
6891         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6892         ctsio->kern_sg_entries = 0;
6893         ctsio->kern_rel_offset = 0;
6894         ctsio->kern_data_len = min(total_len, alloc_len);
6895         ctsio->kern_total_len = ctsio->kern_data_len;
6896
6897         header = (struct scsi_log_header *)ctsio->kern_data_ptr;
6898         header->page = page_index->page_code;
6899         if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING)
6900                 header->page |= SL_DS;
6901         if (page_index->subpage) {
6902                 header->page |= SL_SPF;
6903                 header->subpage = page_index->subpage;
6904         }
6905         scsi_ulto2b(page_index->page_len, header->datalen);
6906
6907         /*
6908          * Call the handler, if it exists, to update the
6909          * page to the latest values.
6910          */
6911         if (page_index->sense_handler != NULL)
6912                 page_index->sense_handler(ctsio, page_index, pc);
6913
6914         memcpy(header + 1, page_index->page_data, page_index->page_len);
6915
6916         ctl_set_success(ctsio);
6917         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6918         ctsio->be_move_done = ctl_config_move_done;
6919         ctl_datamove((union ctl_io *)ctsio);
6920         return (CTL_RETVAL_COMPLETE);
6921 }
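
/*
 * Initiator-side view of the payload built here: a 4-byte scsi_log_header
 * (page/subpage plus a big-endian parameter length that excludes the
 * header) followed by page_len bytes of log parameters.  A minimal decode
 * sketch:
 *
 *      struct scsi_log_header *hdr = (struct scsi_log_header *)buf;
 *      uint8_t *param = (uint8_t *)(hdr + 1);
 *      uint8_t *end = param + scsi_2btoul(hdr->datalen);
 *
 *      while (param < end) {
 *              struct scsi_log_param_header *ph =
 *                  (struct scsi_log_param_header *)param;
 *              // ... scsi_2btoul(ph->param_code) identifies the datum ...
 *              param += sizeof(*ph) + ph->param_len;
 *      }
 */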
6922
6923 int
6924 ctl_read_capacity(struct ctl_scsiio *ctsio)
6925 {
6926         struct ctl_lun *lun = CTL_LUN(ctsio);
6927         struct scsi_read_capacity *cdb;
6928         struct scsi_read_capacity_data *data;
6929         uint32_t lba;
6930
6931         CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6932
6933         cdb = (struct scsi_read_capacity *)ctsio->cdb;
6934
6935         lba = scsi_4btoul(cdb->addr);
6936         if (((cdb->pmi & SRC_PMI) == 0)
6937          && (lba != 0)) {
6938                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6939                                       /*sks_valid*/ 1,
6940                                       /*command*/ 1,
6941                                       /*field*/ 2,
6942                                       /*bit_valid*/ 0,
6943                                       /*bit*/ 0);
6944                 ctl_done((union ctl_io *)ctsio);
6945                 return (CTL_RETVAL_COMPLETE);
6946         }
6947
6948         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6949         data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6950         ctsio->kern_data_len = sizeof(*data);
6951         ctsio->kern_total_len = sizeof(*data);
6952         ctsio->kern_rel_offset = 0;
6953         ctsio->kern_sg_entries = 0;
6954
6955         /*
6956          * If the maximum LBA is greater than 0xfffffffe, the user must
6957          * issue a SERVICE ACTION IN (16) command, with the read capacity
6958          * service action set.
6959          */
6960         if (lun->be_lun->maxlba > 0xfffffffe)
6961                 scsi_ulto4b(0xffffffff, data->addr);
6962         else
6963                 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
6964
6965         /*
6966          * XXX KDM this may not be 512 bytes...
6967          */
6968         scsi_ulto4b(lun->be_lun->blocksize, data->length);
6969
6970         ctl_set_success(ctsio);
6971         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6972         ctsio->be_move_done = ctl_config_move_done;
6973         ctl_datamove((union ctl_io *)ctsio);
6974         return (CTL_RETVAL_COMPLETE);
6975 }
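
/*
 * An initiator that sees the 0xffffffff sentinel in RETURNED LOGICAL BLOCK
 * ADDRESS must fall back to READ CAPACITY(16).  Host-side sketch:
 *
 *      struct scsi_read_capacity_data *rc = (void *)buf;
 *
 *      if (scsi_4btoul(rc->addr) == 0xffffffff) {
 *              // capacity won't fit in 32 bits; reissue as
 *              // SERVICE ACTION IN(16) / READ CAPACITY(16)
 *      }
 */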
6976
6977 int
6978 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6979 {
6980         struct ctl_lun *lun = CTL_LUN(ctsio);
6981         struct scsi_read_capacity_16 *cdb;
6982         struct scsi_read_capacity_data_long *data;
6983         uint64_t lba;
6984         uint32_t alloc_len;
6985
6986         CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6987
6988         cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6989
6990         alloc_len = scsi_4btoul(cdb->alloc_len);
6991         lba = scsi_8btou64(cdb->addr);
6992
6993         if (((cdb->reladr & SRC16_PMI) == 0)
6994          && (lba != 0)) {
6995                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6996                                       /*sks_valid*/ 1,
6997                                       /*command*/ 1,
6998                                       /*field*/ 2,
6999                                       /*bit_valid*/ 0,
7000                                       /*bit*/ 0);
7001                 ctl_done((union ctl_io *)ctsio);
7002                 return (CTL_RETVAL_COMPLETE);
7003         }
7004
7005         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
7006         data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
7007         ctsio->kern_rel_offset = 0;
7008         ctsio->kern_sg_entries = 0;
7009         ctsio->kern_data_len = min(sizeof(*data), alloc_len);
7010         ctsio->kern_total_len = ctsio->kern_data_len;
7011
7012         scsi_u64to8b(lun->be_lun->maxlba, data->addr);
7013         /* XXX KDM this may not be 512 bytes... */
7014         scsi_ulto4b(lun->be_lun->blocksize, data->length);
7015         data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
7016         scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
7017         if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
7018                 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
7019
7020         ctl_set_success(ctsio);
7021         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7022         ctsio->be_move_done = ctl_config_move_done;
7023         ctl_datamove((union ctl_io *)ctsio);
7024         return (CTL_RETVAL_COMPLETE);
7025 }
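
/*
 * Device capacity follows from the fields filled above as
 * (maxlba + 1) * blocksize; e.g. maxlba 33554431 with 512-byte blocks
 * describes a 16 GiB LUN.  prot_lbppbe and lalba_lbp additionally expose
 * the physical/logical block exponent and the thin provisioning
 * (LBPME/LBPRZ) hints that initiators use to enable UNMAP.
 */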
7026
7027 int
7028 ctl_get_lba_status(struct ctl_scsiio *ctsio)
7029 {
7030         struct ctl_lun *lun = CTL_LUN(ctsio);
7031         struct scsi_get_lba_status *cdb;
7032         struct scsi_get_lba_status_data *data;
7033         struct ctl_lba_len_flags *lbalen;
7034         uint64_t lba;
7035         uint32_t alloc_len, total_len;
7036         int retval;
7037
7038         CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
7039
7040         cdb = (struct scsi_get_lba_status *)ctsio->cdb;
7041         lba = scsi_8btou64(cdb->addr);
7042         alloc_len = scsi_4btoul(cdb->alloc_len);
7043
7044         if (lba > lun->be_lun->maxlba) {
7045                 ctl_set_lba_out_of_range(ctsio, lba);
7046                 ctl_done((union ctl_io *)ctsio);
7047                 return (CTL_RETVAL_COMPLETE);
7048         }
7049
7050         total_len = sizeof(*data) + sizeof(data->descr[0]);
7051         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7052         data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
7053         ctsio->kern_rel_offset = 0;
7054         ctsio->kern_sg_entries = 0;
7055         ctsio->kern_data_len = min(total_len, alloc_len);
7056         ctsio->kern_total_len = ctsio->kern_data_len;
7057
7058         /* Fill dummy data in case the backend can't tell us anything. */
7059         scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
7060         scsi_u64to8b(lba, data->descr[0].addr);
7061         scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
7062             data->descr[0].length);
7063         data->descr[0].status = 0; /* Mapped or unknown. */
7064
7065         ctl_set_success(ctsio);
7066         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7067         ctsio->be_move_done = ctl_config_move_done;
7068
7069         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
7070         lbalen->lba = lba;
7071         lbalen->len = total_len;
7072         lbalen->flags = 0;
7073         retval = lun->backend->config_read((union ctl_io *)ctsio);
7074         return (retval);
7075 }
7076
7077 int
7078 ctl_read_defect(struct ctl_scsiio *ctsio)
7079 {
7080         struct scsi_read_defect_data_10 *ccb10;
7081         struct scsi_read_defect_data_12 *ccb12;
7082         struct scsi_read_defect_data_hdr_10 *data10;
7083         struct scsi_read_defect_data_hdr_12 *data12;
7084         uint32_t alloc_len, data_len;
7085         uint8_t format;
7086
7087         CTL_DEBUG_PRINT(("ctl_read_defect\n"));
7088
7089         if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7090                 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
7091                 format = ccb10->format;
7092                 alloc_len = scsi_2btoul(ccb10->alloc_length);
7093                 data_len = sizeof(*data10);
7094         } else {
7095                 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
7096                 format = ccb12->format;
7097                 alloc_len = scsi_4btoul(ccb12->alloc_length);
7098                 data_len = sizeof(*data12);
7099         }
7100         if (alloc_len == 0) {
7101                 ctl_set_success(ctsio);
7102                 ctl_done((union ctl_io *)ctsio);
7103                 return (CTL_RETVAL_COMPLETE);
7104         }
7105
7106         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
7107         ctsio->kern_rel_offset = 0;
7108         ctsio->kern_sg_entries = 0;
7109         ctsio->kern_data_len = min(data_len, alloc_len);
7110         ctsio->kern_total_len = ctsio->kern_data_len;
7111
7112         if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7113                 data10 = (struct scsi_read_defect_data_hdr_10 *)
7114                     ctsio->kern_data_ptr;
7115                 data10->format = format;
7116                 scsi_ulto2b(0, data10->length);
7117         } else {
7118                 data12 = (struct scsi_read_defect_data_hdr_12 *)
7119                     ctsio->kern_data_ptr;
7120                 data12->format = format;
7121                 scsi_ulto2b(0, data12->generation);
7122                 scsi_ulto4b(0, data12->length);
7123         }
7124
7125         ctl_set_success(ctsio);
7126         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7127         ctsio->be_move_done = ctl_config_move_done;
7128         ctl_datamove((union ctl_io *)ctsio);
7129         return (CTL_RETVAL_COMPLETE);
7130 }
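
/*
 * CTL never tracks media defects for an emulated LUN, so both the 10- and
 * 12-byte variants return a bare header with DEFECT LIST LENGTH zero; the
 * requested FORMAT is simply echoed back so the initiator sees the format
 * it asked for.
 */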
7131
7132 int
7133 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
7134 {
7135         struct ctl_softc *softc = CTL_SOFTC(ctsio);
7136         struct ctl_lun *lun = CTL_LUN(ctsio);
7137         struct scsi_maintenance_in *cdb;
7138         int retval;
7139         int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
7140         int num_ha_groups, num_target_ports, shared_group;
7141         struct ctl_port *port;
7142         struct scsi_target_group_data *rtg_ptr;
7143         struct scsi_target_group_data_extended *rtg_ext_ptr;
7144         struct scsi_target_port_group_descriptor *tpg_desc;
7145
7146         CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7147
7148         cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7149         retval = CTL_RETVAL_COMPLETE;
7150
7151         switch (cdb->byte2 & STG_PDF_MASK) {
7152         case STG_PDF_LENGTH:
7153                 ext = 0;
7154                 break;
7155         case STG_PDF_EXTENDED:
7156                 ext = 1;
7157                 break;
7158         default:
7159                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7160                                       /*sks_valid*/ 1,
7161                                       /*command*/ 1,
7162                                       /*field*/ 2,
7163                                       /*bit_valid*/ 1,
7164                                       /*bit*/ 5);
7165                 ctl_done((union ctl_io *)ctsio);
7166                 return (retval);
7167         }
7168
7169         num_target_ports = 0;
7170         shared_group = (softc->is_single != 0);
7171         mtx_lock(&softc->ctl_lock);
7172         STAILQ_FOREACH(port, &softc->port_list, links) {
7173                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7174                         continue;
7175                 if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
7176                         continue;
7177                 num_target_ports++;
7178                 if (port->status & CTL_PORT_STATUS_HA_SHARED)
7179                         shared_group = 1;
7180         }
7181         mtx_unlock(&softc->ctl_lock);
7182         num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES;
7183
7184         if (ext)
7185                 total_len = sizeof(struct scsi_target_group_data_extended);
7186         else
7187                 total_len = sizeof(struct scsi_target_group_data);
7188         total_len += sizeof(struct scsi_target_port_group_descriptor) *
7189                 (shared_group + num_ha_groups) +
7190             sizeof(struct scsi_target_port_descriptor) * num_target_ports;
7191
7192         alloc_len = scsi_4btoul(cdb->length);
7193
7194         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7195         ctsio->kern_sg_entries = 0;
7196         ctsio->kern_rel_offset = 0;
7197         ctsio->kern_data_len = min(total_len, alloc_len);
7198         ctsio->kern_total_len = ctsio->kern_data_len;
7199
7200         if (ext) {
7201                 rtg_ext_ptr = (struct scsi_target_group_data_extended *)
7202                     ctsio->kern_data_ptr;
7203                 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
7204                 rtg_ext_ptr->format_type = 0x10;
7205                 rtg_ext_ptr->implicit_transition_time = 0;
7206                 tpg_desc = &rtg_ext_ptr->groups[0];
7207         } else {
7208                 rtg_ptr = (struct scsi_target_group_data *)
7209                     ctsio->kern_data_ptr;
7210                 scsi_ulto4b(total_len - 4, rtg_ptr->length);
7211                 tpg_desc = &rtg_ptr->groups[0];
7212         }
7213
7214         mtx_lock(&softc->ctl_lock);
7215         pg = softc->port_min / softc->port_cnt;
7216         if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) {
7217                 /* Some shelf is known to be primary. */
7218                 if (softc->ha_link == CTL_HA_LINK_OFFLINE)
7219                         os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7220                 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
7221                         os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7222                 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
7223                         os = TPG_ASYMMETRIC_ACCESS_STANDBY;
7224                 else
7225                         os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7226                 if (lun->flags & CTL_LUN_PRIMARY_SC) {
7227                         ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7228                 } else {
7229                         ts = os;
7230                         os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7231                 }
7232         } else {
7233                 /* No known primary shelf. */
7234                 if (softc->ha_link == CTL_HA_LINK_OFFLINE) {
7235                         ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7236                         os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7237                 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) {
7238                         ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7239                         os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7240                 } else {
7241                         ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7242                 }
7243         }
7244         if (shared_group) {
7245                 tpg_desc->pref_state = ts;
7246                 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7247                     TPG_U_SUP | TPG_T_SUP;
7248                 scsi_ulto2b(1, tpg_desc->target_port_group);
7249                 tpg_desc->status = TPG_IMPLICIT;
7250                 pc = 0;
7251                 STAILQ_FOREACH(port, &softc->port_list, links) {
7252                         if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7253                                 continue;
7254                         if (!softc->is_single &&
7255                             (port->status & CTL_PORT_STATUS_HA_SHARED) == 0)
7256                                 continue;
7257                         if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
7258                                 continue;
7259                         scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7260                             relative_target_port_identifier);
7261                         pc++;
7262                 }
7263                 tpg_desc->target_port_count = pc;
7264                 tpg_desc = (struct scsi_target_port_group_descriptor *)
7265                     &tpg_desc->descriptors[pc];
7266         }
7267         for (g = 0; g < num_ha_groups; g++) {
7268                 tpg_desc->pref_state = (g == pg) ? ts : os;
7269                 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7270                     TPG_U_SUP | TPG_T_SUP;
7271                 scsi_ulto2b(2 + g, tpg_desc->target_port_group);
7272                 tpg_desc->status = TPG_IMPLICIT;
7273                 pc = 0;
7274                 STAILQ_FOREACH(port, &softc->port_list, links) {
7275                         if (port->targ_port < g * softc->port_cnt ||
7276                             port->targ_port >= (g + 1) * softc->port_cnt)
7277                                 continue;
7278                         if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7279                                 continue;
7280                         if (port->status & CTL_PORT_STATUS_HA_SHARED)
7281                                 continue;
7282                         if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
7283                                 continue;
7284                         scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7285                             relative_target_port_identifier);
7286                         pc++;
7287                 }
7288                 tpg_desc->target_port_count = pc;
7289                 tpg_desc = (struct scsi_target_port_group_descriptor *)
7290                     &tpg_desc->descriptors[pc];
7291         }
7292         mtx_unlock(&softc->ctl_lock);
7293
7294         ctl_set_success(ctsio);
7295         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7296         ctsio->be_move_done = ctl_config_move_done;
7297         ctl_datamove((union ctl_io *)ctsio);
7298         return (retval);
7299 }
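
/*
 * Group numbering used above, for reference: target port group 1 is the
 * HA-shared group (also used for all ports in single-node setups), while
 * groups 2 .. 1 + NUM_HA_SHELVES map one-to-one onto HA shelves, each
 * reporting ts (this shelf) or os (other shelf) as its ALUA state.  A
 * port belongs to shelf g when
 *
 *      g == port->targ_port / softc->port_cnt
 *
 * which matches the pg computation at the top of the function.
 */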
7300
7301 int
7302 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7303 {
7304         struct ctl_lun *lun = CTL_LUN(ctsio);
7305         struct scsi_report_supported_opcodes *cdb;
7306         const struct ctl_cmd_entry *entry, *sentry;
7307         struct scsi_report_supported_opcodes_all *all;
7308         struct scsi_report_supported_opcodes_descr *descr;
7309         struct scsi_report_supported_opcodes_one *one;
7310         int retval;
7311         int alloc_len, total_len;
7312         int opcode, service_action, i, j, num;
7313
7314         CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7315
7316         cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7317         retval = CTL_RETVAL_COMPLETE;
7318
7319         opcode = cdb->requested_opcode;
7320         service_action = scsi_2btoul(cdb->requested_service_action);
7321         switch (cdb->options & RSO_OPTIONS_MASK) {
7322         case RSO_OPTIONS_ALL:
7323                 num = 0;
7324                 for (i = 0; i < 256; i++) {
7325                         entry = &ctl_cmd_table[i];
7326                         if (entry->flags & CTL_CMD_FLAG_SA5) {
7327                                 for (j = 0; j < 32; j++) {
7328                                         sentry = &((const struct ctl_cmd_entry *)
7329                                             entry->execute)[j];
7330                                         if (ctl_cmd_applicable(
7331                                             lun->be_lun->lun_type, sentry))
7332                                                 num++;
7333                                 }
7334                         } else {
7335                                 if (ctl_cmd_applicable(lun->be_lun->lun_type,
7336                                     entry))
7337                                         num++;
7338                         }
7339                 }
7340                 total_len = sizeof(struct scsi_report_supported_opcodes_all) +
7341                     num * sizeof(struct scsi_report_supported_opcodes_descr);
7342                 break;
7343         case RSO_OPTIONS_OC:
7344                 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
7345                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7346                                               /*sks_valid*/ 1,
7347                                               /*command*/ 1,
7348                                               /*field*/ 2,
7349                                               /*bit_valid*/ 1,
7350                                               /*bit*/ 2);
7351                         ctl_done((union ctl_io *)ctsio);
7352                         return (CTL_RETVAL_COMPLETE);
7353                 }
7354                 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7355                 break;
7356         case RSO_OPTIONS_OC_SA:
7357                 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
7358                     service_action >= 32) {
7359                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7360                                               /*sks_valid*/ 1,
7361                                               /*command*/ 1,
7362                                               /*field*/ 2,
7363                                               /*bit_valid*/ 1,
7364                                               /*bit*/ 2);
7365                         ctl_done((union ctl_io *)ctsio);
7366                         return (CTL_RETVAL_COMPLETE);
7367                 }
7368                 /* FALLTHROUGH */
7369         case RSO_OPTIONS_OC_ASA:
7370                 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7371                 break;
7372         default:
7373                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7374                                       /*sks_valid*/ 1,
7375                                       /*command*/ 1,
7376                                       /*field*/ 2,
7377                                       /*bit_valid*/ 1,
7378                                       /*bit*/ 2);
7379                 ctl_done((union ctl_io *)ctsio);
7380                 return (CTL_RETVAL_COMPLETE);
7381         }
7382
7383         alloc_len = scsi_4btoul(cdb->length);
7384
7385         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7386         ctsio->kern_sg_entries = 0;
7387         ctsio->kern_rel_offset = 0;
7388         ctsio->kern_data_len = min(total_len, alloc_len);
7389         ctsio->kern_total_len = ctsio->kern_data_len;
7390
7391         switch (cdb->options & RSO_OPTIONS_MASK) {
7392         case RSO_OPTIONS_ALL:
7393                 all = (struct scsi_report_supported_opcodes_all *)
7394                     ctsio->kern_data_ptr;
7395                 num = 0;
7396                 for (i = 0; i < 256; i++) {
7397                         entry = &ctl_cmd_table[i];
7398                         if (entry->flags & CTL_CMD_FLAG_SA5) {
7399                                 for (j = 0; j < 32; j++) {
7400                                         sentry = &((const struct ctl_cmd_entry *)
7401                                             entry->execute)[j];
7402                                         if (!ctl_cmd_applicable(
7403                                             lun->be_lun->lun_type, sentry))
7404                                                 continue;
7405                                         descr = &all->descr[num++];
7406                                         descr->opcode = i;
7407                                         scsi_ulto2b(j, descr->service_action);
7408                                         descr->flags = RSO_SERVACTV;
7409                                         scsi_ulto2b(sentry->length,
7410                                             descr->cdb_length);
7411                                 }
7412                         } else {
7413                                 if (!ctl_cmd_applicable(lun->be_lun->lun_type,
7414                                     entry))
7415                                         continue;
7416                                 descr = &all->descr[num++];
7417                                 descr->opcode = i;
7418                                 scsi_ulto2b(0, descr->service_action);
7419                                 descr->flags = 0;
7420                                 scsi_ulto2b(entry->length, descr->cdb_length);
7421                         }
7422                 }
7423                 scsi_ulto4b(
7424                     num * sizeof(struct scsi_report_supported_opcodes_descr),
7425                     all->length);
7426                 break;
7427         case RSO_OPTIONS_OC:
7428                 one = (struct scsi_report_supported_opcodes_one *)
7429                     ctsio->kern_data_ptr;
7430                 entry = &ctl_cmd_table[opcode];
7431                 goto fill_one;
7432         case RSO_OPTIONS_OC_SA:
7433                 one = (struct scsi_report_supported_opcodes_one *)
7434                     ctsio->kern_data_ptr;
7435                 entry = &ctl_cmd_table[opcode];
7436                 entry = &((const struct ctl_cmd_entry *)
7437                     entry->execute)[service_action];
7438 fill_one:
7439                 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
7440                         one->support = 3;
7441                         scsi_ulto2b(entry->length, one->cdb_length);
7442                         one->cdb_usage[0] = opcode;
7443                         memcpy(&one->cdb_usage[1], entry->usage,
7444                             entry->length - 1);
7445                 } else
7446                         one->support = 1;
7447                 break;
7448         case RSO_OPTIONS_OC_ASA:
7449                 one = (struct scsi_report_supported_opcodes_one *)
7450                     ctsio->kern_data_ptr;
7451                 entry = &ctl_cmd_table[opcode];
7452                 if (entry->flags & CTL_CMD_FLAG_SA5) {
7453                         entry = &((const struct ctl_cmd_entry *)
7454                             entry->execute)[service_action];
7455                 } else if (service_action != 0) {
7456                         one->support = 1;
7457                         break;
7458                 }
7459                 goto fill_one;
7460         }
7461
7462         ctl_set_success(ctsio);
7463         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7464         ctsio->be_move_done = ctl_config_move_done;
7465         ctl_datamove((union ctl_io *)ctsio);
7466         return (retval);
7467 }
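
/*
 * The single-opcode descriptor filled above reports SUPPORT == 3 (command
 * is supported in conformance with a standard) plus a CDB usage map whose
 * first byte is the opcode and whose remainder comes from entry->usage.
 * Host-side sketch of consuming it:
 *
 *      struct scsi_report_supported_opcodes_one *one = (void *)buf;
 *
 *      if ((one->support & 0x07) == 3) {       // supported, standard
 *              int cdb_len = scsi_2btoul(one->cdb_length);
 *              // one->cdb_usage[0 .. cdb_len - 1] is the usage bitmap
 *      }
 */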
7468
7469 int
7470 ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
7471 {
7472         struct scsi_report_supported_tmf *cdb;
7473         struct scsi_report_supported_tmf_ext_data *data;
7474         int retval;
7475         int alloc_len, total_len;
7476
7477         CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
7478
7479         cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
7480
7481         retval = CTL_RETVAL_COMPLETE;
7482
7483         if (cdb->options & RST_REPD)
7484                 total_len = sizeof(struct scsi_report_supported_tmf_ext_data);
7485         else
7486                 total_len = sizeof(struct scsi_report_supported_tmf_data);
7487         alloc_len = scsi_4btoul(cdb->length);
7488
7489         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7490         ctsio->kern_sg_entries = 0;
7491         ctsio->kern_rel_offset = 0;
7492         ctsio->kern_data_len = min(total_len, alloc_len);
7493         ctsio->kern_total_len = ctsio->kern_data_len;
7494
7495         data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr;
7496         data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
7497             RST_TRS;
7498         data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
7499         data->length = total_len - 4;
7500
7501         ctl_set_success(ctsio);
7502         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7503         ctsio->be_move_done = ctl_config_move_done;
7504         ctl_datamove((union ctl_io *)ctsio);
7505         return (retval);
7506 }
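
/*
 * The bits above advertise abort task/task set, clear task set, LUN
 * reset, query task and target reset support, plus the extended query
 * and I_T nexus reset functions.  The short report layout is a prefix of
 * the extended one, so the common fields can safely be filled through
 * the extended structure regardless of REPD.
 */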
7507
7508 int
7509 ctl_report_timestamp(struct ctl_scsiio *ctsio)
7510 {
7511         struct scsi_report_timestamp *cdb;
7512         struct scsi_report_timestamp_data *data;
7513         struct timeval tv;
7514         int64_t timestamp;
7515         int retval;
7516         int alloc_len, total_len;
7517
7518         CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
7519
7520         cdb = (struct scsi_report_timestamp *)ctsio->cdb;
7521
7522         retval = CTL_RETVAL_COMPLETE;
7523
7524         total_len = sizeof(struct scsi_report_timestamp_data);
7525         alloc_len = scsi_4btoul(cdb->length);
7526
7527         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7528         ctsio->kern_sg_entries = 0;
7529         ctsio->kern_rel_offset = 0;
7530         ctsio->kern_data_len = min(total_len, alloc_len);
7531         ctsio->kern_total_len = ctsio->kern_data_len;
7532
7533         data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
7534         scsi_ulto2b(sizeof(*data) - 2, data->length);
7535         data->origin = RTS_ORIG_OUTSIDE;
7536         getmicrotime(&tv);
7537         timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
7538         scsi_ulto4b(timestamp >> 16, data->timestamp);
7539         scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
7540
7541         ctl_set_success(ctsio);
7542         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7543         ctsio->be_move_done = ctl_config_move_done;
7544         ctl_datamove((union ctl_io *)ctsio);
7545         return (retval);
7546 }
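
/*
 * The TIMESTAMP field is 48 bits of milliseconds since the Unix epoch,
 * split above into a 4-byte high part and a 2-byte low part:
 *
 *      high32 = timestamp >> 16;
 *      low16  = timestamp & 0xffff;
 *
 * 48 bits of milliseconds wrap only after roughly 8900 years, so the
 * encoding is effectively unbounded here.
 */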
7547
7548 int
7549 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7550 {
7551         struct ctl_softc *softc = CTL_SOFTC(ctsio);
7552         struct ctl_lun *lun = CTL_LUN(ctsio);
7553         struct scsi_per_res_in *cdb;
7554         int alloc_len, total_len = 0;
7555         /* struct scsi_per_res_in_rsrv in_data; */
7556         uint64_t key;
7557
7558         CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7559
7560         cdb = (struct scsi_per_res_in *)ctsio->cdb;
7561
7562         alloc_len = scsi_2btoul(cdb->length);
7563
7564 retry:
7565         mtx_lock(&lun->lun_lock);
7566         switch (cdb->action) {
7567         case SPRI_RK: /* read keys */
7568                 total_len = sizeof(struct scsi_per_res_in_keys) +
7569                         lun->pr_key_count *
7570                         sizeof(struct scsi_per_res_key);
7571                 break;
7572         case SPRI_RR: /* read reservation */
7573                 if (lun->flags & CTL_LUN_PR_RESERVED)
7574                         total_len = sizeof(struct scsi_per_res_in_rsrv);
7575                 else
7576                         total_len = sizeof(struct scsi_per_res_in_header);
7577                 break;
7578         case SPRI_RC: /* report capabilities */
7579                 total_len = sizeof(struct scsi_per_res_cap);
7580                 break;
7581         case SPRI_RS: /* read full status */
7582                 total_len = sizeof(struct scsi_per_res_in_header) +
7583                     (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7584                     lun->pr_key_count;
7585                 break;
7586         default:
7587                 panic("%s: Invalid PR type %#x", __func__, cdb->action);
7588         }
7589         mtx_unlock(&lun->lun_lock);
7590
7591         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7592         ctsio->kern_rel_offset = 0;
7593         ctsio->kern_sg_entries = 0;
7594         ctsio->kern_data_len = min(total_len, alloc_len);
7595         ctsio->kern_total_len = ctsio->kern_data_len;
7596
7597         mtx_lock(&lun->lun_lock);
7598         switch (cdb->action) {
7599         case SPRI_RK: { // read keys
7600                 struct scsi_per_res_in_keys *res_keys;
7601                 int i, key_count;
7602
7603                 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7604
7605                 /*
7606                  * We had to drop the lock to allocate our buffer, which
7607                  * leaves time for someone to come in with another
7608                  * persistent reservation.  (That is unlikely, though,
7609                  * since this should be the only persistent reservation
7610                  * command active right now.)
7611                  */
7612                 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7613                     (lun->pr_key_count *
7614                      sizeof(struct scsi_per_res_key)))){
7615                         mtx_unlock(&lun->lun_lock);
7616                         free(ctsio->kern_data_ptr, M_CTL);
7617                         printf("%s: reservation length changed, retrying\n",
7618                                __func__);
7619                         goto retry;
7620                 }
7621
7622                 scsi_ulto4b(lun->pr_generation, res_keys->header.generation);
7623
7624                 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7625                              lun->pr_key_count, res_keys->header.length);
7626
7627                 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
7628                         if ((key = ctl_get_prkey(lun, i)) == 0)
7629                                 continue;
7630
7631                         /*
7632                          * We used lun->pr_key_count to calculate the
7633                          * size to allocate.  If it turns out the number of
7634                          * initiators with the registered flag set is
7635                          * larger than that (i.e. they haven't been kept in
7636                          * sync), we've got a problem.
7637                          */
7638                         if (key_count >= lun->pr_key_count) {
7639                                 key_count++;
7640                                 continue;
7641                         }
7642                         scsi_u64to8b(key, res_keys->keys[key_count].key);
7643                         key_count++;
7644                 }
7645                 break;
7646         }
7647         case SPRI_RR: { // read reservation
7648                 struct scsi_per_res_in_rsrv *res;
7649                 int tmp_len, header_only;
7650
7651                 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7652
7653                 scsi_ulto4b(lun->pr_generation, res->header.generation);
7654
7655                 if (lun->flags & CTL_LUN_PR_RESERVED)
7656                 {
7657                         tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7658                         scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7659                                     res->header.length);
7660                         header_only = 0;
7661                 } else {
7662                         tmp_len = sizeof(struct scsi_per_res_in_header);
7663                         scsi_ulto4b(0, res->header.length);
7664                         header_only = 1;
7665                 }
7666
7667                 /*
7668                  * We had to drop the lock to allocate our buffer, which
7669                  * leaves time for someone to come in with another
7670                  * persistent reservation.  (That is unlikely, though,
7671                  * since this should be the only persistent reservation
7672                  * command active right now.)
7673                  */
7674                 if (tmp_len != total_len) {
7675                         mtx_unlock(&lun->lun_lock);
7676                         free(ctsio->kern_data_ptr, M_CTL);
7677                         printf("%s: reservation status changed, retrying\n",
7678                                __func__);
7679                         goto retry;
7680                 }
7681
7682                 /*
7683                  * No reservation held, so we're done.
7684                  */
7685                 if (header_only != 0)
7686                         break;
7687
7688                 /*
7689                  * If the registration is an All Registrants type, the key
7690                  * is 0, since it doesn't really matter.
7691                  */
7692                 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7693                         scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
7694                             res->data.reservation);
7695                 }
7696                 res->data.scopetype = lun->pr_res_type;
7697                 break;
7698         }
7699         case SPRI_RC: // report capabilities
7700         {
7701                 struct scsi_per_res_cap *res_cap;
7702                 uint16_t type_mask;
7703
7704                 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7705                 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7706                 res_cap->flags1 = SPRI_CRH;
7707                 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5;
7708                 type_mask = SPRI_TM_WR_EX_AR |
7709                             SPRI_TM_EX_AC_RO |
7710                             SPRI_TM_WR_EX_RO |
7711                             SPRI_TM_EX_AC |
7712                             SPRI_TM_WR_EX |
7713                             SPRI_TM_EX_AC_AR;
7714                 scsi_ulto2b(type_mask, res_cap->type_mask);
7715                 break;
7716         }
7717         case SPRI_RS: { // read full status
7718                 struct scsi_per_res_in_full *res_status;
7719                 struct scsi_per_res_in_full_desc *res_desc;
7720                 struct ctl_port *port;
7721                 int i, len;
7722
7723                 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
7724
7725                 /*
7726                  * We had to drop the lock to allocate our buffer, which
7727                  * leaves time for someone to come in with another
7728                  * persistent reservation.  (That is unlikely, though,
7729                  * since this should be the only persistent reservation
7730                  * command active right now.)
7731                  */
7732                 if (total_len < (sizeof(struct scsi_per_res_in_header) +
7733                     (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7734                      lun->pr_key_count)){
7735                         mtx_unlock(&lun->lun_lock);
7736                         free(ctsio->kern_data_ptr, M_CTL);
7737                         printf("%s: reservation length changed, retrying\n",
7738                                __func__);
7739                         goto retry;
7740                 }
7741
7742                 scsi_ulto4b(lun->pr_generation, res_status->header.generation);
7743
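                     /*
                      * Walk every possible initiator index, emitting one
                      * variable-length descriptor (reservation key, holder
                      * flags, relative target port and TransportID) for each
                      * registered nexus.
                      */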
7744                 res_desc = &res_status->desc[0];
7745                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7746                         if ((key = ctl_get_prkey(lun, i)) == 0)
7747                                 continue;
7748
7749                         scsi_u64to8b(key, res_desc->res_key.key);
7750                         if ((lun->flags & CTL_LUN_PR_RESERVED) &&
7751                             (lun->pr_res_idx == i ||
7752                              lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
7753                                 res_desc->flags = SPRI_FULL_R_HOLDER;
7754                                 res_desc->scopetype = lun->pr_res_type;
7755                         }
7756                         scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
7757                             res_desc->rel_trgt_port_id);
7758                         len = 0;
7759                         port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
7760                         if (port != NULL)
7761                                 len = ctl_create_iid(port,
7762                                     i % CTL_MAX_INIT_PER_PORT,
7763                                     res_desc->transport_id);
7764                         scsi_ulto4b(len, res_desc->additional_length);
7765                         res_desc = (struct scsi_per_res_in_full_desc *)
7766                             &res_desc->transport_id[len];
7767                 }
7768                 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
7769                     res_status->header.length);
7770                 break;
7771         }
7772         default:
7773                 panic("%s: Invalid PR action %#x", __func__, cdb->action);
7774         }
7775         mtx_unlock(&lun->lun_lock);
7776
7777         ctl_set_success(ctsio);
7778         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7779         ctsio->be_move_done = ctl_config_move_done;
7780         ctl_datamove((union ctl_io *)ctsio);
7781         return (CTL_RETVAL_COMPLETE);
7782 }
7783
7784 /*
7785  * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7786  * it should return.
7787  */
7788 static int
7789 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7790                 uint64_t sa_res_key, uint8_t type, uint32_t residx,
7791                 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7792                 struct scsi_per_res_out_parms* param)
7793 {
7794         union ctl_ha_msg persis_io;
7795         int i;
7796
7797         mtx_lock(&lun->lun_lock);
7798         if (sa_res_key == 0) {
7799                 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
7800                         /* validate scope and type */
7801                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7802                              SPR_LU_SCOPE) {
7803                                 mtx_unlock(&lun->lun_lock);
7804                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7805                                                       /*sks_valid*/ 1,
7806                                                       /*command*/ 1,
7807                                                       /*field*/ 2,
7808                                                       /*bit_valid*/ 1,
7809                                                       /*bit*/ 4);
7810                                 ctl_done((union ctl_io *)ctsio);
7811                                 return (1);
7812                         }
7813
7814                         if (type > 8 || type == 2 || type == 4 || type == 0) {
7815                                 mtx_unlock(&lun->lun_lock);
7816                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7817                                                       /*sks_valid*/ 1,
7818                                                       /*command*/ 1,
7819                                                       /*field*/ 2,
7820                                                       /*bit_valid*/ 1,
7821                                                       /*bit*/ 0);
7822                                 ctl_done((union ctl_io *)ctsio);
7823                                 return (1);
7824                         }
7825
7826                         /*
7827                          * Unregister everybody else and build UA for
7828                          * them
7829                          */
7830                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7831                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
7832                                         continue;
7833
7834                                 ctl_clr_prkey(lun, i);
7835                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7836                         }
7837                         lun->pr_key_count = 1;
7838                         lun->pr_res_type = type;
7839                         if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
7840                             lun->pr_res_type != SPR_TYPE_EX_AC_AR)
7841                                 lun->pr_res_idx = residx;
7842                         lun->pr_generation++;
7843                         mtx_unlock(&lun->lun_lock);
7844
7845                         /* send msg to other side */
7846                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7847                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7848                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7849                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
7850                         persis_io.pr.pr_info.res_type = type;
7851                         memcpy(persis_io.pr.pr_info.sa_res_key,
7852                                param->serv_act_res_key,
7853                                sizeof(param->serv_act_res_key));
7854                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7855                             sizeof(persis_io.pr), M_WAITOK);
7856                 } else {
7857                         /* not all registrants */
7858                         mtx_unlock(&lun->lun_lock);
7859                         free(ctsio->kern_data_ptr, M_CTL);
7860                         ctl_set_invalid_field(ctsio,
7861                                               /*sks_valid*/ 1,
7862                                               /*command*/ 0,
7863                                               /*field*/ 8,
7864                                               /*bit_valid*/ 0,
7865                                               /*bit*/ 0);
7866                         ctl_done((union ctl_io *)ctsio);
7867                         return (1);
7868                 }
7869         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7870                 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
7871                 int found = 0;
7872
7873                 if (res_key == sa_res_key) {
7874                         /* special case */
7875                         /*
7876                          * The spec implies this is not good, but doesn't
7877                          * say what to do.  There are two choices: either
7878                          * generate a reservation conflict, or a check
7879                          * condition with illegal field in parameter data.
7880                          * Since the latter is what is done when the
7881                          * sa_res_key is zero, take the same approach
7882                          * here, as this also concerns the sa_res_key.
7883                          */
7884                         mtx_unlock(&lun->lun_lock);
7885                         free(ctsio->kern_data_ptr, M_CTL);
7886                         ctl_set_invalid_field(ctsio,
7887                                               /*sks_valid*/ 1,
7888                                               /*command*/ 0,
7889                                               /*field*/ 8,
7890                                               /*bit_valid*/ 0,
7891                                               /*bit*/ 0);
7892                         ctl_done((union ctl_io *)ctsio);
7893                         return (1);
7894                 }
7895
7896                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7897                         if (ctl_get_prkey(lun, i) != sa_res_key)
7898                                 continue;
7899
7900                         found = 1;
7901                         ctl_clr_prkey(lun, i);
7902                         lun->pr_key_count--;
7903                         ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7904                 }
7905                 if (!found) {
7906                         mtx_unlock(&lun->lun_lock);
7907                         free(ctsio->kern_data_ptr, M_CTL);
7908                         ctl_set_reservation_conflict(ctsio);
7909                         ctl_done((union ctl_io *)ctsio);
7910                         return (1);
7911                 }
7912                 lun->pr_generation++;
7913                 mtx_unlock(&lun->lun_lock);
7914
7915                 /* send msg to other side */
7916                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7917                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7918                 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7919                 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7920                 persis_io.pr.pr_info.res_type = type;
7921                 memcpy(persis_io.pr.pr_info.sa_res_key,
7922                        param->serv_act_res_key,
7923                        sizeof(param->serv_act_res_key));
7924                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7925                     sizeof(persis_io.pr), M_WAITOK);
7926         } else {
7927                 /* Reserved but not all registrants */
7928                 /* sa_res_key is res holder */
7929                 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
7930                         /* validate scope and type */
7931                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7932                              SPR_LU_SCOPE) {
7933                                 mtx_unlock(&lun->lun_lock);
7934                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7935                                                       /*sks_valid*/ 1,
7936                                                       /*command*/ 1,
7937                                                       /*field*/ 2,
7938                                                       /*bit_valid*/ 1,
7939                                                       /*bit*/ 4);
7940                                 ctl_done((union ctl_io *)ctsio);
7941                                 return (1);
7942                         }
7943
7944                         if (type > 8 || type == 2 || type == 4 || type == 0) {
7945                                 mtx_unlock(&lun->lun_lock);
7946                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7947                                                       /*sks_valid*/ 1,
7948                                                       /*command*/ 1,
7949                                                       /*field*/ 2,
7950                                                       /*bit_valid*/ 1,
7951                                                       /*bit*/ 0);
7952                                 ctl_done((union ctl_io *)ctsio);
7953                                 return (1);
7954                         }
7955
7956                         /*
7957                          * Do the following:
7958                          * If sa_res_key != res_key, remove all
7959                          * registrants with sa_res_key and generate a
7960                          * UA (Registrations Preempted) for them; if
7961                          * it wasn't an exclusive reservation, generate
7962                          * a UA (Reservations Preempted) for all other
7963                          * registered nexuses if the type has changed.
7964                          * Establish the new reservation and holder.
7965                          * If res_key and sa_res_key are the same, do
7966                          * the above except don't unregister the res
7967                          * holder.
7968                          */
7969
7970                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7971                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
7972                                         continue;
7973
7974                                 if (sa_res_key == ctl_get_prkey(lun, i)) {
7975                                         ctl_clr_prkey(lun, i);
7976                                         lun->pr_key_count--;
7977                                         ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7978                                 } else if (type != lun->pr_res_type &&
7979                                     (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
7980                                      lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
7981                                         ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
7982                                 }
7983                         }
7984                         lun->pr_res_type = type;
7985                         if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
7986                             lun->pr_res_type != SPR_TYPE_EX_AC_AR)
7987                                 lun->pr_res_idx = residx;
7988                         else
7989                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
7990                         lun->pr_generation++;
7991                         mtx_unlock(&lun->lun_lock);
7992
7993                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7994                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7995                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7996                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
7997                         persis_io.pr.pr_info.res_type = type;
7998                         memcpy(persis_io.pr.pr_info.sa_res_key,
7999                                param->serv_act_res_key,
8000                                sizeof(param->serv_act_res_key));
8001                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8002                             sizeof(persis_io.pr), M_WAITOK);
8003                 } else {
8004                         /*
8005                          * sa_res_key is not the res holder; just
8006                          * remove registrants.
8007                          */
8008                         int found = 0;
8009
8010                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8011                                 if (sa_res_key != ctl_get_prkey(lun, i))
8012                                         continue;
8013
8014                                 found = 1;
8015                                 ctl_clr_prkey(lun, i);
8016                                 lun->pr_key_count--;
8017                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8018                         }
8019
8020                         if (!found) {
8021                                 mtx_unlock(&lun->lun_lock);
8022                                 free(ctsio->kern_data_ptr, M_CTL);
8023                                 ctl_set_reservation_conflict(ctsio);
8024                                 ctl_done((union ctl_io *)ctsio);
8025                                 return (1);
8026                         }
8027                         lun->pr_generation++;
8028                         mtx_unlock(&lun->lun_lock);
8029
8030                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8031                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8032                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8033                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8034                         persis_io.pr.pr_info.res_type = type;
8035                         memcpy(persis_io.pr.pr_info.sa_res_key,
8036                                param->serv_act_res_key,
8037                                sizeof(param->serv_act_res_key));
8038                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8039                             sizeof(persis_io.pr), M_WAITOK);
8040                 }
8041         }
8042         return (0);
8043 }
8044
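     /*
      * Apply a PREEMPT action received from the other SC, mirroring the
      * registrant and reservation changes made by ctl_pro_preempt() on
      * that side.  Called with the LUN lock held.
      */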
8045 static void
8046 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
8047 {
8048         uint64_t sa_res_key;
8049         int i;
8050
8051         sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
8052
8053         if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8054          || lun->pr_res_idx == CTL_PR_NO_RESERVATION
8055          || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) {
8056                 if (sa_res_key == 0) {
8057                         /*
8058                          * Unregister everybody else and build UA for
8059                          * them
8060                          */
8061                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8062                                 if (i == msg->pr.pr_info.residx ||
8063                                     ctl_get_prkey(lun, i) == 0)
8064                                         continue;
8065
8066                                 ctl_clr_prkey(lun, i);
8067                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8068                         }
8069
8070                         lun->pr_key_count = 1;
8071                         lun->pr_res_type = msg->pr.pr_info.res_type;
8072                         if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8073                             lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8074                                 lun->pr_res_idx = msg->pr.pr_info.residx;
8075                 } else {
8076                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8077                                 if (sa_res_key != ctl_get_prkey(lun, i))
8078                                         continue;
8079
8080                                 ctl_clr_prkey(lun, i);
8081                                 lun->pr_key_count--;
8082                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8083                         }
8084                 }
8085         } else {
8086                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8087                         if (i == msg->pr.pr_info.residx ||
8088                             ctl_get_prkey(lun, i) == 0)
8089                                 continue;
8090
8091                         if (sa_res_key == ctl_get_prkey(lun, i)) {
8092                                 ctl_clr_prkey(lun, i);
8093                                 lun->pr_key_count--;
8094                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8095                         } else if (msg->pr.pr_info.res_type != lun->pr_res_type
8096                             && (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8097                              lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
8098                                 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8099                         }
8100                 }
8101                 lun->pr_res_type = msg->pr.pr_info.res_type;
8102                 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8103                     lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8104                         lun->pr_res_idx = msg->pr.pr_info.residx;
8105                 else
8106                         lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8107         }
8108         lun->pr_generation++;
8109 }
8110
8113 int
8114 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
8115 {
8116         struct ctl_softc *softc = CTL_SOFTC(ctsio);
8117         struct ctl_lun *lun = CTL_LUN(ctsio);
8118         int retval;
8119         uint32_t param_len;
8120         struct scsi_per_res_out *cdb;
8121         struct scsi_per_res_out_parms* param;
8122         uint32_t residx;
8123         uint64_t res_key, sa_res_key, key;
8124         uint8_t type;
8125         union ctl_ha_msg persis_io;
8126         int    i;
8127
8128         CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8129
8130         cdb = (struct scsi_per_res_out *)ctsio->cdb;
8131         retval = CTL_RETVAL_COMPLETE;
8132
8133         /*
8134          * We only support whole-LUN scope.  The scope and type are ignored
8135          * for REGISTER, REGISTER AND IGNORE EXISTING KEY, and CLEAR.
8136          * Scope and type are also ignored on some preempts.
8137          * Verify the reservation type here as well.
8138          */
8139         type = cdb->scope_type & SPR_TYPE_MASK;
8140         if ((cdb->action == SPRO_RESERVE)
8141          || (cdb->action == SPRO_RELEASE)) {
8142                 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
8143                         ctl_set_invalid_field(/*ctsio*/ ctsio,
8144                                               /*sks_valid*/ 1,
8145                                               /*command*/ 1,
8146                                               /*field*/ 2,
8147                                               /*bit_valid*/ 1,
8148                                               /*bit*/ 4);
8149                         ctl_done((union ctl_io *)ctsio);
8150                         return (CTL_RETVAL_COMPLETE);
8151                 }
8152
8153                 if (type > 8 || type == 2 || type == 4 || type == 0) {
8154                         ctl_set_invalid_field(/*ctsio*/ ctsio,
8155                                               /*sks_valid*/ 1,
8156                                               /*command*/ 1,
8157                                               /*field*/ 2,
8158                                               /*bit_valid*/ 1,
8159                                               /*bit*/ 0);
8160                         ctl_done((union ctl_io *)ctsio);
8161                         return (CTL_RETVAL_COMPLETE);
8162                 }
8163         }
8164
8165         param_len = scsi_4btoul(cdb->length);
8166
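             /*
              * If we haven't yet fetched the parameter list from the
              * initiator, allocate a buffer and start the data-out phase;
              * this command re-enters here once ctl_datamove() completes.
              */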
8167         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
8168                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
8169                 ctsio->kern_data_len = param_len;
8170                 ctsio->kern_total_len = param_len;
8171                 ctsio->kern_rel_offset = 0;
8172                 ctsio->kern_sg_entries = 0;
8173                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8174                 ctsio->be_move_done = ctl_config_move_done;
8175                 ctl_datamove((union ctl_io *)ctsio);
8176
8177                 return (CTL_RETVAL_COMPLETE);
8178         }
8179
8180         param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
8181
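             /*
              * residx is the global initiator index (port index times
              * CTL_MAX_INIT_PER_PORT plus the initiator's index within
              * that port), which also indexes the LUN's registration
              * key table.
              */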
8182         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8183         res_key = scsi_8btou64(param->res_key.key);
8184         sa_res_key = scsi_8btou64(param->serv_act_res_key);
8185
8186         /*
8187          * Validate the reservation key here, except for SPRO_REG_IGNO.
8188          * This must be done for all other service actions.
8189          */
8190         if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
8191                 mtx_lock(&lun->lun_lock);
8192                 if ((key = ctl_get_prkey(lun, residx)) != 0) {
8193                         if (res_key != key) {
8194                                 /*
8195                                  * The current key passed in doesn't match
8196                                  * the one the initiator previously
8197                                  * registered.
8198                                  */
8199                                 mtx_unlock(&lun->lun_lock);
8200                                 free(ctsio->kern_data_ptr, M_CTL);
8201                                 ctl_set_reservation_conflict(ctsio);
8202                                 ctl_done((union ctl_io *)ctsio);
8203                                 return (CTL_RETVAL_COMPLETE);
8204                         }
8205                 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
8206                         /*
8207                          * We are not registered
8208                          */
8209                         mtx_unlock(&lun->lun_lock);
8210                         free(ctsio->kern_data_ptr, M_CTL);
8211                         ctl_set_reservation_conflict(ctsio);
8212                         ctl_done((union ctl_io *)ctsio);
8213                         return (CTL_RETVAL_COMPLETE);
8214                 } else if (res_key != 0) {
8215                         /*
8216                          * We are not registered and trying to register but
8217                          * the register key isn't zero.
8218                          */
8219                         mtx_unlock(&lun->lun_lock);
8220                         free(ctsio->kern_data_ptr, M_CTL);
8221                         ctl_set_reservation_conflict(ctsio);
8222                         ctl_done((union ctl_io *)ctsio);
8223                         return (CTL_RETVAL_COMPLETE);
8224                 }
8225                 mtx_unlock(&lun->lun_lock);
8226         }
8227
8228         switch (cdb->action & SPRO_ACTION_MASK) {
8229         case SPRO_REGISTER:
8230         case SPRO_REG_IGNO: {
8231
8232                 /*
8233                  * We don't support any of these options, as we report in
8234                  * the read capabilities request (see
8235                  * ctl_persistent_reserve_in(), above).
8236                  */
8237                 if ((param->flags & SPR_SPEC_I_PT)
8238                  || (param->flags & SPR_ALL_TG_PT)
8239                  || (param->flags & SPR_APTPL)) {
8240                         int bit_ptr;
8241
8242                         if (param->flags & SPR_APTPL)
8243                                 bit_ptr = 0;
8244                         else if (param->flags & SPR_ALL_TG_PT)
8245                                 bit_ptr = 2;
8246                         else /* SPR_SPEC_I_PT */
8247                                 bit_ptr = 3;
8248
8249                         free(ctsio->kern_data_ptr, M_CTL);
8250                         ctl_set_invalid_field(ctsio,
8251                                               /*sks_valid*/ 1,
8252                                               /*command*/ 0,
8253                                               /*field*/ 20,
8254                                               /*bit_valid*/ 1,
8255                                               /*bit*/ bit_ptr);
8256                         ctl_done((union ctl_io *)ctsio);
8257                         return (CTL_RETVAL_COMPLETE);
8258                 }
8259
8260                 mtx_lock(&lun->lun_lock);
8261
8262                 /*
8263                  * The initiator wants to clear the
8264                  * key/unregister.
8265                  */
8266                 if (sa_res_key == 0) {
8267                         if ((res_key == 0
8268                           && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
8269                          || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
8270                           && ctl_get_prkey(lun, residx) == 0)) {
8271                                 mtx_unlock(&lun->lun_lock);
8272                                 goto done;
8273                         }
8274
8275                         ctl_clr_prkey(lun, residx);
8276                         lun->pr_key_count--;
8277
8278                         if (residx == lun->pr_res_idx) {
8279                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8280                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8281
8282                                 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8283                                      lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
8284                                     lun->pr_key_count) {
8285                                         /*
8286                                          * If the reservation is a
8287                                          * registrants-only type, we need
8288                                          * to generate a UA for the other
8289                                          * registered inits.  The sense
8290                                          * code should be RESERVATIONS
8291                                          * RELEASED.
8292                                          */
8292
8293                                         for (i = softc->init_min; i < softc->init_max; i++) {
8294                                                 if (ctl_get_prkey(lun, i) == 0)
8295                                                         continue;
8296                                                 ctl_est_ua(lun, i,
8297                                                     CTL_UA_RES_RELEASE);
8298                                         }
8299                                 }
8300                                 lun->pr_res_type = 0;
8301                         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8302                                 if (lun->pr_key_count == 0) {
8303                                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8304                                         lun->pr_res_type = 0;
8305                                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8306                                 }
8307                         }
8308                         lun->pr_generation++;
8309                         mtx_unlock(&lun->lun_lock);
8310
8311                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8312                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8313                         persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8314                         persis_io.pr.pr_info.residx = residx;
8315                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8316                             sizeof(persis_io.pr), M_WAITOK);
8317                 } else /* sa_res_key != 0 */ {
8318
8319                         /*
8320                          * If we aren't currently registered, increment
8321                          * the key count; in any case record the new key.
8322                          */
8323                         ctl_alloc_prkey(lun, residx);
8324                         if (ctl_get_prkey(lun, residx) == 0)
8325                                 lun->pr_key_count++;
8326                         ctl_set_prkey(lun, residx, sa_res_key);
8327                         lun->pr_generation++;
8328                         mtx_unlock(&lun->lun_lock);
8329
8330                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8331                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8332                         persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8333                         persis_io.pr.pr_info.residx = residx;
8334                         memcpy(persis_io.pr.pr_info.sa_res_key,
8335                                param->serv_act_res_key,
8336                                sizeof(param->serv_act_res_key));
8337                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8338                             sizeof(persis_io.pr), M_WAITOK);
8339                 }
8340
8341                 break;
8342         }
8343         case SPRO_RESERVE:
8344                 mtx_lock(&lun->lun_lock);
8345                 if (lun->flags & CTL_LUN_PR_RESERVED) {
8346                         /*
8347                          * If this isn't the reservation holder and it's
8348                          * not an "all registrants" type, or if the type
8349                          * is different, then we have a conflict.
8350                          */
8351                         if ((lun->pr_res_idx != residx
8352                           && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8353                          || lun->pr_res_type != type) {
8354                                 mtx_unlock(&lun->lun_lock);
8355                                 free(ctsio->kern_data_ptr, M_CTL);
8356                                 ctl_set_reservation_conflict(ctsio);
8357                                 ctl_done((union ctl_io *)ctsio);
8358                                 return (CTL_RETVAL_COMPLETE);
8359                         }
8360                         mtx_unlock(&lun->lun_lock);
8361                 } else /* create a reservation */ {
8362                         /*
8363                          * If it's not an "all registrants" type, record
8364                          * the reservation holder.
8365                          */
8366                         if (type != SPR_TYPE_WR_EX_AR
8367                          && type != SPR_TYPE_EX_AC_AR)
8368                                 lun->pr_res_idx = residx; /* Res holder */
8369                         else
8370                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8371
8372                         lun->flags |= CTL_LUN_PR_RESERVED;
8373                         lun->pr_res_type = type;
8374
8375                         mtx_unlock(&lun->lun_lock);
8376
8377                         /* send msg to other side */
8378                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8379                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8380                         persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8381                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8382                         persis_io.pr.pr_info.res_type = type;
8383                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8384                             sizeof(persis_io.pr), M_WAITOK);
8385                 }
8386                 break;
8387
8388         case SPRO_RELEASE:
8389                 mtx_lock(&lun->lun_lock);
8390                 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8391                         /* No reservation exists; return good status. */
8392                         mtx_unlock(&lun->lun_lock);
8393                         goto done;
8394                 }
8395                 /*
8396                  * Is this nexus a reservation holder?
8397                  */
8398                 if (lun->pr_res_idx != residx
8399                  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8400                         /*
8401                          * Not a res holder; return good status but
8402                          * do nothing.
8403                          */
8404                         mtx_unlock(&lun->lun_lock);
8405                         goto done;
8406                 }
8407
8408                 if (lun->pr_res_type != type) {
8409                         mtx_unlock(&lun->lun_lock);
8410                         free(ctsio->kern_data_ptr, M_CTL);
8411                         ctl_set_illegal_pr_release(ctsio);
8412                         ctl_done((union ctl_io *)ctsio);
8413                         return (CTL_RETVAL_COMPLETE);
8414                 }
8415
8416                 /* okay to release */
8417                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8418                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8419                 lun->pr_res_type = 0;
8420
8421                 /*
8422                  * If this isn't an exclusive access reservation and NUAR
8423                  * is not set, generate UA for all other registrants.
8424                  */
8425                 if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
8426                     (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
8427                         for (i = softc->init_min; i < softc->init_max; i++) {
8428                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
8429                                         continue;
8430                                 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8431                         }
8432                 }
8433                 mtx_unlock(&lun->lun_lock);
8434
8435                 /* Send msg to other side */
8436                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8437                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8438                 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8439                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8440                      sizeof(persis_io.pr), M_WAITOK);
8441                 break;
8442
8443         case SPRO_CLEAR:
8444                 /* send msg to other side */
8445
8446                 mtx_lock(&lun->lun_lock);
8447                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8448                 lun->pr_res_type = 0;
8449                 lun->pr_key_count = 0;
8450                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8451
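                     /*
                      * Drop every registration; every other initiator that
                      * was registered gets a unit attention.
                      */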
8452                 ctl_clr_prkey(lun, residx);
8453                 for (i = 0; i < CTL_MAX_INITIATORS; i++)
8454                         if (ctl_get_prkey(lun, i) != 0) {
8455                                 ctl_clr_prkey(lun, i);
8456                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8457                         }
8458                 lun->pr_generation++;
8459                 mtx_unlock(&lun->lun_lock);
8460
8461                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8462                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8463                 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8464                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8465                      sizeof(persis_io.pr), M_WAITOK);
8466                 break;
8467
8468         case SPRO_PREEMPT:
8469         case SPRO_PRE_ABO: {
8470                 int nretval;
8471
8472                 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8473                                           residx, ctsio, cdb, param);
8474                 if (nretval != 0)
8475                         return (CTL_RETVAL_COMPLETE);
8476                 break;
8477         }
8478         default:
8479                 panic("%s: Invalid PR action %#x", __func__, cdb->action);
8480         }
8481
8482 done:
8483         free(ctsio->kern_data_ptr, M_CTL);
8484         ctl_set_success(ctsio);
8485         ctl_done((union ctl_io *)ctsio);
8486
8487         return (retval);
8488 }
8489
8490 /*
8491  * This routine handles a message from the other SC pertaining to
8492  * persistent reserve out.  All the error checking will have been done
8493  * on that side, so only performing the action is needed here to keep
8494  * the two in sync.
8495  */
8496 static void
8497 ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
8498 {
8499         struct ctl_softc *softc = CTL_SOFTC(io);
8500         union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
8501         struct ctl_lun *lun;
8502         int i;
8503         uint32_t residx, targ_lun;
8504
8505         targ_lun = msg->hdr.nexus.targ_mapped_lun;
8506         mtx_lock(&softc->ctl_lock);
8507         if (targ_lun >= ctl_max_luns ||
8508             (lun = softc->ctl_luns[targ_lun]) == NULL) {
8509                 mtx_unlock(&softc->ctl_lock);
8510                 return;
8511         }
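             /*
              * Take the LUN lock before dropping the softc lock, so the
              * LUN cannot go away while we operate on it.
              */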
8512         mtx_lock(&lun->lun_lock);
8513         mtx_unlock(&softc->ctl_lock);
8514         if (lun->flags & CTL_LUN_DISABLED) {
8515                 mtx_unlock(&lun->lun_lock);
8516                 return;
8517         }
8518         residx = ctl_get_initindex(&msg->hdr.nexus);
8519         switch (msg->pr.pr_info.action) {
8520         case CTL_PR_REG_KEY:
8521                 ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
8522                 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
8523                         lun->pr_key_count++;
8524                 ctl_set_prkey(lun, msg->pr.pr_info.residx,
8525                     scsi_8btou64(msg->pr.pr_info.sa_res_key));
8526                 lun->pr_generation++;
8527                 break;
8528
8529         case CTL_PR_UNREG_KEY:
8530                 ctl_clr_prkey(lun, msg->pr.pr_info.residx);
8531                 lun->pr_key_count--;
8532
8533                 /* XXX Need to see if the reservation has been released */
8534                 /* if so do we need to generate UA? */
8535                 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8536                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8537                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8538
8539                         if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8540                              lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
8541                             lun->pr_key_count) {
8542                                 /*
8543                                  * If the reservation is a
8544                                  * registrants-only type, we need to
8545                                  * generate a UA for the other registered
8546                                  * inits.  The sense code should be
8547                                  * RESERVATIONS RELEASED.
8548                                  */
8549
8550                                 for (i = softc->init_min; i < softc->init_max; i++) {
8551                                         if (ctl_get_prkey(lun, i) == 0)
8552                                                 continue;
8553
8554                                         ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8555                                 }
8556                         }
8557                         lun->pr_res_type = 0;
8558                 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8559                         if (lun->pr_key_count == 0) {
8560                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8561                                 lun->pr_res_type = 0;
8562                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8563                         }
8564                 }
8565                 lun->pr_generation++;
8566                 break;
8567
8568         case CTL_PR_RESERVE:
8569                 lun->flags |= CTL_LUN_PR_RESERVED;
8570                 lun->pr_res_type = msg->pr.pr_info.res_type;
8571                 lun->pr_res_idx = msg->pr.pr_info.residx;
8572
8573                 break;
8574
8575         case CTL_PR_RELEASE:
8576                 /*
8577                  * If this isn't an exclusive access reservation and NUAR
8578                  * is not set, generate UA for all other registrants.
8579                  */
8580                 if (lun->pr_res_type != SPR_TYPE_EX_AC &&
8581                     lun->pr_res_type != SPR_TYPE_WR_EX &&
8582                     (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
8583                         for (i = softc->init_min; i < softc->init_max; i++) {
8584                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
8585                                         continue;
8586                                 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8587                         }
8588                 }
8589
8590                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8591                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8592                 lun->pr_res_type = 0;
8593                 break;
8594
8595         case CTL_PR_PREEMPT:
8596                 ctl_pro_preempt_other(lun, msg);
8597                 break;
8598         case CTL_PR_CLEAR:
8599                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8600                 lun->pr_res_type = 0;
8601                 lun->pr_key_count = 0;
8602                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8603
8604                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8605                         if (ctl_get_prkey(lun, i) == 0)
8606                                 continue;
8607                         ctl_clr_prkey(lun, i);
8608                         ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8609                 }
8610                 lun->pr_generation++;
8611                 break;
8612         }
8613
8614         mtx_unlock(&lun->lun_lock);
8615 }
8616
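     /*
      * Handle the READ, WRITE, WRITE AND VERIFY and WRITE ATOMIC families
      * of CDBs: decode the LBA and transfer length, validate the range,
      * and hand the I/O to the backend's data_submit() method.
      */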
8617 int
8618 ctl_read_write(struct ctl_scsiio *ctsio)
8619 {
8620         struct ctl_lun *lun = CTL_LUN(ctsio);
8621         struct ctl_lba_len_flags *lbalen;
8622         uint64_t lba;
8623         uint32_t num_blocks;
8624         int flags, retval;
8625         int isread;
8626
8627         CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8628
8629         flags = 0;
8630         isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
8631               || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8632         switch (ctsio->cdb[0]) {
8633         case READ_6:
8634         case WRITE_6: {
8635                 struct scsi_rw_6 *cdb;
8636
8637                 cdb = (struct scsi_rw_6 *)ctsio->cdb;
8638
8639                 lba = scsi_3btoul(cdb->addr);
8640                 /* only 5 bits are valid in the most significant address byte */
8641                 lba &= 0x1fffff;
8642                 num_blocks = cdb->length;
8643                 /*
8644                  * This is correct according to SBC-2.
8645                  */
8646                 if (num_blocks == 0)
8647                         num_blocks = 256;
8648                 break;
8649         }
8650         case READ_10:
8651         case WRITE_10: {
8652                 struct scsi_rw_10 *cdb;
8653
8654                 cdb = (struct scsi_rw_10 *)ctsio->cdb;
8655                 if (cdb->byte2 & SRW10_FUA)
8656                         flags |= CTL_LLF_FUA;
8657                 if (cdb->byte2 & SRW10_DPO)
8658                         flags |= CTL_LLF_DPO;
8659                 lba = scsi_4btoul(cdb->addr);
8660                 num_blocks = scsi_2btoul(cdb->length);
8661                 break;
8662         }
8663         case WRITE_VERIFY_10: {
8664                 struct scsi_write_verify_10 *cdb;
8665
8666                 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8667                 flags |= CTL_LLF_FUA;
8668                 if (cdb->byte2 & SWV_DPO)
8669                         flags |= CTL_LLF_DPO;
8670                 lba = scsi_4btoul(cdb->addr);
8671                 num_blocks = scsi_2btoul(cdb->length);
8672                 break;
8673         }
8674         case READ_12:
8675         case WRITE_12: {
8676                 struct scsi_rw_12 *cdb;
8677
8678                 cdb = (struct scsi_rw_12 *)ctsio->cdb;
8679                 if (cdb->byte2 & SRW12_FUA)
8680                         flags |= CTL_LLF_FUA;
8681                 if (cdb->byte2 & SRW12_DPO)
8682                         flags |= CTL_LLF_DPO;
8683                 lba = scsi_4btoul(cdb->addr);
8684                 num_blocks = scsi_4btoul(cdb->length);
8685                 break;
8686         }
8687         case WRITE_VERIFY_12: {
8688                 struct scsi_write_verify_12 *cdb;
8689
8690                 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
8691                 flags |= CTL_LLF_FUA;
8692                 if (cdb->byte2 & SWV_DPO)
8693                         flags |= CTL_LLF_DPO;
8694                 lba = scsi_4btoul(cdb->addr);
8695                 num_blocks = scsi_4btoul(cdb->length);
8696                 break;
8697         }
8698         case READ_16:
8699         case WRITE_16: {
8700                 struct scsi_rw_16 *cdb;
8701
8702                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8703                 if (cdb->byte2 & SRW12_FUA)
8704                         flags |= CTL_LLF_FUA;
8705                 if (cdb->byte2 & SRW12_DPO)
8706                         flags |= CTL_LLF_DPO;
8707                 lba = scsi_8btou64(cdb->addr);
8708                 num_blocks = scsi_4btoul(cdb->length);
8709                 break;
8710         }
8711         case WRITE_ATOMIC_16: {
8712                 struct scsi_write_atomic_16 *cdb;
8713
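                     /*
                      * An atomicblock limit of 0 means the backend does
                      * not support atomic writes, so reject the opcode.
                      */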
8714                 if (lun->be_lun->atomicblock == 0) {
8715                         ctl_set_invalid_opcode(ctsio);
8716                         ctl_done((union ctl_io *)ctsio);
8717                         return (CTL_RETVAL_COMPLETE);
8718                 }
8719
8720                 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb;
8721                 if (cdb->byte2 & SRW12_FUA)
8722                         flags |= CTL_LLF_FUA;
8723                 if (cdb->byte2 & SRW12_DPO)
8724                         flags |= CTL_LLF_DPO;
8725                 lba = scsi_8btou64(cdb->addr);
8726                 num_blocks = scsi_2btoul(cdb->length);
8727                 if (num_blocks > lun->be_lun->atomicblock) {
8728                         ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
8729                             /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
8730                             /*bit*/ 0);
8731                         ctl_done((union ctl_io *)ctsio);
8732                         return (CTL_RETVAL_COMPLETE);
8733                 }
8734                 break;
8735         }
8736         case WRITE_VERIFY_16: {
8737                 struct scsi_write_verify_16 *cdb;
8738
8739                 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
8740                 flags |= CTL_LLF_FUA;
8741                 if (cdb->byte2 & SWV_DPO)
8742                         flags |= CTL_LLF_DPO;
8743                 lba = scsi_8btou64(cdb->addr);
8744                 num_blocks = scsi_4btoul(cdb->length);
8745                 break;
8746         }
8747         default:
8748                 /*
8749                  * We got a command we don't support.  This shouldn't
8750                  * happen; commands should be filtered out above us.
8751                  */
8752                 ctl_set_invalid_opcode(ctsio);
8753                 ctl_done((union ctl_io *)ctsio);
8754
8755                 return (CTL_RETVAL_COMPLETE);
8756                 break; /* NOTREACHED */
8757         }
8758
8759         /*
8760          * The first check is to make sure we're in bounds, the second
8761          * check is to catch wrap-around problems.  If the lba + num blocks
8762          * is less than the lba, then we've wrapped around and the block
8763          * range is invalid anyway.
8764          */
8765         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8766          || ((lba + num_blocks) < lba)) {
8767                 ctl_set_lba_out_of_range(ctsio,
8768                     MAX(lba, lun->be_lun->maxlba + 1));
8769                 ctl_done((union ctl_io *)ctsio);
8770                 return (CTL_RETVAL_COMPLETE);
8771         }
8772
8773         /*
8774          * According to SBC-3, a transfer length of 0 is not an error.
8775          * Note that this cannot happen with WRITE(6) or READ(6), since 0
8776          * translates to 256 blocks for those commands.
8777          */
8778         if (num_blocks == 0) {
8779                 ctl_set_success(ctsio);
8780                 ctl_done((union ctl_io *)ctsio);
8781                 return (CTL_RETVAL_COMPLETE);
8782         }
8783
8784         /* Set FUA and/or DPO if caches are disabled. */
8785         if (isread) {
8786                 if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0)
8787                         flags |= CTL_LLF_FUA | CTL_LLF_DPO;
8788         } else {
8789                 if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
8790                         flags |= CTL_LLF_FUA;
8791         }
8792
8793         lbalen = (struct ctl_lba_len_flags *)
8794             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8795         lbalen->lba = lba;
8796         lbalen->len = num_blocks;
8797         lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
8798
8799         ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
8800         ctsio->kern_rel_offset = 0;
8801
8802         CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
8803
8804         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8805         return (retval);
8806 }
8807
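     /*
      * Continuation for COMPARE AND WRITE: once the compare pass has
      * succeeded, resubmit the same LBA range as a write using the second
      * half of the data the initiator sent.
      */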
8808 static int
8809 ctl_cnw_cont(union ctl_io *io)
8810 {
8811         struct ctl_lun *lun = CTL_LUN(io);
8812         struct ctl_scsiio *ctsio;
8813         struct ctl_lba_len_flags *lbalen;
8814         int retval;
8815
8816         ctsio = &io->scsiio;
8817         ctsio->io_hdr.status = CTL_STATUS_NONE;
8818         ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
8819         lbalen = (struct ctl_lba_len_flags *)
8820             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8821         lbalen->flags &= ~CTL_LLF_COMPARE;
8822         lbalen->flags |= CTL_LLF_WRITE;
8823
8824         CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
8825         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8826         return (retval);
8827 }
8828
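     /*
      * Handle COMPARE AND WRITE.  The initiator supplies twice the NUMBER
      * OF LOGICAL BLOCKS worth of data (verify data followed by write
      * data); the write pass is driven by ctl_cnw_cont() after the
      * compare succeeds.
      */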
8829 int
8830 ctl_cnw(struct ctl_scsiio *ctsio)
8831 {
8832         struct ctl_lun *lun = CTL_LUN(ctsio);
8833         struct ctl_lba_len_flags *lbalen;
8834         uint64_t lba;
8835         uint32_t num_blocks;
8836         int flags, retval;
8837
8838         CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
8839
8840         flags = 0;
8841         switch (ctsio->cdb[0]) {
8842         case COMPARE_AND_WRITE: {
8843                 struct scsi_compare_and_write *cdb;
8844
8845                 cdb = (struct scsi_compare_and_write *)ctsio->cdb;
8846                 if (cdb->byte2 & SRW10_FUA)
8847                         flags |= CTL_LLF_FUA;
8848                 if (cdb->byte2 & SRW10_DPO)
8849                         flags |= CTL_LLF_DPO;
8850                 lba = scsi_8btou64(cdb->addr);
8851                 num_blocks = cdb->length;
8852                 break;
8853         }
8854         default:
8855                 /*
8856                  * We got a command we don't support.  This shouldn't
8857                  * happen; commands should be filtered out above us.
8858                  */
8859                 ctl_set_invalid_opcode(ctsio);
8860                 ctl_done((union ctl_io *)ctsio);
8861
8862                 return (CTL_RETVAL_COMPLETE);
8863                 break; /* NOTREACHED */
8864         }
8865
8866         /*
8867          * The first check is to make sure we're in bounds, the second
8868          * check is to catch wrap-around problems.  If the lba + num blocks
8869          * is less than the lba, then we've wrapped around and the block
8870          * range is invalid anyway.
8871          */
8872         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8873          || ((lba + num_blocks) < lba)) {
8874                 ctl_set_lba_out_of_range(ctsio,
8875                     MAX(lba, lun->be_lun->maxlba + 1));
8876                 ctl_done((union ctl_io *)ctsio);
8877                 return (CTL_RETVAL_COMPLETE);
8878         }
8879
8880         /*
8881          * According to SBC-3, a transfer length of 0 is not an error.
8882          */
8883         if (num_blocks == 0) {
8884                 ctl_set_success(ctsio);
8885                 ctl_done((union ctl_io *)ctsio);
8886                 return (CTL_RETVAL_COMPLETE);
8887         }
8888
8889         /* Set FUA if write cache is disabled. */
8890         if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
8891                 flags |= CTL_LLF_FUA;
8892
8893         ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
8894         ctsio->kern_rel_offset = 0;
8895
8896         /*
8897          * Set the IO_CONT flag, so that if this I/O gets passed to
8898          * ctl_data_submit_done(), it'll get passed back to
8899  * ctl_cnw_cont() for further processing.
8900          */
8901         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
8902         ctsio->io_cont = ctl_cnw_cont;
8903
8904         lbalen = (struct ctl_lba_len_flags *)
8905             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8906         lbalen->lba = lba;
8907         lbalen->len = num_blocks;
8908         lbalen->flags = CTL_LLF_COMPARE | flags;
8909
8910         CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
8911         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8912         return (retval);
8913 }
8914
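     /*
      * Handle VERIFY(10), VERIFY(12) and VERIFY(16).  With BYTCHK set,
      * data is transferred from the initiator and compared against the
      * medium; otherwise only the medium itself is verified.
      */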
8915 int
8916 ctl_verify(struct ctl_scsiio *ctsio)
8917 {
8918         struct ctl_lun *lun = CTL_LUN(ctsio);
8919         struct ctl_lba_len_flags *lbalen;
8920         uint64_t lba;
8921         uint32_t num_blocks;
8922         int bytchk, flags;
8923         int retval;
8924
8925         CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
8926
8927         bytchk = 0;
8928         flags = CTL_LLF_FUA;
8929         switch (ctsio->cdb[0]) {
8930         case VERIFY_10: {
8931                 struct scsi_verify_10 *cdb;
8932
8933                 cdb = (struct scsi_verify_10 *)ctsio->cdb;
8934                 if (cdb->byte2 & SVFY_BYTCHK)
8935                         bytchk = 1;
8936                 if (cdb->byte2 & SVFY_DPO)
8937                         flags |= CTL_LLF_DPO;
8938                 lba = scsi_4btoul(cdb->addr);
8939                 num_blocks = scsi_2btoul(cdb->length);
8940                 break;
8941         }
8942         case VERIFY_12: {
8943                 struct scsi_verify_12 *cdb;
8944
8945                 cdb = (struct scsi_verify_12 *)ctsio->cdb;
8946                 if (cdb->byte2 & SVFY_BYTCHK)
8947                         bytchk = 1;
8948                 if (cdb->byte2 & SVFY_DPO)
8949                         flags |= CTL_LLF_DPO;
8950                 lba = scsi_4btoul(cdb->addr);
8951                 num_blocks = scsi_4btoul(cdb->length);
8952                 break;
8953         }
8954         case VERIFY_16: {
8955                 struct scsi_rw_16 *cdb;
8956
8957                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8958                 if (cdb->byte2 & SVFY_BYTCHK)
8959                         bytchk = 1;
8960                 if (cdb->byte2 & SVFY_DPO)
8961                         flags |= CTL_LLF_DPO;
8962                 lba = scsi_8btou64(cdb->addr);
8963                 num_blocks = scsi_4btoul(cdb->length);
8964                 break;
8965         }
8966         default:
8967                 /*
8968                  * We got a command we don't support.  This shouldn't
8969                  * happen; commands should be filtered out above us.
8970                  */
8971                 ctl_set_invalid_opcode(ctsio);
8972                 ctl_done((union ctl_io *)ctsio);
8973                 return (CTL_RETVAL_COMPLETE);
8974         }
8975
8976         /*
8977          * The first check is to make sure we're in bounds, the second
8978          * check is to catch wrap-around problems.  If the lba + num blocks
8979          * is less than the lba, then we've wrapped around and the block
8980          * range is invalid anyway.
8981          */
8982         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8983          || ((lba + num_blocks) < lba)) {
8984                 ctl_set_lba_out_of_range(ctsio,
8985                     MAX(lba, lun->be_lun->maxlba + 1));
8986                 ctl_done((union ctl_io *)ctsio);
8987                 return (CTL_RETVAL_COMPLETE);
8988         }
8989
8990         /*
8991          * According to SBC-3, a transfer length of 0 is not an error.
8992          */
8993         if (num_blocks == 0) {
8994                 ctl_set_success(ctsio);
8995                 ctl_done((union ctl_io *)ctsio);
8996                 return (CTL_RETVAL_COMPLETE);
8997         }
8998
8999         lbalen = (struct ctl_lba_len_flags *)
9000             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9001         lbalen->lba = lba;
9002         lbalen->len = num_blocks;
9003         if (bytchk) {
9004                 lbalen->flags = CTL_LLF_COMPARE | flags;
9005                 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9006         } else {
9007                 lbalen->flags = CTL_LLF_VERIFY | flags;
9008                 ctsio->kern_total_len = 0;
9009         }
9010         ctsio->kern_rel_offset = 0;
9011
9012         CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
9013         retval = lun->backend->data_submit((union ctl_io *)ctsio);
9014         return (retval);
9015 }
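
/*
 * Illustrative sketch (not part of ctl.c): as ctl_verify() above shows,
 * the BYTCHK bit decides whether VERIFY carries a Data-Out buffer.  With
 * BYTCHK set the initiator sends num_blocks of data to compare against
 * the medium (CTL_LLF_COMPARE); with it clear the device only verifies
 * the medium and no data is transferred.  A standalone expression of
 * that rule:
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t
verify_data_out_len(bool bytchk, uint32_t num_blocks, uint32_t blocksize)
{
        /* Widen before multiplying so large requests do not overflow. */
        return (bytchk ? (uint64_t)num_blocks * blocksize : 0);
}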
9016
9017 int
9018 ctl_report_luns(struct ctl_scsiio *ctsio)
9019 {
9020         struct ctl_softc *softc = CTL_SOFTC(ctsio);
9021         struct ctl_port *port = CTL_PORT(ctsio);
9022         struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio);
9023         struct scsi_report_luns *cdb;
9024         struct scsi_report_luns_data *lun_data;
9025         int num_filled, num_luns, num_port_luns, retval;
9026         uint32_t alloc_len, lun_datalen;
9027         uint32_t initidx, targ_lun_id, lun_id;
9028
9029         retval = CTL_RETVAL_COMPLETE;
9030         cdb = (struct scsi_report_luns *)ctsio->cdb;
9031
9032         CTL_DEBUG_PRINT(("ctl_report_luns\n"));
9033
9034         num_luns = 0;
9035         num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns;
9036         mtx_lock(&softc->ctl_lock);
9037         for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
9038                 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
9039                         num_luns++;
9040         }
9041         mtx_unlock(&softc->ctl_lock);
9042
9043         switch (cdb->select_report) {
9044         case RPL_REPORT_DEFAULT:
9045         case RPL_REPORT_ALL:
9046         case RPL_REPORT_NONSUBSID:
9047                 break;
9048         case RPL_REPORT_WELLKNOWN:
9049         case RPL_REPORT_ADMIN:
9050         case RPL_REPORT_CONGLOM:
9051                 num_luns = 0;
9052                 break;
9053         default:
9054                 ctl_set_invalid_field(ctsio,
9055                                       /*sks_valid*/ 1,
9056                                       /*command*/ 1,
9057                                       /*field*/ 2,
9058                                       /*bit_valid*/ 0,
9059                                       /*bit*/ 0);
9060                 ctl_done((union ctl_io *)ctsio);
9061                 return (retval);
9062                 break; /* NOTREACHED */
9063         }
9064
9065         alloc_len = scsi_4btoul(cdb->length);
9066         /*
9067          * The initiator has to allocate at least 16 bytes for this request,
9068          * so he can at least get the header and the first LUN.  Otherwise
9069          * we reject the request (per SPC-3 rev 14, section 6.21).
9070          */
9071         if (alloc_len < (sizeof(struct scsi_report_luns_data) +
9072             sizeof(struct scsi_report_luns_lundata))) {
9073                 ctl_set_invalid_field(ctsio,
9074                                       /*sks_valid*/ 1,
9075                                       /*command*/ 1,
9076                                       /*field*/ 6,
9077                                       /*bit_valid*/ 0,
9078                                       /*bit*/ 0);
9079                 ctl_done((union ctl_io *)ctsio);
9080                 return (retval);
9081         }
9082
9083         lun_datalen = sizeof(*lun_data) +
9084                 (num_luns * sizeof(struct scsi_report_luns_lundata));
9085
9086         ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
9087         lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
9088         ctsio->kern_sg_entries = 0;
9089
9090         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9091
9092         mtx_lock(&softc->ctl_lock);
9093         for (targ_lun_id = 0, num_filled = 0;
9094             targ_lun_id < num_port_luns && num_filled < num_luns;
9095             targ_lun_id++) {
9096                 lun_id = ctl_lun_map_from_port(port, targ_lun_id);
9097                 if (lun_id == UINT32_MAX)
9098                         continue;
9099                 lun = softc->ctl_luns[lun_id];
9100                 if (lun == NULL)
9101                         continue;
9102
9103                 be64enc(lun_data->luns[num_filled++].lundata,
9104                     ctl_encode_lun(targ_lun_id));
9105
9106                 /*
9107                  * According to SPC-3, rev 14 section 6.21:
9108                  *
9109                  * "The execution of a REPORT LUNS command to any valid and
9110                  * installed logical unit shall clear the REPORTED LUNS DATA
9111                  * HAS CHANGED unit attention condition for all logical
9112                  * units of that target with respect to the requesting
9113                  * initiator. A valid and installed logical unit is one
9114                  * having a PERIPHERAL QUALIFIER of 000b in the standard
9115                  * INQUIRY data (see 6.4.2)."
9116                  *
9117                  * If request_lun is NULL, the LUN this report luns command
9118                  * was issued to is either disabled or doesn't exist. In that
9119                  * case, we shouldn't clear any pending lun change unit
9120                  * attention.
9121                  */
9122                 if (request_lun != NULL) {
9123                         mtx_lock(&lun->lun_lock);
9124                         ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
9125                         mtx_unlock(&lun->lun_lock);
9126                 }
9127         }
9128         mtx_unlock(&softc->ctl_lock);
9129
9130         /*
9131          * It's quite possible that we've returned fewer LUNs than we allocated
9132          * space for.  Trim it.
9133          */
9134         lun_datalen = sizeof(*lun_data) +
9135                 (num_filled * sizeof(struct scsi_report_luns_lundata));
9136         ctsio->kern_rel_offset = 0;
9137         ctsio->kern_sg_entries = 0;
9138         ctsio->kern_data_len = min(lun_datalen, alloc_len);
9139         ctsio->kern_total_len = ctsio->kern_data_len;
9140
9141         /*
9142          * We set this to the actual data length, regardless of how much
9143          * space we actually have to return results.  If the user looks at
9144          * this value, he'll know whether or not he allocated enough space
9145          * and can reissue the command if necessary.  We don't support well
9146          * known logical units, so if the user asks for that, return none.
9147          */
9148         scsi_ulto4b(lun_datalen - 8, lun_data->length);
9149
9150         /*
9151          * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
9152          * this request.
9153          */
9154         ctl_set_success(ctsio);
9155         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9156         ctsio->be_move_done = ctl_config_move_done;
9157         ctl_datamove((union ctl_io *)ctsio);
9158         return (retval);
9159 }
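
/*
 * Illustrative sketch (not part of ctl.c): the REPORT LUNS parameter
 * data built above starts with a 4-byte big-endian LUN LIST LENGTH (the
 * total size of the 8-byte LUN entries, i.e. lun_datalen - 8), 4
 * reserved bytes, and then the entries themselves.  A standalone
 * decoder for that header, assuming a buffer as returned by this
 * command:
 */
#include <stdint.h>
#include <stddef.h>

static int
report_luns_num_entries(const uint8_t *buf, size_t buflen)
{
        uint32_t list_len;

        if (buflen < 8)
                return (-1);            /* Truncated header. */
        list_len = (uint32_t)buf[0] << 24 | (uint32_t)buf[1] << 16 |
            (uint32_t)buf[2] << 8 | buf[3];
        return (list_len / 8);          /* Each LUN entry is 8 bytes. */
}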
9160
9161 int
9162 ctl_request_sense(struct ctl_scsiio *ctsio)
9163 {
9164         struct ctl_softc *softc = CTL_SOFTC(ctsio);
9165         struct ctl_lun *lun = CTL_LUN(ctsio);
9166         struct scsi_request_sense *cdb;
9167         struct scsi_sense_data *sense_ptr, *ps;
9168         uint32_t initidx;
9169         int have_error;
9170         u_int sense_len = SSD_FULL_SIZE;
9171         scsi_sense_data_type sense_format;
9172         ctl_ua_type ua_type;
9173         uint8_t asc = 0, ascq = 0;
9174
9175         cdb = (struct scsi_request_sense *)ctsio->cdb;
9176
9177         CTL_DEBUG_PRINT(("ctl_request_sense\n"));
9178
9179         /*
9180          * Determine which sense format the user wants.
9181          */
9182         if (cdb->byte2 & SRS_DESC)
9183                 sense_format = SSD_TYPE_DESC;
9184         else
9185                 sense_format = SSD_TYPE_FIXED;
9186
9187         ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
9188         sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
9189         ctsio->kern_sg_entries = 0;
9190         ctsio->kern_rel_offset = 0;
9191
9192         /*
9193          * struct scsi_sense_data, which is currently set to 256 bytes, is
9194          * larger than the largest allowed value for the length field in the
9195          * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
9196          */
9197         ctsio->kern_data_len = cdb->length;
9198         ctsio->kern_total_len = cdb->length;
9199
9200         /*
9201          * If we don't have a LUN served by this node, we have no pending sense.
9202          */
9203         if (lun == NULL ||
9204             ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
9205              softc->ha_link < CTL_HA_LINK_UNKNOWN)) {
9206                 /* "Logical unit not supported" */
9207                 ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format,
9208                     /*current_error*/ 1,
9209                     /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
9210                     /*asc*/ 0x25,
9211                     /*ascq*/ 0x00,
9212                     SSD_ELEM_NONE);
9213                 goto send;
9214         }
9215
9216         have_error = 0;
9217         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9218         /*
9219          * Check for pending sense, and then for pending unit attentions.
9220          * Pending sense gets returned first, then pending unit attentions.
9221          */
9222         mtx_lock(&lun->lun_lock);
9223         ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
9224         if (ps != NULL)
9225                 ps += initidx % CTL_MAX_INIT_PER_PORT;
9226         if (ps != NULL && ps->error_code != 0) {
9227                 scsi_sense_data_type stored_format;
9228
9229                 /*
9230                  * Check to see which sense format was used for the stored
9231                  * sense data.
9232                  */
9233                 stored_format = scsi_sense_type(ps);
9234
9235                 /*
9236                  * If the user requested a different sense format than the
9237                  * one we stored, then we need to convert it to the other
9238                  * format.  If we're going from descriptor to fixed format
9239                  * sense data, we may lose things in translation, depending
9240                  * on what options were used.
9241                  *
9242                  * If the stored format is SSD_TYPE_NONE (i.e. invalid)
9243                  * for some reason, we'll just copy it out as-is.
9244                  */
9245                 if ((stored_format == SSD_TYPE_FIXED)
9246                  && (sense_format == SSD_TYPE_DESC))
9247                         ctl_sense_to_desc((struct scsi_sense_data_fixed *)
9248                             ps, (struct scsi_sense_data_desc *)sense_ptr);
9249                 else if ((stored_format == SSD_TYPE_DESC)
9250                       && (sense_format == SSD_TYPE_FIXED))
9251                         ctl_sense_to_fixed((struct scsi_sense_data_desc *)
9252                             ps, (struct scsi_sense_data_fixed *)sense_ptr);
9253                 else
9254                         memcpy(sense_ptr, ps, sizeof(*sense_ptr));
9255
9256                 ps->error_code = 0;
9257                 have_error = 1;
9258         } else {
9259                 ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len,
9260                     sense_format);
9261                 if (ua_type != CTL_UA_NONE)
9262                         have_error = 1;
9263         }
9264         if (have_error == 0) {
9265                 /*
9266                  * Report an informational exception if we have one and it's allowed.
9267                  */
9268                 if (lun->MODE_IE.mrie != SIEP_MRIE_NO) {
9269                         asc = lun->ie_asc;
9270                         ascq = lun->ie_ascq;
9271                 }
9272                 ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format,
9273                     /*current_error*/ 1,
9274                     /*sense_key*/ SSD_KEY_NO_SENSE,
9275                     /*asc*/ asc,
9276                     /*ascq*/ ascq,
9277                     SSD_ELEM_NONE);
9278         }
9279         mtx_unlock(&lun->lun_lock);
9280
9281 send:
9282         /*
9283          * We report the SCSI status as OK, since the status of the command
9284          * itself is OK.  We're reporting sense as parameter data.
9285          */
9286         ctl_set_success(ctsio);
9287         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9288         ctsio->be_move_done = ctl_config_move_done;
9289         ctl_datamove((union ctl_io *)ctsio);
9290         return (CTL_RETVAL_COMPLETE);
9291 }
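
/*
 * Illustrative sketch (not part of ctl.c): REQUEST SENSE selects the
 * sense format via the DESC bit (bit 0 of CDB byte 1), which is what
 * the SRS_DESC test above implements, and carries the allocation length
 * in byte 4.  A standalone CDB builder under those SPC-4 assumptions:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void
build_request_sense_cdb(uint8_t cdb[6], bool desc, uint8_t alloc_len)
{
        memset(cdb, 0, 6);
        cdb[0] = 0x03;                  /* REQUEST SENSE opcode. */
        if (desc)
                cdb[1] |= 0x01;         /* DESC: descriptor-format sense. */
        cdb[4] = alloc_len;             /* At most 252 per SPC-4. */
}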
9292
9293 int
9294 ctl_tur(struct ctl_scsiio *ctsio)
9295 {
9296
9297         CTL_DEBUG_PRINT(("ctl_tur\n"));
9298
9299         ctl_set_success(ctsio);
9300         ctl_done((union ctl_io *)ctsio);
9301
9302         return (CTL_RETVAL_COMPLETE);
9303 }
9304
9305 /*
9306  * SCSI VPD page 0x00, the Supported VPD Pages page.
9307  */
9308 static int
9309 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
9310 {
9311         struct ctl_lun *lun = CTL_LUN(ctsio);
9312         struct scsi_vpd_supported_pages *pages;
9313         int sup_page_size;
9314         int p;
9315
9316         sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
9317             SCSI_EVPD_NUM_SUPPORTED_PAGES;
9318         ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
9319         pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
9320         ctsio->kern_rel_offset = 0;
9321         ctsio->kern_sg_entries = 0;
9322         ctsio->kern_data_len = min(sup_page_size, alloc_len);
9323         ctsio->kern_total_len = ctsio->kern_data_len;
9324
9325         /*
9326          * The control device is always connected.  The disk device, on the
9327          * other hand, may not be online all the time.  Need to change this
9328          * to figure out whether the disk device is actually online or not.
9329          */
9330         if (lun != NULL)
9331                 pages->device = (SID_QUAL_LU_CONNECTED << 5) |
9332                                 lun->be_lun->lun_type;
9333         else
9334                 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9335
9336         p = 0;
9337         /* Supported VPD pages */
9338         pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
9339         /* Serial Number */
9340         pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
9341         /* Device Identification */
9342         pages->page_list[p++] = SVPD_DEVICE_ID;
9343         /* Extended INQUIRY Data */
9344         pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
9345         /* Mode Page Policy */
9346         pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
9347         /* SCSI Ports */
9348         pages->page_list[p++] = SVPD_SCSI_PORTS;
9349         /* Third-party Copy */
9350         pages->page_list[p++] = SVPD_SCSI_TPC;
9351         /* SCSI Feature Sets */
9352         pages->page_list[p++] = SVPD_SCSI_SFS;
9353         if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
9354                 /* Block limits */
9355                 pages->page_list[p++] = SVPD_BLOCK_LIMITS;
9356                 /* Block Device Characteristics */
9357                 pages->page_list[p++] = SVPD_BDC;
9358                 /* Logical Block Provisioning */
9359                 pages->page_list[p++] = SVPD_LBP;
9360         }
9361         pages->length = p;
9362
9363         ctl_set_success(ctsio);
9364         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9365         ctsio->be_move_done = ctl_config_move_done;
9366         ctl_datamove((union ctl_io *)ctsio);
9367         return (CTL_RETVAL_COMPLETE);
9368 }
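
/*
 * Illustrative sketch (not part of ctl.c): the page built above has a
 * one-byte PAGE LENGTH at offset 3 and the supported page codes from
 * offset 4 on, matching struct scsi_vpd_supported_pages.  An initiator
 * could scan it like this:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool
vpd_page_supported(const uint8_t *page0, size_t len, uint8_t page_code)
{
        size_t i, nentries;

        if (len < 4)
                return (false);
        nentries = page0[3];            /* PAGE LENGTH = number of codes. */
        for (i = 0; i < nentries && 4 + i < len; i++) {
                if (page0[4 + i] == page_code)
                        return (true);
        }
        return (false);
}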
9369
9370 /*
9371  * SCSI VPD page 0x80, the Unit Serial Number page.
9372  */
9373 static int
9374 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9375 {
9376         struct ctl_lun *lun = CTL_LUN(ctsio);
9377         struct scsi_vpd_unit_serial_number *sn_ptr;
9378         int data_len;
9379
9380         data_len = 4 + CTL_SN_LEN;
9381         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9382         sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9383         ctsio->kern_rel_offset = 0;
9384         ctsio->kern_sg_entries = 0;
9385         ctsio->kern_data_len = min(data_len, alloc_len);
9386         ctsio->kern_total_len = ctsio->kern_data_len;
9387
9388         /*
9389          * The control device is always connected.  The disk device, on the
9390          * other hand, may not be online all the time.  Need to change this
9391          * to figure out whether the disk device is actually online or not.
9392          */
9393         if (lun != NULL)
9394                 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9395                                   lun->be_lun->lun_type;
9396         else
9397                 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9398
9399         sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9400         sn_ptr->length = CTL_SN_LEN;
9401         /*
9402          * If we don't have a LUN, we just leave the serial number as
9403          * all spaces.
9404          */
9405         if (lun != NULL) {
9406                 strncpy((char *)sn_ptr->serial_num,
9407                         (char *)lun->be_lun->serial_num, CTL_SN_LEN);
9408         } else
9409                 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);
9410
9411         ctl_set_success(ctsio);
9412         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9413         ctsio->be_move_done = ctl_config_move_done;
9414         ctl_datamove((union ctl_io *)ctsio);
9415         return (CTL_RETVAL_COMPLETE);
9416 }
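
/*
 * Illustrative sketch (not part of ctl.c): page 0x80 as built above
 * carries the PAGE LENGTH in byte 3 and the space-padded serial number
 * from byte 4 on.  A standalone extractor under those assumptions:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static int
get_unit_serial(const uint8_t *page, size_t len, char *out, size_t outlen)
{
        size_t slen;

        if (len < 4 || outlen == 0)
                return (-1);
        slen = page[3];                 /* PAGE LENGTH. */
        if (slen > len - 4)
                slen = len - 4;         /* Clamp to the buffer. */
        if (slen >= outlen)
                slen = outlen - 1;
        memcpy(out, &page[4], slen);
        out[slen] = '\0';               /* Caller gets a C string. */
        return (0);
}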
9417
9419 /*
9420  * SCSI VPD page 0x86, the Extended INQUIRY Data page.
9421  */
9422 static int
9423 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
9424 {
9425         struct ctl_lun *lun = CTL_LUN(ctsio);
9426         struct scsi_vpd_extended_inquiry_data *eid_ptr;
9427         int data_len;
9428
9429         data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
9430         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9431         eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
9432         ctsio->kern_sg_entries = 0;
9433         ctsio->kern_rel_offset = 0;
9434         ctsio->kern_data_len = min(data_len, alloc_len);
9435         ctsio->kern_total_len = ctsio->kern_data_len;
9436
9437         /*
9438          * The control device is always connected.  The disk device, on the
9439          * other hand, may not be online all the time.
9440          */
9441         if (lun != NULL)
9442                 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9443                                      lun->be_lun->lun_type;
9444         else
9445                 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9446         eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
9447         scsi_ulto2b(data_len - 4, eid_ptr->page_length);
9448         /*
9449          * We support head of queue, ordered and simple tags.
9450          */
9451         eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
9452         /*
9453          * Volatile cache supported.
9454          */
9455         eid_ptr->flags3 = SVPD_EID_V_SUP;
9456
9457         /*
9458          * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
9459          * attention for a particular IT nexus on all LUNs once we have
9460          * reported it to that nexus.  This bit is required as of SPC-4.
9461          */
9462         eid_ptr->flags4 = SVPD_EID_LUICLR;
9463
9464         /*
9465          * We support revert to defaults (RTD) bit in MODE SELECT.
9466          */
9467         eid_ptr->flags5 = SVPD_EID_RTD_SUP;
9468
9469         /*
9470          * XXX KDM in order to correctly answer this, we would need
9471          * information from the SIM to determine how much sense data it
9472          * can send.  So this would really be a path inquiry field, most
9473          * likely.  This can be set to a maximum of 252 according to SPC-4,
9474          * but the hardware may or may not be able to support that much.
9475          * 0 just means that the maximum sense data length is not reported.
9476          */
9477         eid_ptr->max_sense_length = 0;
9478
9479         ctl_set_success(ctsio);
9480         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9481         ctsio->be_move_done = ctl_config_move_done;
9482         ctl_datamove((union ctl_io *)ctsio);
9483         return (CTL_RETVAL_COMPLETE);
9484 }
9485
9486 static int
9487 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
9488 {
9489         struct ctl_lun *lun = CTL_LUN(ctsio);
9490         struct scsi_vpd_mode_page_policy *mpp_ptr;
9491         int data_len;
9492
9493         data_len = sizeof(struct scsi_vpd_mode_page_policy) +
9494             sizeof(struct scsi_vpd_mode_page_policy_descr);
9495
9496         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9497         mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
9498         ctsio->kern_rel_offset = 0;
9499         ctsio->kern_sg_entries = 0;
9500         ctsio->kern_data_len = min(data_len, alloc_len);
9501         ctsio->kern_total_len = ctsio->kern_data_len;
9502
9503         /*
9504          * The control device is always connected.  The disk device, on the
9505          * other hand, may not be online all the time.
9506          */
9507         if (lun != NULL)
9508                 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9509                                      lun->be_lun->lun_type;
9510         else
9511                 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9512         mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
9513         scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
9514         mpp_ptr->descr[0].page_code = 0x3f;
9515         mpp_ptr->descr[0].subpage_code = 0xff;
9516         mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;
9517
9518         ctl_set_success(ctsio);
9519         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9520         ctsio->be_move_done = ctl_config_move_done;
9521         ctl_datamove((union ctl_io *)ctsio);
9522         return (CTL_RETVAL_COMPLETE);
9523 }
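
/*
 * Illustrative sketch (not part of ctl.c): the single descriptor above
 * uses the wildcard page code 0x3f / subpage 0xff, declaring one policy
 * for every mode page.  Each descriptor is 4 bytes: page code, subpage
 * code, policy byte (MLUS in bit 7, policy in bits 1:0), and a reserved
 * byte.  A standalone decoder of the policy byte, per SPC-4:
 */
#include <stdint.h>

static const char *
mode_page_policy_str(uint8_t policy)
{
        switch (policy & 0x03) {
        case 0x00:
                return ("shared");
        case 0x01:
                return ("per target port");
        case 0x03:
                return ("per I_T nexus");
        default:
                return ("obsolete");    /* 0x02 */
        }
}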
9524
9525 /*
9526  * SCSI VPD page 0x83, the Device Identification page.
9527  */
9528 static int
9529 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9530 {
9531         struct ctl_softc *softc = CTL_SOFTC(ctsio);
9532         struct ctl_port *port = CTL_PORT(ctsio);
9533         struct ctl_lun *lun = CTL_LUN(ctsio);
9534         struct scsi_vpd_device_id *devid_ptr;
9535         struct scsi_vpd_id_descriptor *desc;
9536         int data_len, g;
9537         uint8_t proto;
9538
9539         data_len = sizeof(struct scsi_vpd_device_id) +
9540             sizeof(struct scsi_vpd_id_descriptor) +
9541                 sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9542             sizeof(struct scsi_vpd_id_descriptor) +
9543                 sizeof(struct scsi_vpd_id_trgt_port_grp_id);
9544         if (lun && lun->lun_devid)
9545                 data_len += lun->lun_devid->len;
9546         if (port && port->port_devid)
9547                 data_len += port->port_devid->len;
9548         if (port && port->target_devid)
9549                 data_len += port->target_devid->len;
9550
9551         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9552         devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
9553         ctsio->kern_sg_entries = 0;
9554         ctsio->kern_rel_offset = 0;
9556         ctsio->kern_data_len = min(data_len, alloc_len);
9557         ctsio->kern_total_len = ctsio->kern_data_len;
9558
9559         /*
9560          * The control device is always connected.  The disk device, on the
9561          * other hand, may not be online all the time.
9562          */
9563         if (lun != NULL)
9564                 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9565                                      lun->be_lun->lun_type;
9566         else
9567                 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9568         devid_ptr->page_code = SVPD_DEVICE_ID;
9569         scsi_ulto2b(data_len - 4, devid_ptr->length);
9570
9571         if (port && port->port_type == CTL_PORT_FC)
9572                 proto = SCSI_PROTO_FC << 4;
9573         else if (port && port->port_type == CTL_PORT_SAS)
9574                 proto = SCSI_PROTO_SAS << 4;
9575         else if (port && port->port_type == CTL_PORT_ISCSI)
9576                 proto = SCSI_PROTO_ISCSI << 4;
9577         else
9578                 proto = SCSI_PROTO_SPI << 4;
9579         desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
9580
9581         /*
9582          * We're using a LUN association here.  i.e., this device ID is a
9583          * per-LUN identifier.
9584          */
9585         if (lun && lun->lun_devid) {
9586                 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
9587                 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9588                     lun->lun_devid->len);
9589         }
9590
9591         /*
9592          * This is for the WWPN which is a port association.
9593          */
9594         if (port && port->port_devid) {
9595                 memcpy(desc, port->port_devid->data, port->port_devid->len);
9596                 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9597                     port->port_devid->len);
9598         }
9599
9600         /*
9601          * This is for the Relative Target Port (type 4h) identifier.
9602          */
9603         desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9604         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9605             SVPD_ID_TYPE_RELTARG;
9606         desc->length = 4;
9607         scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
9608         desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9609             sizeof(struct scsi_vpd_id_rel_trgt_port_id));
9610
9611         /*
9612          * This is for the Target Port Group (type 5h) identifier.
9613          */
9614         desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9615         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9616             SVPD_ID_TYPE_TPORTGRP;
9617         desc->length = 4;
9618         if (softc->is_single ||
9619             (port && port->status & CTL_PORT_STATUS_HA_SHARED))
9620                 g = 1;
9621         else
9622                 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt;
9623         scsi_ulto2b(g, &desc->identifier[2]);
9624         desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9625             sizeof(struct scsi_vpd_id_trgt_port_grp_id));
9626
9627         /*
9628          * This is for the Target identifier.
9629          */
9630         if (port && port->target_devid) {
9631                 memcpy(desc, port->target_devid->data, port->target_devid->len);
9632         }
9633
9634         ctl_set_success(ctsio);
9635         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9636         ctsio->be_move_done = ctl_config_move_done;
9637         ctl_datamove((union ctl_io *)ctsio);
9638         return (CTL_RETVAL_COMPLETE);
9639 }
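
/*
 * Illustrative sketch (not part of ctl.c): each designation descriptor
 * emitted above is a 4-byte header (protocol/code set, then PIV,
 * association and designator type, a reserved byte, and the designator
 * length) followed by the designator itself.  A standalone walker over
 * the descriptor list, assuming buf points at the first descriptor
 * (offset 4 of the page):
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void
walk_devid_descriptors(const uint8_t *buf, size_t len)
{
        size_t off = 0;

        while (off + 4 <= len) {
                uint8_t dlen = buf[off + 3];

                if (off + 4 + dlen > len)
                        break;          /* Truncated descriptor. */
                printf("assoc %u type %u len %u\n",
                    (buf[off + 1] >> 4) & 0x3, buf[off + 1] & 0xf, dlen);
                off += 4 + dlen;        /* Next descriptor. */
        }
}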
9640
9641 static int
9642 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
9643 {
9644         struct ctl_softc *softc = CTL_SOFTC(ctsio);
9645         struct ctl_lun *lun = CTL_LUN(ctsio);
9646         struct scsi_vpd_scsi_ports *sp;
9647         struct scsi_vpd_port_designation *pd;
9648         struct scsi_vpd_port_designation_cont *pdc;
9649         struct ctl_port *port;
9650         int data_len, num_target_ports, iid_len, id_len;
9651
9652         num_target_ports = 0;
9653         iid_len = 0;
9654         id_len = 0;
9655         mtx_lock(&softc->ctl_lock);
9656         STAILQ_FOREACH(port, &softc->port_list, links) {
9657                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9658                         continue;
9659                 if (lun != NULL &&
9660                     ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
9661                         continue;
9662                 num_target_ports++;
9663                 if (port->init_devid)
9664                         iid_len += port->init_devid->len;
9665                 if (port->port_devid)
9666                         id_len += port->port_devid->len;
9667         }
9668         mtx_unlock(&softc->ctl_lock);
9669
9670         data_len = sizeof(struct scsi_vpd_scsi_ports) +
9671             num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
9672              sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
9673         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9674         sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
9675         ctsio->kern_sg_entries = 0;
9676         ctsio->kern_rel_offset = 0;
9678         ctsio->kern_data_len = min(data_len, alloc_len);
9679         ctsio->kern_total_len = ctsio->kern_data_len;
9680
9681         /*
9682          * The control device is always connected.  The disk device, on the
9683          * other hand, may not be online all the time.  Need to change this
9684          * to figure out whether the disk device is actually online or not.
9685          */
9686         if (lun != NULL)
9687                 sp->device = (SID_QUAL_LU_CONNECTED << 5) |
9688                                   lun->be_lun->lun_type;
9689         else
9690                 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9691
9692         sp->page_code = SVPD_SCSI_PORTS;
9693         scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
9694             sp->page_length);
9695         pd = &sp->design[0];
9696
9697         mtx_lock(&softc->ctl_lock);
9698         STAILQ_FOREACH(port, &softc->port_list, links) {
9699                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9700                         continue;
9701                 if (lun != NULL &&
9702                     ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
9703                         continue;
9704                 scsi_ulto2b(port->targ_port, pd->relative_port_id);
9705                 if (port->init_devid) {
9706                         iid_len = port->init_devid->len;
9707                         memcpy(pd->initiator_transportid,
9708                             port->init_devid->data, port->init_devid->len);
9709                 } else
9710                         iid_len = 0;
9711                 scsi_ulto2b(iid_len, pd->initiator_transportid_length);
9712                 pdc = (struct scsi_vpd_port_designation_cont *)
9713                     (&pd->initiator_transportid[iid_len]);
9714                 if (port->port_devid) {
9715                         id_len = port->port_devid->len;
9716                         memcpy(pdc->target_port_descriptors,
9717                             port->port_devid->data, port->port_devid->len);
9718                 } else
9719                         id_len = 0;
9720                 scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
9721                 pd = (struct scsi_vpd_port_designation *)
9722                     ((uint8_t *)pdc->target_port_descriptors + id_len);
9723         }
9724         mtx_unlock(&softc->ctl_lock);
9725
9726         ctl_set_success(ctsio);
9727         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9728         ctsio->be_move_done = ctl_config_move_done;
9729         ctl_datamove((union ctl_io *)ctsio);
9730         return (CTL_RETVAL_COMPLETE);
9731 }
9732
9733 static int
9734 ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len)
9735 {
9736         struct ctl_lun *lun = CTL_LUN(ctsio);
9737         struct scsi_vpd_sfs *sfs_ptr;
9738         int sfs_page_size, n;
9739
9740         sfs_page_size = sizeof(*sfs_ptr) + 5 * 2;
9741         ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO);
9742         sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr;
9743         ctsio->kern_sg_entries = 0;
9744         ctsio->kern_rel_offset = 0;
9746         ctsio->kern_data_len = min(sfs_page_size, alloc_len);
9747         ctsio->kern_total_len = ctsio->kern_data_len;
9748
9749         /*
9750          * The control device is always connected.  The disk device, on the
9751          * other hand, may not be online all the time.  Need to change this
9752          * to figure out whether the disk device is actually online or not.
9753          */
9754         if (lun != NULL)
9755                 sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9756                                   lun->be_lun->lun_type;
9757         else
9758                 sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9759
9760         sfs_ptr->page_code = SVPD_SCSI_SFS;
9761         n = 0;
9762         /* Discovery 2016 */
9763         scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]);
9764         if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
9765                 /* SBC Base 2016 */
9766                 scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]);
9767                 /* SBC Base 2010 */
9768                 scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]);
9769                 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9770                         /* Basic Provisioning 2016 */
9771                         scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]);
9772                 }
9773                 /* Drive Maintenance 2016 */
9774                 //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]);
9775         }
9776         scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length);
9777
9778         ctl_set_success(ctsio);
9779         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9780         ctsio->be_move_done = ctl_config_move_done;
9781         ctl_datamove((union ctl_io *)ctsio);
9782         return (CTL_RETVAL_COMPLETE);
9783 }
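
/*
 * Illustrative sketch (not part of ctl.c): the page length written
 * above (4 + 2 * n) covers 4 reserved bytes plus n two-byte feature set
 * codes, so the codes start at byte 8 of the page.  A standalone scan
 * under those assumptions:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool
sfs_code_present(const uint8_t *page, size_t len, uint16_t code)
{
        size_t i, ncodes, page_len;

        if (len < 8)
                return (false);
        page_len = (size_t)page[2] << 8 | page[3];
        ncodes = page_len < 4 ? 0 : (page_len - 4) / 2;
        for (i = 0; i < ncodes && 8 + 2 * i + 1 < len; i++) {
                if (((uint16_t)page[8 + 2 * i] << 8 | page[9 + 2 * i]) == code)
                        return (true);
        }
        return (false);
}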
9784
9785 static int
9786 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
9787 {
9788         struct ctl_lun *lun = CTL_LUN(ctsio);
9789         struct scsi_vpd_block_limits *bl_ptr;
9790         const char *val;
9791         uint64_t ival;
9792
9793         ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
9794         bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
9795         ctsio->kern_sg_entries = 0;
9796         ctsio->kern_rel_offset = 0;
9798         ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len);
9799         ctsio->kern_total_len = ctsio->kern_data_len;
9800
9801         /*
9802          * The control device is always connected.  The disk device, on the
9803          * other hand, may not be online all the time.  Need to change this
9804          * to figure out whether the disk device is actually online or not.
9805          */
9806         if (lun != NULL)
9807                 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9808                                   lun->be_lun->lun_type;
9809         else
9810                 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9811
9812         bl_ptr->page_code = SVPD_BLOCK_LIMITS;
9813         scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
9814         bl_ptr->max_cmp_write_len = 0xff;
9815         scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
9816         if (lun != NULL) {
9817                 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
9818                 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9819                         ival = 0xffffffff;
9820                         val = dnvlist_get_string(lun->be_lun->options,
9821                             "unmap_max_lba", NULL);
9822                         if (val != NULL)
9823                                 ctl_expand_number(val, &ival);
9824                         scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt);
9825                         ival = 0xffffffff;
9826                         val = dnvlist_get_string(lun->be_lun->options,
9827                             "unmap_max_descr", NULL);
9828                         if (val != NULL)
9829                                 ctl_expand_number(val, &ival);
9830                         scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt);
9831                         if (lun->be_lun->ublockexp != 0) {
9832                                 scsi_ulto4b((1 << lun->be_lun->ublockexp),
9833                                     bl_ptr->opt_unmap_grain);
9834                                 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
9835                                     bl_ptr->unmap_grain_align);
9836                         }
9837                 }
9838                 scsi_ulto4b(lun->be_lun->atomicblock,
9839                     bl_ptr->max_atomic_transfer_length);
9840                 scsi_ulto4b(0, bl_ptr->atomic_alignment);
9841                 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
9842                 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary);
9843                 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size);
9844                 ival = UINT64_MAX;
9845                 val = dnvlist_get_string(lun->be_lun->options,
9846                     "write_same_max_lba", NULL);
9847                 if (val != NULL)
9848                         ctl_expand_number(val, &ival);
9849                 scsi_u64to8b(ival, bl_ptr->max_write_same_length);
9850                 if (lun->be_lun->maxlba + 1 > ival)
9851                         bl_ptr->flags |= SVPD_BL_WSNZ;
9852         }
9853
9854         ctl_set_success(ctsio);
9855         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9856         ctsio->be_move_done = ctl_config_move_done;
9857         ctl_datamove((union ctl_io *)ctsio);
9858         return (CTL_RETVAL_COMPLETE);
9859 }
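
/*
 * Illustrative sketch (not part of ctl.c): fields such as MAXIMUM WRITE
 * SAME LENGTH above are stored big-endian, which is all scsi_u64to8b()
 * does.  A standalone equivalent:
 */
#include <stdint.h>

static void
u64_to_8b_be(uint64_t val, uint8_t buf[8])
{
        int i;

        for (i = 0; i < 8; i++)
                buf[i] = val >> (56 - 8 * i);   /* MSB first. */
}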
9860
9861 static int
9862 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
9863 {
9864         struct ctl_lun *lun = CTL_LUN(ctsio);
9865         struct scsi_vpd_block_device_characteristics *bdc_ptr;
9866         const char *value;
9867         u_int i;
9868
9869         ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
9870         bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
9871         ctsio->kern_sg_entries = 0;
9872         ctsio->kern_rel_offset = 0;
9873         ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len);
9874         ctsio->kern_total_len = ctsio->kern_data_len;
9875
9876         /*
9877          * The control device is always connected.  The disk device, on the
9878          * other hand, may not be online all the time.  Need to change this
9879          * to figure out whether the disk device is actually online or not.
9880          */
9881         if (lun != NULL)
9882                 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9883                                   lun->be_lun->lun_type;
9884         else
9885                 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9886         bdc_ptr->page_code = SVPD_BDC;
9887         scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
9888         if (lun != NULL &&
9889             (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL)
9890                 i = strtol(value, NULL, 0);
9891         else
9892                 i = CTL_DEFAULT_ROTATION_RATE;
9893         scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
9894         if (lun != NULL &&
9895             (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL)
9896                 i = strtol(value, NULL, 0);
9897         else
9898                 i = 0;
9899         bdc_ptr->wab_wac_ff = (i & 0x0f);
9900         bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS;
9901
9902         ctl_set_success(ctsio);
9903         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9904         ctsio->be_move_done = ctl_config_move_done;
9905         ctl_datamove((union ctl_io *)ctsio);
9906         return (CTL_RETVAL_COMPLETE);
9907 }
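
/*
 * Illustrative sketch (not part of ctl.c): the MEDIUM ROTATION RATE
 * field filled in above is encoded per SBC: 0x0000 means "not
 * reported", 0x0001 means non-rotating (solid state), and other values
 * are the rotation rate in RPM.  A standalone decoder, with the string
 * labels being my own:
 */
#include <stdint.h>

static const char *
rotation_rate_str(uint16_t rate)
{
        if (rate == 0x0000)
                return ("not reported");
        if (rate == 0x0001)
                return ("non-rotating (SSD)");
        return ("rotational");          /* rate itself is the RPM value. */
}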
9908
9909 static int
9910 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
9911 {
9912         struct ctl_lun *lun = CTL_LUN(ctsio);
9913         struct scsi_vpd_logical_block_prov *lbp_ptr;
9914         const char *value;
9915
9916         ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
9917         lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
9918         ctsio->kern_sg_entries = 0;
9919         ctsio->kern_rel_offset = 0;
9920         ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len);
9921         ctsio->kern_total_len = ctsio->kern_data_len;
9922
9923         /*
9924          * The control device is always connected.  The disk device, on the
9925          * other hand, may not be online all the time.  Need to change this
9926          * to figure out whether the disk device is actually online or not.
9927          */
9928         if (lun != NULL)
9929                 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9930                                   lun->be_lun->lun_type;
9931         else
9932                 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9933
9934         lbp_ptr->page_code = SVPD_LBP;
9935         scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
9936         lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
9937         if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9938                 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
9939                     SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
9940                 value = dnvlist_get_string(lun->be_lun->options,
9941                     "provisioning_type", NULL);
9942                 if (value != NULL) {
9943                         if (strcmp(value, "resource") == 0)
9944                                 lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
9945                         else if (strcmp(value, "thin") == 0)
9946                                 lbp_ptr->prov_type = SVPD_LBP_THIN;
9947                 } else
9948                         lbp_ptr->prov_type = SVPD_LBP_THIN;
9949         }
9950
9951         ctl_set_success(ctsio);
9952         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9953         ctsio->be_move_done = ctl_config_move_done;
9954         ctl_datamove((union ctl_io *)ctsio);
9955         return (CTL_RETVAL_COMPLETE);
9956 }
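
/*
 * Illustrative sketch (not part of ctl.c): the PROVISIONING TYPE set
 * above lives in bits 2:0 of byte 6 of the page; per SBC-4, 0 means
 * full/not reported, 1 resource provisioned, and 2 thin provisioned.
 * A standalone decoder under those assumptions:
 */
#include <stdint.h>

static const char *
lbp_provisioning_type_str(const uint8_t *page)
{
        switch (page[6] & 0x07) {
        case 0:
                return ("full or not reported");
        case 1:
                return ("resource provisioned");
        case 2:
                return ("thin provisioned");
        default:
                return ("reserved");
        }
}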
9957
9958 /*
9959  * INQUIRY with the EVPD bit set.
9960  */
9961 static int
9962 ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
9963 {
9964         struct ctl_lun *lun = CTL_LUN(ctsio);
9965         struct scsi_inquiry *cdb;
9966         int alloc_len, retval;
9967
9968         cdb = (struct scsi_inquiry *)ctsio->cdb;
9969         alloc_len = scsi_2btoul(cdb->length);
9970
9971         switch (cdb->page_code) {
9972         case SVPD_SUPPORTED_PAGES:
9973                 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
9974                 break;
9975         case SVPD_UNIT_SERIAL_NUMBER:
9976                 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
9977                 break;
9978         case SVPD_DEVICE_ID:
9979                 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
9980                 break;
9981         case SVPD_EXTENDED_INQUIRY_DATA:
9982                 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
9983                 break;
9984         case SVPD_MODE_PAGE_POLICY:
9985                 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
9986                 break;
9987         case SVPD_SCSI_PORTS:
9988                 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
9989                 break;
9990         case SVPD_SCSI_TPC:
9991                 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
9992                 break;
9993         case SVPD_SCSI_SFS:
9994                 retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len);
9995                 break;
9996         case SVPD_BLOCK_LIMITS:
9997                 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
9998                         goto err;
9999                 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
10000                 break;
10001         case SVPD_BDC:
10002                 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10003                         goto err;
10004                 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
10005                 break;
10006         case SVPD_LBP:
10007                 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10008                         goto err;
10009                 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
10010                 break;
10011         default:
10012 err:
10013                 ctl_set_invalid_field(ctsio,
10014                                       /*sks_valid*/ 1,
10015                                       /*command*/ 1,
10016                                       /*field*/ 2,
10017                                       /*bit_valid*/ 0,
10018                                       /*bit*/ 0);
10019                 ctl_done((union ctl_io *)ctsio);
10020                 retval = CTL_RETVAL_COMPLETE;
10021                 break;
10022         }
10023
10024         return (retval);
10025 }
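
/*
 * Illustrative sketch (not part of ctl.c): the EVPD requests dispatched
 * above arrive as 6-byte INQUIRY CDBs with the EVPD bit set, the page
 * code in byte 2, and a 16-bit big-endian allocation length in bytes
 * 3-4.  A standalone builder for such a CDB:
 */
#include <stdint.h>
#include <string.h>

static void
build_inquiry_evpd_cdb(uint8_t cdb[6], uint8_t page_code, uint16_t alloc_len)
{
        memset(cdb, 0, 6);
        cdb[0] = 0x12;                  /* INQUIRY opcode. */
        cdb[1] = 0x01;                  /* EVPD bit. */
        cdb[2] = page_code;
        cdb[3] = alloc_len >> 8;        /* Big-endian allocation length. */
        cdb[4] = alloc_len & 0xff;
}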
10026
10027 /*
10028  * Standard INQUIRY data.
10029  */
10030 static int
10031 ctl_inquiry_std(struct ctl_scsiio *ctsio)
10032 {
10033         struct ctl_softc *softc = CTL_SOFTC(ctsio);
10034         struct ctl_port *port = CTL_PORT(ctsio);
10035         struct ctl_lun *lun = CTL_LUN(ctsio);
10036         struct scsi_inquiry_data *inq_ptr;
10037         struct scsi_inquiry *cdb;
10038         const char *val;
10039         uint32_t alloc_len, data_len;
10040         ctl_port_type port_type;
10041
10042         port_type = port->port_type;
10043         if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
10044                 port_type = CTL_PORT_SCSI;
10045
10046         cdb = (struct scsi_inquiry *)ctsio->cdb;
10047         alloc_len = scsi_2btoul(cdb->length);
10048
10049         /*
10050          * We malloc the full inquiry data size here and fill it
10051          * in.  If the user only asks for less, we'll give him
10052          * that much.
10053          */
10054         data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
10055         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10056         inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
10057         ctsio->kern_sg_entries = 0;
10058         ctsio->kern_rel_offset = 0;
10059         ctsio->kern_data_len = min(data_len, alloc_len);
10060         ctsio->kern_total_len = ctsio->kern_data_len;
10061
10062         if (lun != NULL) {
10063                 if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
10064                     softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
10065                         inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10066                             lun->be_lun->lun_type;
10067                 } else {
10068                         inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
10069                             lun->be_lun->lun_type;
10070                 }
10071                 if (lun->flags & CTL_LUN_REMOVABLE)
10072                         inq_ptr->dev_qual2 |= SID_RMB;
10073         } else
10074                 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
10075
10076         /* RMB in the second byte is 0 unless set above for removable LUNs. */
10077         inq_ptr->version = SCSI_REV_SPC5;
10078
10079         /*
10080          * According to SAM-3, even if a device only supports a single
10081          * level of LUN addressing, it should still set the HISUP bit:
10082          *
10083          * 4.9.1 Logical unit numbers overview
10084          *
10085          * All logical unit number formats described in this standard are
10086          * hierarchical in structure even when only a single level in that
10087          * hierarchy is used. The HISUP bit shall be set to one in the
10088          * standard INQUIRY data (see SPC-2) when any logical unit number
10089          * format described in this standard is used.  Non-hierarchical
10090          * formats are outside the scope of this standard.
10091          *
10092          * Therefore we set the HiSup bit here.
10093          *
10094          * The response format is 2, per SPC-3.
10095          */
10096         inq_ptr->response_format = SID_HiSup | 2;
10097
10098         inq_ptr->additional_length = data_len -
10099             (offsetof(struct scsi_inquiry_data, additional_length) + 1);
10100         CTL_DEBUG_PRINT(("additional_length = %d\n",
10101                          inq_ptr->additional_length));
10102
10103         inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
10104         if (port_type == CTL_PORT_SCSI)
10105                 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
10106         inq_ptr->spc2_flags |= SPC2_SID_MultiP;
10107         inq_ptr->flags = SID_CmdQue;
10108         if (port_type == CTL_PORT_SCSI)
10109                 inq_ptr->flags |= SID_WBus16 | SID_Sync;
10110
10111         /*
10112          * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
10113          * We have 8 bytes for the vendor name, and 16 bytes for the device
10114          * name and 4 bytes for the revision.
10115          */
10116         if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options,
10117             "vendor", NULL)) == NULL) {
10118                 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
10119         } else {
10120                 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
10121                 strncpy(inq_ptr->vendor, val,
10122                     min(sizeof(inq_ptr->vendor), strlen(val)));
10123         }
10124         if (lun == NULL) {
10125                 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10126                     sizeof(inq_ptr->product));
10127         } else if ((val = dnvlist_get_string(lun->be_lun->options, "product",
10128             NULL)) == NULL) {
10129                 switch (lun->be_lun->lun_type) {
10130                 case T_DIRECT:
10131                         strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10132                             sizeof(inq_ptr->product));
10133                         break;
10134                 case T_PROCESSOR:
10135                         strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
10136                             sizeof(inq_ptr->product));
10137                         break;
10138                 case T_CDROM:
10139                         strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
10140                             sizeof(inq_ptr->product));
10141                         break;
10142                 default:
10143                         strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
10144                             sizeof(inq_ptr->product));
10145                         break;
10146                 }
10147         } else {
10148                 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
10149                 strncpy(inq_ptr->product, val,
10150                     min(sizeof(inq_ptr->product), strlen(val)));
10151         }
10152
10153         /*
10154          * XXX make this a macro somewhere so it automatically gets
10155          * incremented when we make changes.
10156          */
10157         if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options,
10158             "revision", NULL)) == NULL) {
10159                 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
10160         } else {
10161                 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
10162                 strncpy(inq_ptr->revision, val,
10163                     min(sizeof(inq_ptr->revision), strlen(val)));
10164         }
10165
10166         /*
10167          * For parallel SCSI, we support double transition and single
10168          * transition clocking.  We also support QAS (Quick Arbitration
10169          * and Selection) and Information Unit transfers on both the
10170          * control and array devices.
10171          */
10172         if (port_type == CTL_PORT_SCSI)
10173                 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
10174                                     SID_SPI_IUS;
10175
10176         /* SAM-6 (no version claimed) */
10177         scsi_ulto2b(0x00C0, inq_ptr->version1);
10178         /* SPC-5 (no version claimed) */
10179         scsi_ulto2b(0x05C0, inq_ptr->version2);
10180         if (port_type == CTL_PORT_FC) {
10181                 /* FCP-2 ANSI INCITS.350:2003 */
10182                 scsi_ulto2b(0x0917, inq_ptr->version3);
10183         } else if (port_type == CTL_PORT_SCSI) {
10184                 /* SPI-4 ANSI INCITS.362:200x */
10185                 scsi_ulto2b(0x0B56, inq_ptr->version3);
10186         } else if (port_type == CTL_PORT_ISCSI) {
10187                 /* iSCSI (no version claimed) */
10188                 scsi_ulto2b(0x0960, inq_ptr->version3);
10189         } else if (port_type == CTL_PORT_SAS) {
10190                 /* SAS (no version claimed) */
10191                 scsi_ulto2b(0x0BE0, inq_ptr->version3);
10192         } else if (port_type == CTL_PORT_UMASS) {
10193                 /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */
10194                 scsi_ulto2b(0x1730, inq_ptr->version3);
10195         }
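        /*
         * scsi_ulto2b() stores values big-endian, as the SCSI wire format
         * requires.  For instance, the SPC-5 descriptor 0x05C0 set above
         * lands in memory as the byte pair 0x05, 0xC0:
         *
         *      uint8_t vd[2];
         *      scsi_ulto2b(0x05C0, vd);    // vd[0] == 0x05, vd[1] == 0xC0
         */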
10196
10197         if (lun == NULL) {
10198                 /* SBC-4 (no version claimed) */
10199                 scsi_ulto2b(0x0600, inq_ptr->version4);
10200         } else {
10201                 switch (lun->be_lun->lun_type) {
10202                 case T_DIRECT:
10203                         /* SBC-4 (no version claimed) */
10204                         scsi_ulto2b(0x0600, inq_ptr->version4);
10205                         break;
10206                 case T_PROCESSOR:
10207                         break;
10208                 case T_CDROM:
10209                         /* MMC-6 (no version claimed) */
10210                         scsi_ulto2b(0x04E0, inq_ptr->version4);
10211                         break;
10212                 default:
10213                         break;
10214                 }
10215         }
10216
10217         ctl_set_success(ctsio);
10218         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10219         ctsio->be_move_done = ctl_config_move_done;
10220         ctl_datamove((union ctl_io *)ctsio);
10221         return (CTL_RETVAL_COMPLETE);
10222 }
10223
10224 int
10225 ctl_inquiry(struct ctl_scsiio *ctsio)
10226 {
10227         struct scsi_inquiry *cdb;
10228         int retval;
10229
10230         CTL_DEBUG_PRINT(("ctl_inquiry\n"));
10231
10232         cdb = (struct scsi_inquiry *)ctsio->cdb;
10233         if (cdb->byte2 & SI_EVPD)
10234                 retval = ctl_inquiry_evpd(ctsio);
10235         else if (cdb->page_code == 0)
10236                 retval = ctl_inquiry_std(ctsio);
10237         else {
10238                 ctl_set_invalid_field(ctsio,
10239                                       /*sks_valid*/ 1,
10240                                       /*command*/ 1,
10241                                       /*field*/ 2,
10242                                       /*bit_valid*/ 0,
10243                                       /*bit*/ 0);
10244                 ctl_done((union ctl_io *)ctsio);
10245                 return (CTL_RETVAL_COMPLETE);
10246         }
10247
10248         return (retval);
10249 }
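/*
 * Dispatch summary for the INQUIRY handler above: EVPD (byte 1, bit 0)
 * set routes to the VPD page handler; EVPD clear with PAGE CODE (byte 2)
 * zero is a standard INQUIRY; EVPD clear with a nonzero PAGE CODE is
 * illegal per SPC, hence the INVALID FIELD IN CDB pointing at field 2.
 */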
10250
10251 int
10252 ctl_get_config(struct ctl_scsiio *ctsio)
10253 {
10254         struct ctl_lun *lun = CTL_LUN(ctsio);
10255         struct scsi_get_config_header *hdr;
10256         struct scsi_get_config_feature *feature;
10257         struct scsi_get_config *cdb;
10258         uint32_t alloc_len, data_len;
10259         int rt, starting;
10260
10261         cdb = (struct scsi_get_config *)ctsio->cdb;
10262         rt = (cdb->rt & SGC_RT_MASK);
10263         starting = scsi_2btoul(cdb->starting_feature);
10264         alloc_len = scsi_2btoul(cdb->length);
10265
10266         data_len = sizeof(struct scsi_get_config_header) +
10267             sizeof(struct scsi_get_config_feature) + 8 +
10268             sizeof(struct scsi_get_config_feature) + 8 +
10269             sizeof(struct scsi_get_config_feature) + 4 +
10270             sizeof(struct scsi_get_config_feature) + 4 +
10271             sizeof(struct scsi_get_config_feature) + 8 +
10272             sizeof(struct scsi_get_config_feature) +
10273             sizeof(struct scsi_get_config_feature) + 4 +
10274             sizeof(struct scsi_get_config_feature) + 4 +
10275             sizeof(struct scsi_get_config_feature) + 4 +
10276             sizeof(struct scsi_get_config_feature) + 4 +
10277             sizeof(struct scsi_get_config_feature) + 4 +
10278             sizeof(struct scsi_get_config_feature) + 4;
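        /*
         * The twelve sizeof() summands above correspond one-to-one to the
         * twelve features emitted below (0x00, 0x01, 0x02, 0x03, 0x10,
         * 0x1D, 0x1E, 0x1F, 0x2A, 0x2B, 0x3A and 0x3B); each added
         * constant is that feature's additional length, so data_len is
         * the worst-case reply size (RT 00b, all features).
         */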
10279         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10280         ctsio->kern_sg_entries = 0;
10281         ctsio->kern_rel_offset = 0;
10282
10283         hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
10284         if (lun->flags & CTL_LUN_NO_MEDIA)
10285                 scsi_ulto2b(0x0000, hdr->current_profile);
10286         else
10287                 scsi_ulto2b(0x0010, hdr->current_profile);
10288         feature = (struct scsi_get_config_feature *)(hdr + 1);
10289
10290         if (starting > 0x003b)
10291                 goto done;
10292         if (starting > 0x003a)
10293                 goto f3b;
10294         if (starting > 0x002b)
10295                 goto f3a;
10296         if (starting > 0x002a)
10297                 goto f2b;
10298         if (starting > 0x001f)
10299                 goto f2a;
10300         if (starting > 0x001e)
10301                 goto f1f;
10302         if (starting > 0x001d)
10303                 goto f1e;
10304         if (starting > 0x0010)
10305                 goto f1d;
10306         if (starting > 0x0003)
10307                 goto f10;
10308         if (starting > 0x0002)
10309                 goto f3;
10310         if (starting > 0x0001)
10311                 goto f2;
10312         if (starting > 0x0000)
10313                 goto f1;
10314
10315         /* Profile List */
10316         scsi_ulto2b(0x0000, feature->feature_code);
10317         feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
10318         feature->add_length = 8;
10319         scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */
10320         feature->feature_data[2] = 0x00;
10321         scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */
10322         feature->feature_data[6] = 0x01;
10323         feature = (struct scsi_get_config_feature *)
10324             &feature->feature_data[feature->add_length];
10325
10326 f1:     /* Core */
10327         scsi_ulto2b(0x0001, feature->feature_code);
10328         feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10329         feature->add_length = 8;
10330         scsi_ulto4b(0x00000000, &feature->feature_data[0]);
10331         feature->feature_data[4] = 0x03;
10332         feature = (struct scsi_get_config_feature *)
10333             &feature->feature_data[feature->add_length];
10334
10335 f2:     /* Morphing */
10336         scsi_ulto2b(0x0002, feature->feature_code);
10337         feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10338         feature->add_length = 4;
10339         feature->feature_data[0] = 0x02;
10340         feature = (struct scsi_get_config_feature *)
10341             &feature->feature_data[feature->add_length];
10342
10343 f3:     /* Removable Medium */
10344         scsi_ulto2b(0x0003, feature->feature_code);
10345         feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10346         feature->add_length = 4;
10347         feature->feature_data[0] = 0x39;
10348         feature = (struct scsi_get_config_feature *)
10349             &feature->feature_data[feature->add_length];
10350
10351         if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
10352                 goto done;
10353
10354 f10:    /* Random Read */
10355         scsi_ulto2b(0x0010, feature->feature_code);
10356         feature->flags = 0x00;
10357         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10358                 feature->flags |= SGC_F_CURRENT;
10359         feature->add_length = 8;
10360         scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
10361         scsi_ulto2b(1, &feature->feature_data[4]);
10362         feature->feature_data[6] = 0x00;
10363         feature = (struct scsi_get_config_feature *)
10364             &feature->feature_data[feature->add_length];
10365
10366 f1d:    /* Multi-Read */
10367         scsi_ulto2b(0x001D, feature->feature_code);
10368         feature->flags = 0x00;
10369         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10370                 feature->flags |= SGC_F_CURRENT;
10371         feature->add_length = 0;
10372         feature = (struct scsi_get_config_feature *)
10373             &feature->feature_data[feature->add_length];
10374
10375 f1e:    /* CD Read */
10376         scsi_ulto2b(0x001E, feature->feature_code);
10377         feature->flags = 0x00;
10378         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10379                 feature->flags |= SGC_F_CURRENT;
10380         feature->add_length = 4;
10381         feature->feature_data[0] = 0x00;
10382         feature = (struct scsi_get_config_feature *)
10383             &feature->feature_data[feature->add_length];
10384
10385 f1f:    /* DVD Read */
10386         scsi_ulto2b(0x001F, feature->feature_code);
10387         feature->flags = 0x08;
10388         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10389                 feature->flags |= SGC_F_CURRENT;
10390         feature->add_length = 4;
10391         feature->feature_data[0] = 0x01;
10392         feature->feature_data[2] = 0x03;
10393         feature = (struct scsi_get_config_feature *)
10394             &feature->feature_data[feature->add_length];
10395
10396 f2a:    /* DVD+RW */
10397         scsi_ulto2b(0x002A, feature->feature_code);
10398         feature->flags = 0x04;
10399         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10400                 feature->flags |= SGC_F_CURRENT;
10401         feature->add_length = 4;
10402         feature->feature_data[0] = 0x00;
10403         feature->feature_data[1] = 0x00;
10404         feature = (struct scsi_get_config_feature *)
10405             &feature->feature_data[feature->add_length];
10406
10407 f2b:    /* DVD+R */
10408         scsi_ulto2b(0x002B, feature->feature_code);
10409         feature->flags = 0x00;
10410         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10411                 feature->flags |= SGC_F_CURRENT;
10412         feature->add_length = 4;
10413         feature->feature_data[0] = 0x00;
10414         feature = (struct scsi_get_config_feature *)
10415             &feature->feature_data[feature->add_length];
10416
10417 f3a:    /* DVD+RW Dual Layer */
10418         scsi_ulto2b(0x003A, feature->feature_code);
10419         feature->flags = 0x00;
10420         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10421                 feature->flags |= SGC_F_CURRENT;
10422         feature->add_length = 4;
10423         feature->feature_data[0] = 0x00;
10424         feature->feature_data[1] = 0x00;
10425         feature = (struct scsi_get_config_feature *)
10426             &feature->feature_data[feature->add_length];
10427
10428 f3b:    /* DVD+R Dual Layer */
10429         scsi_ulto2b(0x003B, feature->feature_code);
10430         feature->flags = 0x00;
10431         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10432                 feature->flags |= SGC_F_CURRENT;
10433         feature->add_length = 4;
10434         feature->feature_data[0] = 0x00;
10435         feature = (struct scsi_get_config_feature *)
10436             &feature->feature_data[feature->add_length];
10437
10438 done:
10439         data_len = (uint8_t *)feature - (uint8_t *)hdr;
10440         if (rt == SGC_RT_SPECIFIC && data_len > 4) {
10441                 feature = (struct scsi_get_config_feature *)(hdr + 1);
10442                 if (scsi_2btoul(feature->feature_code) == starting)
10443                         feature = (struct scsi_get_config_feature *)
10444                             &feature->feature_data[feature->add_length];
10445                 data_len = (uint8_t *)feature - (uint8_t *)hdr;
10446         }
10447         scsi_ulto4b(data_len - 4, hdr->data_length);
10448         ctsio->kern_data_len = min(data_len, alloc_len);
10449         ctsio->kern_total_len = ctsio->kern_data_len;
10450
10451         ctl_set_success(ctsio);
10452         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10453         ctsio->be_move_done = ctl_config_move_done;
10454         ctl_datamove((union ctl_io *)ctsio);
10455         return (CTL_RETVAL_COMPLETE);
10456 }
10457
10458 int
10459 ctl_get_event_status(struct ctl_scsiio *ctsio)
10460 {
10461         struct scsi_get_event_status_header *hdr;
10462         struct scsi_get_event_status *cdb;
10463         uint32_t alloc_len, data_len;
10464
10465         cdb = (struct scsi_get_event_status *)ctsio->cdb;
10466         if ((cdb->byte2 & SGESN_POLLED) == 0) {
10467                 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
10468                     /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
10469                 ctl_done((union ctl_io *)ctsio);
10470                 return (CTL_RETVAL_COMPLETE);
10471         }
10472         alloc_len = scsi_2btoul(cdb->length);
10473
10474         data_len = sizeof(struct scsi_get_event_status_header);
10475         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10476         ctsio->kern_sg_entries = 0;
10477         ctsio->kern_rel_offset = 0;
10478         ctsio->kern_data_len = min(data_len, alloc_len);
10479         ctsio->kern_total_len = ctsio->kern_data_len;
10480
10481         hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
10482         scsi_ulto2b(0, hdr->descr_length);
10483         hdr->nea_class = SGESN_NEA;
10484         hdr->supported_class = 0;
10485
10486         ctl_set_success(ctsio);
10487         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10488         ctsio->be_move_done = ctl_config_move_done;
10489         ctl_datamove((union ctl_io *)ctsio);
10490         return (CTL_RETVAL_COMPLETE);
10491 }
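/*
 * Asynchronous operation (POLLED bit clear) is not supported, hence the
 * INVALID FIELD check above; polled requests get back a minimal header
 * with NEA (No Event Available) set and an empty supported-class mask.
 */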
10492
10493 int
10494 ctl_mechanism_status(struct ctl_scsiio *ctsio)
10495 {
10496         struct scsi_mechanism_status_header *hdr;
10497         struct scsi_mechanism_status *cdb;
10498         uint32_t alloc_len, data_len;
10499
10500         cdb = (struct scsi_mechanism_status *)ctsio->cdb;
10501         alloc_len = scsi_2btoul(cdb->length);
10502
10503         data_len = sizeof(struct scsi_mechanism_status_header);
10504         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10505         ctsio->kern_sg_entries = 0;
10506         ctsio->kern_rel_offset = 0;
10507         ctsio->kern_data_len = min(data_len, alloc_len);
10508         ctsio->kern_total_len = ctsio->kern_data_len;
10509
10510         hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
10511         hdr->state1 = 0x00;
10512         hdr->state2 = 0xe0;
10513         scsi_ulto3b(0, hdr->lba);
10514         hdr->slots_num = 0;
10515         scsi_ulto2b(0, hdr->slots_length);
10516
10517         ctl_set_success(ctsio);
10518         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10519         ctsio->be_move_done = ctl_config_move_done;
10520         ctl_datamove((union ctl_io *)ctsio);
10521         return (CTL_RETVAL_COMPLETE);
10522 }
10523
10524 static void
10525 ctl_ultomsf(uint32_t lba, uint8_t *buf)
10526 {
10527
10528         lba += 150;
10529         buf[0] = 0;
10530         buf[1] = bin2bcd((lba / 75) / 60);
10531         buf[2] = bin2bcd((lba / 75) % 60);
10532         buf[3] = bin2bcd(lba % 75);
10533 }
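/*
 * Worked example for the conversion above: LBA 0 maps to MSF 00:02:00,
 * since the 150-sector offset accounts for the mandatory 2-second CD
 * lead-in at 75 sectors per second:
 *
 *      uint8_t msf[4];
 *      ctl_ultomsf(0, msf);    // msf == { 0x00, 0x00, 0x02, 0x00 } (BCD)
 */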
10534
10535 int
10536 ctl_read_toc(struct ctl_scsiio *ctsio)
10537 {
10538         struct ctl_lun *lun = CTL_LUN(ctsio);
10539         struct scsi_read_toc_hdr *hdr;
10540         struct scsi_read_toc_type01_descr *descr;
10541         struct scsi_read_toc *cdb;
10542         uint32_t alloc_len, data_len;
10543         int format, msf;
10544
10545         cdb = (struct scsi_read_toc *)ctsio->cdb;
10546         msf = (cdb->byte2 & CD_MSF) != 0;
10547         format = cdb->format;
10548         alloc_len = scsi_2btoul(cdb->data_len);
10549
10550         data_len = sizeof(struct scsi_read_toc_hdr);
10551         if (format == 0)
10552                 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
10553         else
10554                 data_len += sizeof(struct scsi_read_toc_type01_descr);
10555         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10556         ctsio->kern_sg_entries = 0;
10557         ctsio->kern_rel_offset = 0;
10558         ctsio->kern_data_len = min(data_len, alloc_len);
10559         ctsio->kern_total_len = ctsio->kern_data_len;
10560
10561         hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
10562         if (format == 0) {
10563                 scsi_ulto2b(0x12, hdr->data_length);
10564                 hdr->first = 1;
10565                 hdr->last = 1;
10566                 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
10567                 descr->addr_ctl = 0x14;
10568                 descr->track_number = 1;
10569                 if (msf)
10570                         ctl_ultomsf(0, descr->track_start);
10571                 else
10572                         scsi_ulto4b(0, descr->track_start);
10573                 descr++;
10574                 descr->addr_ctl = 0x14;
10575                 descr->track_number = 0xaa;
10576                 if (msf)
10577                         ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
10578                 else
10579                         scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
10580         } else {
10581                 scsi_ulto2b(0x0a, hdr->data_length);
10582                 hdr->first = 1;
10583                 hdr->last = 1;
10584                 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
10585                 descr->addr_ctl = 0x14;
10586                 descr->track_number = 1;
10587                 if (msf)
10588                         ctl_ultomsf(0, descr->track_start);
10589                 else
10590                         scsi_ulto4b(0, descr->track_start);
10591         }
10592
10593         ctl_set_success(ctsio);
10594         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10595         ctsio->be_move_done = ctl_config_move_done;
10596         ctl_datamove((union ctl_io *)ctsio);
10597         return (CTL_RETVAL_COMPLETE);
10598 }
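/*
 * The emulated TOC above always describes a single-track disc: format 0
 * returns track 1 plus the lead-out (track 0xAA) placed at maxlba + 1,
 * while other formats return only the first-track descriptor, in MSF or
 * LBA form depending on the MSF bit.
 */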
10599
10600 /*
10601  * For known CDB types, parse the LBA and length.
10602  */
10603 static int
10604 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
10605 {
10606         if (io->io_hdr.io_type != CTL_IO_SCSI)
10607                 return (1);
10608
10609         switch (io->scsiio.cdb[0]) {
10610         case COMPARE_AND_WRITE: {
10611                 struct scsi_compare_and_write *cdb;
10612
10613                 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
10614
10615                 *lba = scsi_8btou64(cdb->addr);
10616                 *len = cdb->length;
10617                 break;
10618         }
10619         case READ_6:
10620         case WRITE_6: {
10621                 struct scsi_rw_6 *cdb;
10622
10623                 cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
10624
10625                 *lba = scsi_3btoul(cdb->addr);
10626                 /* only 5 bits are valid in the most significant address byte */
10627                 *lba &= 0x1fffff;
10628                 *len = cdb->length;
10629                 break;
10630         }
10631         case READ_10:
10632         case WRITE_10: {
10633                 struct scsi_rw_10 *cdb;
10634
10635                 cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
10636
10637                 *lba = scsi_4btoul(cdb->addr);
10638                 *len = scsi_2btoul(cdb->length);
10639                 break;
10640         }
10641         case WRITE_VERIFY_10: {
10642                 struct scsi_write_verify_10 *cdb;
10643
10644                 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
10645
10646                 *lba = scsi_4btoul(cdb->addr);
10647                 *len = scsi_2btoul(cdb->length);
10648                 break;
10649         }
10650         case READ_12:
10651         case WRITE_12: {
10652                 struct scsi_rw_12 *cdb;
10653
10654                 cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
10655
10656                 *lba = scsi_4btoul(cdb->addr);
10657                 *len = scsi_4btoul(cdb->length);
10658                 break;
10659         }
10660         case WRITE_VERIFY_12: {
10661                 struct scsi_write_verify_12 *cdb;
10662
10663                 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
10664
10665                 *lba = scsi_4btoul(cdb->addr);
10666                 *len = scsi_4btoul(cdb->length);
10667                 break;
10668         }
10669         case READ_16:
10670         case WRITE_16: {
10671                 struct scsi_rw_16 *cdb;
10672
10673                 cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
10674
10675                 *lba = scsi_8btou64(cdb->addr);
10676                 *len = scsi_4btoul(cdb->length);
10677                 break;
10678         }
10679         case WRITE_ATOMIC_16: {
10680                 struct scsi_write_atomic_16 *cdb;
10681
10682                 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;
10683
10684                 *lba = scsi_8btou64(cdb->addr);
10685                 *len = scsi_2btoul(cdb->length);
10686                 break;
10687         }
10688         case WRITE_VERIFY_16: {
10689                 struct scsi_write_verify_16 *cdb;
10690
10691                 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
10692
10693                 *lba = scsi_8btou64(cdb->addr);
10694                 *len = scsi_4btoul(cdb->length);
10695                 break;
10696         }
10697         case WRITE_SAME_10: {
10698                 struct scsi_write_same_10 *cdb;
10699
10700                 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
10701
10702                 *lba = scsi_4btoul(cdb->addr);
10703                 *len = scsi_2btoul(cdb->length);
10704                 break;
10705         }
10706         case WRITE_SAME_16: {
10707                 struct scsi_write_same_16 *cdb;
10708
10709                 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
10710
10711                 *lba = scsi_8btou64(cdb->addr);
10712                 *len = scsi_4btoul(cdb->length);
10713                 break;
10714         }
10715         case VERIFY_10: {
10716                 struct scsi_verify_10 *cdb;
10717
10718                 cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
10719
10720                 *lba = scsi_4btoul(cdb->addr);
10721                 *len = scsi_2btoul(cdb->length);
10722                 break;
10723         }
10724         case VERIFY_12: {
10725                 struct scsi_verify_12 *cdb;
10726
10727                 cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
10728
10729                 *lba = scsi_4btoul(cdb->addr);
10730                 *len = scsi_4btoul(cdb->length);
10731                 break;
10732         }
10733         case VERIFY_16: {
10734                 struct scsi_verify_16 *cdb;
10735
10736                 cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
10737
10738                 *lba = scsi_8btou64(cdb->addr);
10739                 *len = scsi_4btoul(cdb->length);
10740                 break;
10741         }
10742         case UNMAP: {
10743                 *lba = 0;
10744                 *len = UINT64_MAX;
10745                 break;
10746         }
10747         case SERVICE_ACTION_IN: {       /* GET LBA STATUS */
10748                 struct scsi_get_lba_status *cdb;
10749
10750                 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
10751                 *lba = scsi_8btou64(cdb->addr);
10752                 *len = UINT32_MAX;
10753                 break;
10754         }
10755         default:
10756                 return (1);
10757                 break; /* NOTREACHED */
10758         }
10759
10760         return (0);
10761 }
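/*
 * For illustration (made-up values), a READ(10) CDB of
 * 28 00 00 00 08 00 00 00 10 00 decodes through the READ_10 case above
 * as *lba = scsi_4btoul() over bytes 2..5 = 2048 and
 * *len = scsi_2btoul() over bytes 7..8 = 16, i.e. 16 blocks starting
 * at LBA 2048.
 */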
10762
10763 static ctl_action
10764 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
10765     bool seq)
10766 {
10767         uint64_t endlba1, endlba2;
10768
10769         endlba1 = lba1 + len1 - (seq ? 0 : 1);
10770         endlba2 = lba2 + len2 - 1;
10771
10772         if ((endlba1 < lba2) || (endlba2 < lba1))
10773                 return (CTL_ACTION_PASS);
10774         else
10775                 return (CTL_ACTION_BLOCK);
10776 }
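/*
 * Example: with seq false, extents (lba1 0, len1 8) and (lba2 8, len2 8)
 * do not overlap (endlba1 == 7 < lba2 == 8), so the check returns
 * CTL_ACTION_PASS.  With seq true the first extent's end is extended by
 * one (endlba1 == 8), so the same adjacent pair returns
 * CTL_ACTION_BLOCK; this is what keeps consecutive sequential writes
 * ordered.
 */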
10777
10778 static int
10779 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
10780 {
10781         struct ctl_ptr_len_flags *ptrlen;
10782         struct scsi_unmap_desc *buf, *end, *range;
10783         uint64_t lba;
10784         uint32_t len;
10785
10786         /* If this is not UNMAP -- go the other way. */
10787         if (io->io_hdr.io_type != CTL_IO_SCSI ||
10788             io->scsiio.cdb[0] != UNMAP)
10789                 return (CTL_ACTION_ERROR);
10790
10791         /* If UNMAP without data -- block and wait for data. */
10792         ptrlen = (struct ctl_ptr_len_flags *)
10793             &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
10794         if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
10795             ptrlen->ptr == NULL)
10796                 return (CTL_ACTION_BLOCK);
10797
10798         /* UNMAP with data -- check for collision. */
10799         buf = (struct scsi_unmap_desc *)ptrlen->ptr;
10800         end = buf + ptrlen->len / sizeof(*buf);
10801         for (range = buf; range < end; range++) {
10802                 lba = scsi_8btou64(range->lba);
10803                 len = scsi_4btoul(range->length);
10804                 if ((lba < lba2 + len2) && (lba + len > lba2))
10805                         return (CTL_ACTION_BLOCK);
10806         }
10807         return (CTL_ACTION_PASS);
10808 }
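/*
 * Each scsi_unmap_desc carries an 8-byte LBA and a 4-byte block count.
 * E.g. a descriptor with lba 100 and length 100 covers LBAs 100..199
 * and, per the test above, collides with any I/O range [lba2, lba2+len2)
 * satisfying lba2 < 200 && lba2 + len2 > 100.
 */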
10809
10810 static ctl_action
10811 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
10812 {
10813         uint64_t lba1, lba2;
10814         uint64_t len1, len2;
10815         int retval;
10816
10817         if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
10818                 return (CTL_ACTION_ERROR);
10819
10820         retval = ctl_extent_check_unmap(io1, lba2, len2);
10821         if (retval != CTL_ACTION_ERROR)
10822                 return (retval);
10823
10824         if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
10825                 return (CTL_ACTION_ERROR);
10826
10827         if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
10828                 seq = FALSE;
10829         return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
10830 }
10831
10832 static ctl_action
10833 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
10834 {
10835         uint64_t lba1, lba2;
10836         uint64_t len1, len2;
10837
10838         if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
10839                 return (CTL_ACTION_PASS);
10840         if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
10841                 return (CTL_ACTION_ERROR);
10842         if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
10843                 return (CTL_ACTION_ERROR);
10844
10845         if (lba1 + len1 == lba2)
10846                 return (CTL_ACTION_BLOCK);
10847         return (CTL_ACTION_PASS);
10848 }
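/*
 * I.e. only an I/O starting exactly where the earlier one ends
 * (lba1 + len1 == lba2) is blocked here; any other placement passes.
 * For two 8-block writes at LBAs 0 and 8 this returns CTL_ACTION_BLOCK,
 * preserving arrival order when sequential serialization is enabled.
 */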
10849
10850 static ctl_action
10851 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
10852     union ctl_io *ooa_io)
10853 {
10854         const struct ctl_cmd_entry *pending_entry, *ooa_entry;
10855         const ctl_serialize_action *serialize_row;
10856
10857         /*
10858          * Aborted commands are not going to be executed and may not even
10859          * report completion, so we don't care about their order.
10860          * Let them complete ASAP to clean up the OOA queue.
10861          */
10862         if (pending_io->io_hdr.flags & CTL_FLAG_ABORT)
10863                 return (CTL_ACTION_SKIP);
10864
10865         /*
10866          * The initiator attempted multiple untagged commands at the same
10867          * time.  Can't do that.
10868          */
10869         if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10870          && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10871          && ((pending_io->io_hdr.nexus.targ_port ==
10872               ooa_io->io_hdr.nexus.targ_port)
10873           && (pending_io->io_hdr.nexus.initid ==
10874               ooa_io->io_hdr.nexus.initid))
10875          && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
10876               CTL_FLAG_STATUS_SENT)) == 0))
10877                 return (CTL_ACTION_OVERLAP);
10878
10879         /*
10880          * The initiator attempted to send multiple tagged commands with
10881          * the same ID.  (It's fine if different initiators have the same
10882          * tag ID.)
10883          *
10884          * Even if all of those conditions are true, we don't kill the I/O
10885          * if the command ahead of us has been aborted.  We won't end up
10886          * sending it to the FETD, and it's perfectly legal to resend a
10887          * command with the same tag number as long as the previous
10888          * instance of this tag number has been aborted somehow.
10889          */
10890         if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
10891          && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
10892          && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
10893          && ((pending_io->io_hdr.nexus.targ_port ==
10894               ooa_io->io_hdr.nexus.targ_port)
10895           && (pending_io->io_hdr.nexus.initid ==
10896               ooa_io->io_hdr.nexus.initid))
10897          && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
10898               CTL_FLAG_STATUS_SENT)) == 0))
10899                 return (CTL_ACTION_OVERLAP_TAG);
10900
10901         /*
10902          * If we get a head of queue tag, SAM-3 says that we should
10903          * immediately execute it.
10904          *
10905          * What happens if this command would normally block for some other
10906          * reason?  e.g. a request sense with a head of queue tag
10907          * immediately after a write.  Normally that would block, but this
10908          * will result in its getting executed immediately...
10909          *
10910          * We currently return "pass" instead of "skip", so we'll end up
10911          * going through the rest of the queue to check for overlapped tags.
10912          *
10913          * XXX KDM check for other types of blockage first??
10914          */
10915         if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
10916                 return (CTL_ACTION_PASS);
10917
10918         /*
10919          * Ordered tags have to block until all items ahead of them
10920          * have completed.  If we get called with an ordered tag, we always
10921          * block, if something else is ahead of us in the queue.
10922          */
10923         if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
10924                 return (CTL_ACTION_BLOCK);
10925
10926         /*
10927          * Simple tags get blocked until all head of queue and ordered tags
10928          * ahead of them have completed.  I'm lumping untagged commands in
10929          * with simple tags here.  XXX KDM is that the right thing to do?
10930          */
10931         if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10932           || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
10933          && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
10934           || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
10935                 return (CTL_ACTION_BLOCK);
10936
10937         pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
10938         KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
10939             ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
10940              __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
10941              pending_io->scsiio.cdb[1], pending_io));
10942         ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
10943         if (ooa_entry->seridx == CTL_SERIDX_INVLD)
10944                 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
10945         KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
10946             ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
10947              __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
10948              ooa_io->scsiio.cdb[1], ooa_io));
10949
10950         serialize_row = ctl_serialize_table[ooa_entry->seridx];
10951
10952         switch (serialize_row[pending_entry->seridx]) {
10953         case CTL_SER_BLOCK:
10954                 return (CTL_ACTION_BLOCK);
10955         case CTL_SER_EXTENT:
10956                 return (ctl_extent_check(ooa_io, pending_io,
10957                     (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
10958         case CTL_SER_EXTENTOPT:
10959                 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
10960                     SCP_QUEUE_ALG_UNRESTRICTED)
10961                         return (ctl_extent_check(ooa_io, pending_io,
10962                             (lun->be_lun &&
10963                              lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
10964                 return (CTL_ACTION_PASS);
10965         case CTL_SER_EXTENTSEQ:
10966                 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
10967                         return (ctl_extent_check_seq(ooa_io, pending_io));
10968                 return (CTL_ACTION_PASS);
10969         case CTL_SER_PASS:
10970                 return (CTL_ACTION_PASS);
10971         case CTL_SER_BLOCKOPT:
10972                 if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) !=
10973                     SCP_QUEUE_ALG_UNRESTRICTED)
10974                         return (CTL_ACTION_BLOCK);
10975                 return (CTL_ACTION_PASS);
10976         case CTL_SER_SKIP:
10977                 return (CTL_ACTION_SKIP);
10978         default:
10979                 panic("%s: Invalid serialization value %d for %d => %d",
10980                     __func__, serialize_row[pending_entry->seridx],
10981                     pending_entry->seridx, ooa_entry->seridx);
10982         }
10983
10984         return (CTL_ACTION_ERROR);
10985 }
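/*
 * The final decision above is a two-dimensional table lookup: the row is
 * selected by the command already on the OOA queue (ooa_entry->seridx)
 * and the column by the incoming command (pending_entry->seridx).  An
 * extent-typed cell, for example, blocks only when the two commands'
 * LBA ranges actually overlap, instead of blocking unconditionally.
 */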
10986
10987 /*
10988  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
10989  * Assumptions:
10990  * - pending_io is generally either incoming, or on the blocked queue
10991  * - starting I/O is the I/O we want to start the check with.
10992  */
10993 static ctl_action
10994 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
10995               union ctl_io **starting_io)
10996 {
10997         union ctl_io *ooa_io;
10998         ctl_action action;
10999
11000         mtx_assert(&lun->lun_lock, MA_OWNED);
11001
11002         /*
11003          * Run back along the OOA queue, starting with the current
11004          * blocked I/O and going through every I/O before it on the
11005          * queue.  If starting_io is NULL, we'll just end up returning
11006          * CTL_ACTION_PASS.
11007          */
11008         for (ooa_io = *starting_io; ooa_io != NULL;
11009              ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
11010              ooa_links)){
11011                 action = ctl_check_for_blockage(lun, pending_io, ooa_io);
11012                 if (action != CTL_ACTION_PASS) {
11013                         *starting_io = ooa_io;
11014                         return (action);
11015                 }
11016         }
11017
11018         *starting_io = NULL;
11019         return (CTL_ACTION_PASS);
11020 }
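/*
 * Note the traversal direction: TAILQ_PREV() walks from *starting_io
 * toward the head of the OOA queue, i.e. from the most recently arrived
 * I/O toward the oldest, so the blocker returned via *starting_io is
 * the newest conflicting command that arrived ahead of pending_io.
 */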
11021
11022 /*
11023  * Try to unblock the specified I/O.
11024  *
11025  * The skip parameter allows explicitly skipping the present blocker of
11026  * the I/O, starting the check from the previous one on the OOA queue.
11027  * It can be used when we know for sure that the blocker I/O no longer counts.
11028  */
11029 static void
11030 ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip)
11031 {
11032         struct ctl_softc *softc = lun->ctl_softc;
11033         union ctl_io *bio, *obio;
11034         const struct ctl_cmd_entry *entry;
11035         union ctl_ha_msg msg_info;
11036         ctl_action action;
11037
11038         mtx_assert(&lun->lun_lock, MA_OWNED);
11039
11040         if (io->io_hdr.blocker == NULL)
11041                 return;
11042
11043         obio = bio = io->io_hdr.blocker;
11044         if (skip)
11045                 bio = (union ctl_io *)TAILQ_PREV(&bio->io_hdr, ctl_ooaq,
11046                     ooa_links);
11047         action = ctl_check_ooa(lun, io, &bio);
11048         if (action == CTL_ACTION_BLOCK) {
11049                 /* Still blocked, but may be by a different I/O now. */
11050                 if (bio != obio) {
11051                         TAILQ_REMOVE(&obio->io_hdr.blocked_queue,
11052                             &io->io_hdr, blocked_links);
11053                         TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue,
11054                             &io->io_hdr, blocked_links);
11055                         io->io_hdr.blocker = bio;
11056                 }
11057                 return;
11058         }
11059
11060         /* No longer blocked, one way or another. */
11061         TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links);
11062         io->io_hdr.blocker = NULL;
11063
11064         switch (action) {
11065         case CTL_ACTION_OVERLAP:
11066                 ctl_set_overlapped_cmd(&io->scsiio);
11067                 goto error;
11068         case CTL_ACTION_OVERLAP_TAG:
11069                 ctl_set_overlapped_tag(&io->scsiio,
11070                     io->scsiio.tag_num & 0xff);
11071                 goto error;
11072         case CTL_ACTION_PASS:
11073         case CTL_ACTION_SKIP:
11074
11075                 /* Serializing commands from the other SC retire here. */
11076                 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
11077                     (softc->ha_mode != CTL_HA_MODE_XFER)) {
11078                         io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11079                         msg_info.hdr.original_sc = io->io_hdr.remote_io;
11080                         msg_info.hdr.serializing_sc = io;
11081                         msg_info.hdr.msg_type = CTL_MSG_R2R;
11082                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11083                             sizeof(msg_info.hdr), M_NOWAIT);
11084                         break;
11085                 }
11086
11087                 /*
11088                  * Check this I/O for LUN state changes that may have happened
11089                  * while this command was blocked. The LUN state may have been
11090                  * changed by a command ahead of us in the queue.
11091                  */
11092                 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
11093                 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
11094                         ctl_done(io);
11095                         break;
11096                 }
11097
11098                 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11099                 ctl_enqueue_rtr(io);
11100                 break;
11101         case CTL_ACTION_ERROR:
11102         default:
11103                 ctl_set_internal_failure(&io->scsiio,
11104                                          /*sks_valid*/ 0,
11105                                          /*retry_count*/ 0);
11106
11107 error:
11108                 /* Serializing commands from the other SC are done here. */
11109                 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) &&
11110                     (softc->ha_mode != CTL_HA_MODE_XFER)) {
11111                         ctl_try_unblock_others(lun, io, TRUE);
11112                         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
11113
11114                         ctl_copy_sense_data_back(io, &msg_info);
11115                         msg_info.hdr.original_sc = io->io_hdr.remote_io;
11116                         msg_info.hdr.serializing_sc = NULL;
11117                         msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
11118                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11119                             sizeof(msg_info.scsi), M_WAITOK);
11120                         ctl_free_io(io);
11121                         break;
11122                 }
11123
11124                 ctl_done(io);
11125                 break;
11126         }
11127 }
11128
11129 /*
11130  * Try to unblock I/Os blocked by the specified I/O.
11131  *
11132  * The skip parameter allows explicitly skipping the specified I/O as a
11133  * blocker, starting the check from the previous one on the OOA queue.
11134  * It can be used when we know for sure the specified I/O no longer counts
11135  * (it is done).  It must still be on the OOA queue so we know where to start.
11136  */
11137 static void
11138 ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip)
11139 {
11140         union ctl_io *io, *next_io;
11141
11142         mtx_assert(&lun->lun_lock, MA_OWNED);
11143
11144         for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue);
11145              io != NULL; io = next_io) {
11146                 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links);
11147
11148                 KASSERT(io->io_hdr.blocker != NULL,
11149                     ("I/O %p on blocked list without blocker", io));
11150                 ctl_try_unblock_io(lun, io, skip);
11151         }
11152         KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue),
11153             ("blocked_queue is not empty after skipping %p", bio));
11154 }
11155
11156 /*
11157  * This routine (with one exception) checks LUN flags that can be set by
11158  * commands ahead of us in the OOA queue.  These flags have to be checked
11159  * when a command initially comes in, and when we pull a command off the
11160  * blocked queue and are preparing to execute it.  The reason we have to
11161  * check these flags for commands on the blocked queue is that the LUN
11162  * state may have been changed by a command ahead of us while we're on the
11163  * blocked queue.
11164  *
11165  * Ordering is somewhat important with these checks, so please pay
11166  * careful attention to the placement of any new checks.
11167  */
11168 static int
11169 ctl_scsiio_lun_check(struct ctl_lun *lun,
11170     const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
11171 {
11172         struct ctl_softc *softc = lun->ctl_softc;
11173         int retval;
11174         uint32_t residx;
11175
11176         retval = 0;
11177
11178         mtx_assert(&lun->lun_lock, MA_OWNED);
11179
11180         /*
11181          * If this shelf is a secondary shelf controller, we may have to
11182          * reject some commands disallowed by HA mode and link state.
11183          */
11184         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
11185                 if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
11186                     (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
11187                         ctl_set_lun_unavail(ctsio);
11188                         retval = 1;
11189                         goto bailout;
11190                 }
11191                 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
11192                     (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
11193                         ctl_set_lun_transit(ctsio);
11194                         retval = 1;
11195                         goto bailout;
11196                 }
11197                 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
11198                     (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
11199                         ctl_set_lun_standby(ctsio);
11200                         retval = 1;
11201                         goto bailout;
11202                 }
11203
11204                 /* The rest of the checks are only done on the executing side */
11205                 if (softc->ha_mode == CTL_HA_MODE_XFER)
11206                         goto bailout;
11207         }
11208
11209         if (entry->pattern & CTL_LUN_PAT_WRITE) {
11210                 if (lun->be_lun &&
11211                     lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
11212                         ctl_set_hw_write_protected(ctsio);
11213                         retval = 1;
11214                         goto bailout;
11215                 }
11216                 if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) {
11217                         ctl_set_sense(ctsio, /*current_error*/ 1,
11218                             /*sense_key*/ SSD_KEY_DATA_PROTECT,
11219                             /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
11220                         retval = 1;
11221                         goto bailout;
11222                 }
11223         }
11224
11225         /*
11226          * Check for a reservation conflict.  If this command isn't allowed
11227          * even on reserved LUNs, and if this initiator isn't the one who
11228          * reserved us, reject the command with a reservation conflict.
11229          */
11230         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11231         if ((lun->flags & CTL_LUN_RESERVED)
11232          && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
11233                 if (lun->res_idx != residx) {
11234                         ctl_set_reservation_conflict(ctsio);
11235                         retval = 1;
11236                         goto bailout;
11237                 }
11238         }
11239
11240         if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
11241             (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
11242                 /* No reservation or command is allowed. */;
11243         } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
11244             (lun->pr_res_type == SPR_TYPE_WR_EX ||
11245              lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
11246              lun->pr_res_type == SPR_TYPE_WR_EX_AR)) {
11247                 /* The command is allowed for Write Exclusive resv. */;
11248         } else {
11249                 /*
11250                  * If we aren't registered, or it's a reservation holder type
11251                  * reservation and this isn't the reservation holder, then set
11252                  * a conflict.
11253                  */
11254                 if (ctl_get_prkey(lun, residx) == 0 ||
11255                     (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
11256                         ctl_set_reservation_conflict(ctsio);
11257                         retval = 1;
11258                         goto bailout;
11259                 }
11260         }
11261
11262         if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
11263                 if (lun->flags & CTL_LUN_EJECTED)
11264                         ctl_set_lun_ejected(ctsio);
11265                 else if (lun->flags & CTL_LUN_NO_MEDIA) {
11266                         if (lun->flags & CTL_LUN_REMOVABLE)
11267                                 ctl_set_lun_no_media(ctsio);
11268                         else
11269                                 ctl_set_lun_int_reqd(ctsio);
11270                 } else if (lun->flags & CTL_LUN_STOPPED)
11271                         ctl_set_lun_stopped(ctsio);
11272                 else
11273                         goto bailout;
11274                 retval = 1;
11275                 goto bailout;
11276         }
11277
11278 bailout:
11279         return (retval);
11280 }
11281
11282 static void
11283 ctl_failover_io(union ctl_io *io, int have_lock)
11284 {
11285         ctl_set_busy(&io->scsiio);
11286         ctl_done(io);
11287 }
11288
11289 static void
11290 ctl_failover_lun(union ctl_io *rio)
11291 {
11292         struct ctl_softc *softc = CTL_SOFTC(rio);
11293         struct ctl_lun *lun;
11294         struct ctl_io_hdr *io, *next_io;
11295         uint32_t targ_lun;
11296
11297         targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
11298         CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun));
11299
11300         /* Find and lock the LUN. */
11301         mtx_lock(&softc->ctl_lock);
11302         if (targ_lun >= ctl_max_luns ||
11303             (lun = softc->ctl_luns[targ_lun]) == NULL) {
11304                 mtx_unlock(&softc->ctl_lock);
11305                 return;
11306         }
11307         mtx_lock(&lun->lun_lock);
11308         mtx_unlock(&softc->ctl_lock);
11309         if (lun->flags & CTL_LUN_DISABLED) {
11310                 mtx_unlock(&lun->lun_lock);
11311                 return;
11312         }
11313
11314         if (softc->ha_mode == CTL_HA_MODE_XFER) {
11315                 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
11316                         /* We are master */
11317                         if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11318                                 if (io->flags & CTL_FLAG_IO_ACTIVE) {
11319                                         io->flags |= CTL_FLAG_ABORT;
11320                                         io->flags |= CTL_FLAG_FAILOVER;
11321                                         ctl_try_unblock_io(lun,
11322                                             (union ctl_io *)io, FALSE);
11323                                 } else { /* This can be only due to DATAMOVE */
11324                                         io->msg_type = CTL_MSG_DATAMOVE_DONE;
11325                                         io->flags &= ~CTL_FLAG_DMA_INPROG;
11326                                         io->flags |= CTL_FLAG_IO_ACTIVE;
11327                                         io->port_status = 31340;
11328                                         ctl_enqueue_isc((union ctl_io *)io);
11329                                 }
11330                         } else
11331                         /* We are slave */
11332                         if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
11333                                 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11334                                 if (io->flags & CTL_FLAG_IO_ACTIVE) {
11335                                         io->flags |= CTL_FLAG_FAILOVER;
11336                                 } else {
11337                                         ctl_set_busy(&((union ctl_io *)io)->
11338                                             scsiio);
11339                                         ctl_done((union ctl_io *)io);
11340                                 }
11341                         }
11342                 }
11343         } else { /* SERIALIZE modes */
11344                 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
11345                         /* We are master */
11346                         if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11347                                 if (io->blocker != NULL) {
11348                                         TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue,
11349                                             io, blocked_links);
11350                                         io->blocker = NULL;
11351                                 }
11352                                 ctl_try_unblock_others(lun, (union ctl_io *)io,
11353                                     TRUE);
11354                                 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
11355                                 ctl_free_io((union ctl_io *)io);
11356                         } else
11357                         /* We are slave */
11358                         if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
11359                                 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11360                                 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
11361                                         ctl_set_busy(&((union ctl_io *)io)->
11362                                             scsiio);
11363                                         ctl_done((union ctl_io *)io);
11364                                 }
11365                         }
11366                 }
11367         }
11368         mtx_unlock(&lun->lun_lock);
11369 }
11370
11371 static int
11372 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
11373 {
11374         struct ctl_lun *lun;
11375         const struct ctl_cmd_entry *entry;
11376         union ctl_io *bio;
11377         uint32_t initidx, targ_lun;
11378         int retval = 0;
11379
11380         lun = NULL;
11381         targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11382         if (targ_lun < ctl_max_luns)
11383                 lun = softc->ctl_luns[targ_lun];
11384         if (lun) {
11385                 /*
11386                  * If the LUN is invalid, pretend that it doesn't exist.
11387                  * It will go away as soon as all pending I/O has been
11388                  * completed.
11389                  */
11390                 mtx_lock(&lun->lun_lock);
11391                 if (lun->flags & CTL_LUN_DISABLED) {
11392                         mtx_unlock(&lun->lun_lock);
11393                         lun = NULL;
11394                 }
11395         }
11396         CTL_LUN(ctsio) = lun;
11397         if (lun) {
11398                 CTL_BACKEND_LUN(ctsio) = lun->be_lun;
11399
11400                 /*
11401                  * Every I/O goes into the OOA queue for a particular LUN,
11402                  * and stays there until completion.
11403                  */
11404 #ifdef CTL_TIME_IO
11405                 if (TAILQ_EMPTY(&lun->ooa_queue))
11406                         lun->idle_time += getsbinuptime() - lun->last_busy;
11407 #endif
11408                 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
11409         }
11410
11411         /* Get the command entry and return an error if it is unsupported. */
11412         entry = ctl_validate_command(ctsio);
11413         if (entry == NULL) {
11414                 if (lun)
11415                         mtx_unlock(&lun->lun_lock);
11416                 return (retval);
11417         }
11418
11419         ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11420         ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11421
11422         /*
11423          * Check to see whether we can send this command to LUNs that don't
11424          * exist.  This should pretty much only be the case for inquiry
11425          * and request sense.  Further checks, below, really require having
11426          * a LUN, so we can't really check the command anymore.  Just put
11427          * it on the rtr queue.
11428          */
11429         if (lun == NULL) {
11430                 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
11431                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11432                         ctl_enqueue_rtr((union ctl_io *)ctsio);
11433                         return (retval);
11434                 }
11435
11436                 ctl_set_unsupported_lun(ctsio);
11437                 ctl_done((union ctl_io *)ctsio);
11438                 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11439                 return (retval);
11440         } else {
11441                 /*
11442                  * Make sure we support this particular command on this LUN.
11443                  * For example, we don't support writes to the control LUN.
11444                  */
11445                 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11446                         mtx_unlock(&lun->lun_lock);
11447                         ctl_set_invalid_opcode(ctsio);
11448                         ctl_done((union ctl_io *)ctsio);
11449                         return (retval);
11450                 }
11451         }
11452
11453         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11454
11455         /*
11456          * If we've got a request sense, it'll clear the contingent
11457          * allegiance condition.  Otherwise, if we have a CA condition for
11458          * this initiator, clear it, because it sent down a command other
11459          * than request sense.
11460          */
11461         if (ctsio->cdb[0] != REQUEST_SENSE) {
11462                 struct scsi_sense_data *ps;
11463
11464                 ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT];
11465                 if (ps != NULL)
11466                         ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0;
11467         }
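        /*
         * pending_sense is allocated in per-port chunks, so initidx is
         * split into a port slot (initidx / CTL_MAX_INIT_PER_PORT) and an
         * initiator slot within it (initidx % CTL_MAX_INIT_PER_PORT).
         * E.g., assuming CTL_MAX_INIT_PER_PORT is 2048, initidx 2050
         * addresses port slot 1, initiator slot 2.
         */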
11468
11469         /*
11470          * If the command has this flag set, it handles its own unit
11471          * attention reporting, we shouldn't do anything.  Otherwise we
11472          * check for any pending unit attentions, and send them back to the
11473          * initiator.  We only do this when a command initially comes in,
11474          * not when we pull it off the blocked queue.
11475          *
11476          * According to SAM-3, section 5.3.2, the order that things get
11477          * presented back to the host is basically unit attentions caused
11478          * by some sort of reset event, busy status, reservation conflicts
11479          * or task set full, and finally any other status.
11480          *
11481          * One issue here is that some of the unit attentions we report
11482          * don't fall into the "reset" category (e.g. "reported luns data
11483          * has changed").  So reporting it here, before the reservation
11484          * check, may be technically wrong.  I guess the only thing to do
11485          * would be to check for and report the reset events here, and then
11486          * check for the other unit attention types after we check for a
11487          * reservation conflict.
11488          *
11489          * XXX KDM need to fix this
11490          */
11491         if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
11492                 ctl_ua_type ua_type;
11493                 u_int sense_len = 0;
11494
11495                 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
11496                     &sense_len, SSD_TYPE_NONE);
11497                 if (ua_type != CTL_UA_NONE) {
11498                         mtx_unlock(&lun->lun_lock);
11499                         ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
11500                         ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11501                         ctsio->sense_len = sense_len;
11502                         ctl_done((union ctl_io *)ctsio);
11503                         return (retval);
11504                 }
11505         }
11506
11507
11508         if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
11509                 mtx_unlock(&lun->lun_lock);
11510                 ctl_done((union ctl_io *)ctsio);
11511                 return (retval);
11512         }
11513
11514         /*
11515          * XXX CHD this is where we want to send the I/O to the other side
11516          * if this LUN is secondary on this SC.  We will need to make a copy
11517          * of the I/O, flag the I/O on this side as SENT_2OTHER, and flag
11518          * the copy we send as FROM_OTHER.
11519          * We also need to stuff the address of the original I/O in so we
11520          * can find it easily.  Something similar will need to be done on
11521          * the other side so that when we are done we can find the copy.
11522          */
11523         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
11524             (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 &&
11525             (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) {
11526                 union ctl_ha_msg msg_info;
11527                 int isc_retval;
11528
11529                 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11530                 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11531                 mtx_unlock(&lun->lun_lock);
11532
11533                 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
11534                 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
11535                 msg_info.hdr.serializing_sc = NULL;
11536                 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
11537                 msg_info.scsi.tag_num = ctsio->tag_num;
11538                 msg_info.scsi.tag_type = ctsio->tag_type;
11539                 msg_info.scsi.cdb_len = ctsio->cdb_len;
11540                 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
11541
11542                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11543                     sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
11544                     M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
11545                         ctl_set_busy(ctsio);
11546                         ctl_done((union ctl_io *)ctsio);
11547                         return (retval);
11548                 }
11549                 return (retval);
11550         }
11551
11552         bio = (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links);
11553         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
11554         case CTL_ACTION_BLOCK:
11555                 ctsio->io_hdr.blocker = bio;
11556                 TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
11557                                   blocked_links);
11558                 mtx_unlock(&lun->lun_lock);
11559                 return (retval);
11560         case CTL_ACTION_PASS:
11561         case CTL_ACTION_SKIP:
11562                 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11563                 mtx_unlock(&lun->lun_lock);
11564                 ctl_enqueue_rtr((union ctl_io *)ctsio);
11565                 break;
11566         case CTL_ACTION_OVERLAP:
11567                 mtx_unlock(&lun->lun_lock);
11568                 ctl_set_overlapped_cmd(ctsio);
11569                 ctl_done((union ctl_io *)ctsio);
11570                 break;
11571         case CTL_ACTION_OVERLAP_TAG:
11572                 mtx_unlock(&lun->lun_lock);
11573                 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
11574                 ctl_done((union ctl_io *)ctsio);
11575                 break;
11576         case CTL_ACTION_ERROR:
11577         default:
11578                 mtx_unlock(&lun->lun_lock);
11579                 ctl_set_internal_failure(ctsio,
11580                                          /*sks_valid*/ 0,
11581                                          /*retry_count*/ 0);
11582                 ctl_done((union ctl_io *)ctsio);
11583                 break;
11584         }
11585         return (retval);
11586 }
11587
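/*
 * Look up the command entry for the given CDB.  For opcodes that encode a
 * service action (CTL_CMD_FLAG_SA5), descend into the sub-table pointed to
 * by the entry's execute field.  If sa is non-NULL, report through it
 * whether a service action was used to select the entry.
 */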
11588 const struct ctl_cmd_entry *
11589 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
11590 {
11591         const struct ctl_cmd_entry *entry;
11592         int service_action;
11593
11594         entry = &ctl_cmd_table[ctsio->cdb[0]];
11595         if (sa)
11596                 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
11597         if (entry->flags & CTL_CMD_FLAG_SA5) {
11598                 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
11599                 entry = &((const struct ctl_cmd_entry *)
11600                     entry->execute)[service_action];
11601         }
11602         return (entry);
11603 }
11604
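/*
 * Validate the CDB against the command table.  If the opcode or service
 * action is not implemented, or if any bit not allowed by the entry's
 * usage mask is set, complete the I/O with ILLEGAL REQUEST sense data and
 * return NULL; otherwise return the command entry.
 */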
11605 const struct ctl_cmd_entry *
11606 ctl_validate_command(struct ctl_scsiio *ctsio)
11607 {
11608         const struct ctl_cmd_entry *entry;
11609         int i, sa;
11610         uint8_t diff;
11611
11612         entry = ctl_get_cmd_entry(ctsio, &sa);
11613         if (entry->execute == NULL) {
11614                 if (sa)
11615                         ctl_set_invalid_field(ctsio,
11616                                               /*sks_valid*/ 1,
11617                                               /*command*/ 1,
11618                                               /*field*/ 1,
11619                                               /*bit_valid*/ 1,
11620                                               /*bit*/ 4);
11621                 else
11622                         ctl_set_invalid_opcode(ctsio);
11623                 ctl_done((union ctl_io *)ctsio);
11624                 return (NULL);
11625         }
11626         KASSERT(entry->length > 0,
11627             ("Length not defined for command 0x%02x/0x%02x",
11628              ctsio->cdb[0], ctsio->cdb[1]));
11629         for (i = 1; i < entry->length; i++) {
11630                 diff = ctsio->cdb[i] & ~entry->usage[i - 1];
11631                 if (diff == 0)
11632                         continue;
11633                 ctl_set_invalid_field(ctsio,
11634                                       /*sks_valid*/ 1,
11635                                       /*command*/ 1,
11636                                       /*field*/ i,
11637                                       /*bit_valid*/ 1,
11638                                       /*bit*/ fls(diff) - 1);
11639                 ctl_done((union ctl_io *)ctsio);
11640                 return (NULL);
11641         }
11642         return (entry);
11643 }
11644
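/*
 * Return non-zero if the command described by entry may be issued to a
 * LUN of the given type (direct access, processor, or CD-ROM).
 */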
11645 static int
11646 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
11647 {
11648
11649         switch (lun_type) {
11650         case T_DIRECT:
11651                 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0)
11652                         return (0);
11653                 break;
11654         case T_PROCESSOR:
11655                 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
11656                         return (0);
11657                 break;
11658         case T_CDROM:
11659                 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0)
11660                         return (0);
11661                 break;
11662         default:
11663                 return (0);
11664         }
11665         return (1);
11666 }
11667
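/*
 * Execute a SCSI I/O that has passed ctl_scsiio_precheck(): look up its
 * command entry and invoke the entry's execute handler, unless the I/O
 * has been aborted in the meantime.
 */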
11668 static int
11669 ctl_scsiio(struct ctl_scsiio *ctsio)
11670 {
11671         int retval;
11672         const struct ctl_cmd_entry *entry;
11673
11674         retval = CTL_RETVAL_COMPLETE;
11675
11676         CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
11677
11678         entry = ctl_get_cmd_entry(ctsio, NULL);
11679
11680         /*
11681          * If this I/O has been aborted, just send it straight to
11682          * ctl_done() without executing it.
11683          */
11684         if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
11685                 ctl_done((union ctl_io *)ctsio);
11686                 goto bailout;
11687         }
11688
11689         /*
11690          * All the checks should have been handled by ctl_scsiio_precheck().
11691          * We should be clear now to just execute the I/O.
11692          */
11693         retval = entry->execute(ctsio);
11694
11695 bailout:
11696         return (retval);
11697 }
11698
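/*
 * Handle the TARGET RESET and BUS RESET task management functions: notify
 * the peer controller unless the request came from it, then reset every
 * LUN visible through the requesting port, establishing the appropriate
 * unit attention on each.
 */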
11699 static int
11700 ctl_target_reset(union ctl_io *io)
11701 {
11702         struct ctl_softc *softc = CTL_SOFTC(io);
11703         struct ctl_port *port = CTL_PORT(io);
11704         struct ctl_lun *lun;
11705         uint32_t initidx;
11706         ctl_ua_type ua_type;
11707
11708         if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11709                 union ctl_ha_msg msg_info;
11710
11711                 msg_info.hdr.nexus = io->io_hdr.nexus;
11712                 msg_info.task.task_action = io->taskio.task_action;
11713                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11714                 msg_info.hdr.original_sc = NULL;
11715                 msg_info.hdr.serializing_sc = NULL;
11716                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11717                     sizeof(msg_info.task), M_WAITOK);
11718         }
11719
11720         initidx = ctl_get_initindex(&io->io_hdr.nexus);
11721         if (io->taskio.task_action == CTL_TASK_TARGET_RESET)
11722                 ua_type = CTL_UA_TARG_RESET;
11723         else
11724                 ua_type = CTL_UA_BUS_RESET;
11725         mtx_lock(&softc->ctl_lock);
11726         STAILQ_FOREACH(lun, &softc->lun_list, links) {
11727                 if (port != NULL &&
11728                     ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
11729                         continue;
11730                 ctl_do_lun_reset(lun, initidx, ua_type);
11731         }
11732         mtx_unlock(&softc->ctl_lock);
11733         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11734         return (0);
11735 }
11736
11737 /*
11738  * The LUN should always be set.  The I/O is optional, and is used to
11739  * distinguish between I/Os sent by this initiator, and by other
11740  * initiators.  We set unit attention for initiators other than this one.
11741  * SAM-3 is vague on this point.  It does say that a unit attention should
11742  * be established for other initiators when a LUN is reset (see section
11743  * 5.7.3), but it doesn't specifically say that the unit attention should
11744  * be established for this particular initiator when a LUN is reset.  Here
11745  * is the relevant text, from SAM-3 rev 8:
11746  *
11747  * 5.7.2 When a SCSI initiator port aborts its own tasks
11748  *
11749  * When a SCSI initiator port causes its own task(s) to be aborted, no
11750  * notification that the task(s) have been aborted shall be returned to
11751  * the SCSI initiator port other than the completion response for the
11752  * command or task management function action that caused the task(s) to
11753  * be aborted and notification(s) associated with related effects of the
11754  * action (e.g., a reset unit attention condition).
11755  *
11756  * XXX KDM for now, we're setting unit attention for all initiators.
11757  */
11758 static void
11759 ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type)
11760 {
11761         union ctl_io *xio;
11762         int i;
11763
11764         mtx_lock(&lun->lun_lock);
11765         /* Abort tasks. */
11766         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11767              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11768                 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
11769                 ctl_try_unblock_io(lun, xio, FALSE);
11770         }
11771         /* Clear CA. */
11772         for (i = 0; i < ctl_max_ports; i++) {
11773                 free(lun->pending_sense[i], M_CTL);
11774                 lun->pending_sense[i] = NULL;
11775         }
11776         /* Clear reservation. */
11777         lun->flags &= ~CTL_LUN_RESERVED;
11778         /* Clear prevent media removal. */
11779         if (lun->prevent) {
11780                 for (i = 0; i < CTL_MAX_INITIATORS; i++)
11781                         ctl_clear_mask(lun->prevent, i);
11782                 lun->prevent_count = 0;
11783         }
11784         /* Clear TPC status */
11785         ctl_tpc_lun_clear(lun, -1);
11786         /* Establish UA. */
11787 #if 0
11788         ctl_est_ua_all(lun, initidx, ua_type);
11789 #else
11790         ctl_est_ua_all(lun, -1, ua_type);
11791 #endif
11792         mtx_unlock(&lun->lun_lock);
11793 }
11794
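/*
 * Handle the LUN RESET task management function for a single LUN, and
 * notify the peer controller unless the request originated there.
 */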
11795 static int
11796 ctl_lun_reset(union ctl_io *io)
11797 {
11798         struct ctl_softc *softc = CTL_SOFTC(io);
11799         struct ctl_lun *lun;
11800         uint32_t targ_lun, initidx;
11801
11802         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11803         initidx = ctl_get_initindex(&io->io_hdr.nexus);
11804         mtx_lock(&softc->ctl_lock);
11805         if (targ_lun >= ctl_max_luns ||
11806             (lun = softc->ctl_luns[targ_lun]) == NULL) {
11807                 mtx_unlock(&softc->ctl_lock);
11808                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
11809                 return (1);
11810         }
11811         ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET);
11812         mtx_unlock(&softc->ctl_lock);
11813         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11814
11815         if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
11816                 union ctl_ha_msg msg_info;
11817
11818                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11819                 msg_info.hdr.nexus = io->io_hdr.nexus;
11820                 msg_info.task.task_action = CTL_TASK_LUN_RESET;
11821                 msg_info.hdr.original_sc = NULL;
11822                 msg_info.hdr.serializing_sc = NULL;
11823                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11824                     sizeof(msg_info.task), M_WAITOK);
11825         }
11826         return (0);
11827 }
11828
11829 static void
11830 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
11831     int other_sc)
11832 {
11833         union ctl_io *xio;
11834
11835         mtx_assert(&lun->lun_lock, MA_OWNED);
11836
11837         /*
11838          * Run through the OOA queue and abort every I/O that matches the
11839          * given target port and initiator ID; UINT32_MAX in either field
11840          * acts as a wildcard.  I/Os belonging to a nexus other than the
11841          * one that requested the abort are also flagged to return abort
11842          * status (CTL_FLAG_ABORT_STATUS).
11843          */
11844         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11845              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11846
11847                 if ((targ_port == UINT32_MAX ||
11848                      targ_port == xio->io_hdr.nexus.targ_port) &&
11849                     (init_id == UINT32_MAX ||
11850                      init_id == xio->io_hdr.nexus.initid)) {
11851                         if (targ_port != xio->io_hdr.nexus.targ_port ||
11852                             init_id != xio->io_hdr.nexus.initid)
11853                                 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
11854                         xio->io_hdr.flags |= CTL_FLAG_ABORT;
11855                         if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
11856                                 union ctl_ha_msg msg_info;
11857
11858                                 msg_info.hdr.nexus = xio->io_hdr.nexus;
11859                                 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
11860                                 msg_info.task.tag_num = xio->scsiio.tag_num;
11861                                 msg_info.task.tag_type = xio->scsiio.tag_type;
11862                                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11863                                 msg_info.hdr.original_sc = NULL;
11864                                 msg_info.hdr.serializing_sc = NULL;
11865                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11866                                     sizeof(msg_info.task), M_NOWAIT);
11867                         }
11868                         ctl_try_unblock_io(lun, xio, FALSE);
11869                 }
11870         }
11871 }
11872
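/*
 * Handle the ABORT TASK SET and CLEAR TASK SET task management functions.
 * ABORT TASK SET aborts only the tasks of the requesting I_T nexus, while
 * CLEAR TASK SET aborts everything on the LUN regardless of initiator.
 */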
11873 static int
11874 ctl_abort_task_set(union ctl_io *io)
11875 {
11876         struct ctl_softc *softc = CTL_SOFTC(io);
11877         struct ctl_lun *lun;
11878         uint32_t targ_lun;
11879
11880         /*
11881          * Look up the LUN.
11882          */
11883         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11884         mtx_lock(&softc->ctl_lock);
11885         if (targ_lun >= ctl_max_luns ||
11886             (lun = softc->ctl_luns[targ_lun]) == NULL) {
11887                 mtx_unlock(&softc->ctl_lock);
11888                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
11889                 return (1);
11890         }
11891
11892         mtx_lock(&lun->lun_lock);
11893         mtx_unlock(&softc->ctl_lock);
11894         if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
11895                 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
11896                     io->io_hdr.nexus.initid,
11897                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
11898         } else { /* CTL_TASK_CLEAR_TASK_SET */
11899                 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
11900                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
11901         }
11902         mtx_unlock(&lun->lun_lock);
11903         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11904         return (0);
11905 }
11906
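/*
 * Clean up all per-initiator state on every LUN after an I_T nexus loss:
 * abort the initiator's tasks, clear its pending sense, drop any
 * reservation or media-removal prevention it holds, clear its TPC status,
 * and establish the given unit attention.
 */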
11907 static void
11908 ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx,
11909     ctl_ua_type ua_type)
11910 {
11911         struct ctl_lun *lun;
11912         struct scsi_sense_data *ps;
11913         uint32_t p, i;
11914
11915         p = initidx / CTL_MAX_INIT_PER_PORT;
11916         i = initidx % CTL_MAX_INIT_PER_PORT;
11917         mtx_lock(&softc->ctl_lock);
11918         STAILQ_FOREACH(lun, &softc->lun_list, links) {
11919                 mtx_lock(&lun->lun_lock);
11920                 /* Abort tasks. */
11921                 ctl_abort_tasks_lun(lun, p, i, 1);
11922                 /* Clear CA. */
11923                 ps = lun->pending_sense[p];
11924                 if (ps != NULL)
11925                         ps[i].error_code = 0;
11926                 /* Clear reservation. */
11927                 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
11928                         lun->flags &= ~CTL_LUN_RESERVED;
11929                 /* Clear prevent media removal. */
11930                 if (lun->prevent && ctl_is_set(lun->prevent, initidx)) {
11931                         ctl_clear_mask(lun->prevent, initidx);
11932                         lun->prevent_count--;
11933                 }
11934                 /* Clear TPC status */
11935                 ctl_tpc_lun_clear(lun, initidx);
11936                 /* Establish UA. */
11937                 ctl_est_ua(lun, initidx, ua_type);
11938                 mtx_unlock(&lun->lun_lock);
11939         }
11940         mtx_unlock(&softc->ctl_lock);
11941 }
11942
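/*
 * Handle the I_T NEXUS RESET task management function, propagating it to
 * the peer controller when the request did not come from there.
 */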
11943 static int
11944 ctl_i_t_nexus_reset(union ctl_io *io)
11945 {
11946         struct ctl_softc *softc = CTL_SOFTC(io);
11947         uint32_t initidx;
11948
11949         if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11950                 union ctl_ha_msg msg_info;
11951
11952                 msg_info.hdr.nexus = io->io_hdr.nexus;
11953                 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
11954                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11955                 msg_info.hdr.original_sc = NULL;
11956                 msg_info.hdr.serializing_sc = NULL;
11957                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11958                     sizeof(msg_info.task), M_WAITOK);
11959         }
11960
11961         initidx = ctl_get_initindex(&io->io_hdr.nexus);
11962         ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS);
11963         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11964         return (0);
11965 }
11966
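/*
 * Handle the ABORT TASK task management function: find the task with the
 * matching tag number on the LUN's OOA queue, mark it aborted, and let
 * the peer controller know when running as the secondary SC.
 */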
11967 static int
11968 ctl_abort_task(union ctl_io *io)
11969 {
11970         struct ctl_softc *softc = CTL_SOFTC(io);
11971         union ctl_io *xio;
11972         struct ctl_lun *lun;
11973         uint32_t targ_lun;
11974
11975         /*
11976          * Look up the LUN.
11977          */
11978         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11979         mtx_lock(&softc->ctl_lock);
11980         if (targ_lun >= ctl_max_luns ||
11981             (lun = softc->ctl_luns[targ_lun]) == NULL) {
11982                 mtx_unlock(&softc->ctl_lock);
11983                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
11984                 return (1);
11985         }
11986
11987         mtx_lock(&lun->lun_lock);
11988         mtx_unlock(&softc->ctl_lock);
11989         /*
11990          * Run through the OOA queue and attempt to find the given I/O.
11991          * The target port, initiator ID, tag type and tag number have to
11992          * match the values that we got from the initiator.  If we have an
11993          * untagged command to abort, simply abort the first untagged command
11994          * we come to.  We only allow one untagged command at a time, of course.
11995          */
11996         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11997              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11998
11999                 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12000                  || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
12001                  || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12002                         continue;
12003
12004                 /*
12005                  * If the abort says that the task is untagged, the
12006                  * task in the queue must be untagged.  Otherwise,
12007                  * we just check to see whether the tag numbers
12008                  * match.  This is because the QLogic firmware
12009                  * doesn't pass back the tag type in an abort
12010                  * request.
12011                  */
12012 #if 0
12013                 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
12014                   && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
12015                  || (xio->scsiio.tag_num == io->taskio.tag_num)) {
12016 #else
12017                 /*
12018                  * XXX KDM we've got problems with FC, because it
12019                  * doesn't send down a tag type with aborts.  So we
12020                  * can only really go by the tag number...
12021                  * This may cause problems with parallel SCSI.
12022                  * Need to figure that out!!
12023                  */
12024                 if (xio->scsiio.tag_num == io->taskio.tag_num) {
12025 #endif
12026                         xio->io_hdr.flags |= CTL_FLAG_ABORT;
12027                         if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
12028                             !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12029                                 union ctl_ha_msg msg_info;
12030
12031                                 msg_info.hdr.nexus = io->io_hdr.nexus;
12032                                 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12033                                 msg_info.task.tag_num = io->taskio.tag_num;
12034                                 msg_info.task.tag_type = io->taskio.tag_type;
12035                                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12036                                 msg_info.hdr.original_sc = NULL;
12037                                 msg_info.hdr.serializing_sc = NULL;
12038                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12039                                     sizeof(msg_info.task), M_NOWAIT);
12040                         }
12041                         ctl_try_unblock_io(lun, xio, FALSE);
12042                 }
12043         }
12044         mtx_unlock(&lun->lun_lock);
12045         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12046         return (0);
12047 }
12048
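/*
 * Handle the QUERY TASK (task_set == 0) and QUERY TASK SET (task_set != 0)
 * task management functions by scanning the LUN's OOA queue for a
 * matching, not-yet-aborted task from the same I_T nexus.
 */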
12049 static int
12050 ctl_query_task(union ctl_io *io, int task_set)
12051 {
12052         struct ctl_softc *softc = CTL_SOFTC(io);
12053         union ctl_io *xio;
12054         struct ctl_lun *lun;
12055         int found = 0;
12056         uint32_t targ_lun;
12057
12058         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12059         mtx_lock(&softc->ctl_lock);
12060         if (targ_lun >= ctl_max_luns ||
12061             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12062                 mtx_unlock(&softc->ctl_lock);
12063                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12064                 return (1);
12065         }
12066         mtx_lock(&lun->lun_lock);
12067         mtx_unlock(&softc->ctl_lock);
12068         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12069              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12070
12071                 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12072                  || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
12073                  || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12074                         continue;
12075
12076                 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
12077                         found = 1;
12078                         break;
12079                 }
12080         }
12081         mtx_unlock(&lun->lun_lock);
12082         if (found)
12083                 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12084         else
12085                 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12086         return (0);
12087 }
12088
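/*
 * Handle the QUERY ASYNCHRONOUS EVENT task management function: report
 * whether a unit attention condition is pending for the requesting
 * initiator on the addressed LUN.
 */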
12089 static int
12090 ctl_query_async_event(union ctl_io *io)
12091 {
12092         struct ctl_softc *softc = CTL_SOFTC(io);
12093         struct ctl_lun *lun;
12094         ctl_ua_type ua;
12095         uint32_t targ_lun, initidx;
12096
12097         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12098         mtx_lock(&softc->ctl_lock);
12099         if (targ_lun >= ctl_max_luns ||
12100             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12101                 mtx_unlock(&softc->ctl_lock);
12102                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12103                 return (1);
12104         }
12105         mtx_lock(&lun->lun_lock);
12106         mtx_unlock(&softc->ctl_lock);
12107         initidx = ctl_get_initindex(&io->io_hdr.nexus);
12108         ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
12109         mtx_unlock(&lun->lun_lock);
12110         if (ua != CTL_UA_NONE)
12111                 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12112         else
12113                 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12114         return (0);
12115 }
12116
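/*
 * Dispatch a task management request to the appropriate handler and
 * complete the I/O with the resulting task status.
 */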
12117 static void
12118 ctl_run_task(union ctl_io *io)
12119 {
12120         int retval = 1;
12121
12122         CTL_DEBUG_PRINT(("ctl_run_task\n"));
12123         KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12124             ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type));
12125         io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
12126         bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
12127         switch (io->taskio.task_action) {
12128         case CTL_TASK_ABORT_TASK:
12129                 retval = ctl_abort_task(io);
12130                 break;
12131         case CTL_TASK_ABORT_TASK_SET:
12132         case CTL_TASK_CLEAR_TASK_SET:
12133                 retval = ctl_abort_task_set(io);
12134                 break;
12135         case CTL_TASK_CLEAR_ACA:
12136                 break;
12137         case CTL_TASK_I_T_NEXUS_RESET:
12138                 retval = ctl_i_t_nexus_reset(io);
12139                 break;
12140         case CTL_TASK_LUN_RESET:
12141                 retval = ctl_lun_reset(io);
12142                 break;
12143         case CTL_TASK_TARGET_RESET:
12144         case CTL_TASK_BUS_RESET:
12145                 retval = ctl_target_reset(io);
12146                 break;
12147         case CTL_TASK_PORT_LOGIN:
12148                 break;
12149         case CTL_TASK_PORT_LOGOUT:
12150                 break;
12151         case CTL_TASK_QUERY_TASK:
12152                 retval = ctl_query_task(io, 0);
12153                 break;
12154         case CTL_TASK_QUERY_TASK_SET:
12155                 retval = ctl_query_task(io, 1);
12156                 break;
12157         case CTL_TASK_QUERY_ASYNC_EVENT:
12158                 retval = ctl_query_async_event(io);
12159                 break;
12160         default:
12161                 printf("%s: got unknown task management event %d\n",
12162                        __func__, io->taskio.task_action);
12163                 break;
12164         }
12165         if (retval == 0)
12166                 io->io_hdr.status = CTL_SUCCESS;
12167         else
12168                 io->io_hdr.status = CTL_ERROR;
12169         ctl_done(io);
12170 }
12171
12172 /*
12173  * For HA operation.  Handle commands that come in from the other
12174  * controller.
12175  */
12176 static void
12177 ctl_handle_isc(union ctl_io *io)
12178 {
12179         struct ctl_softc *softc = CTL_SOFTC(io);
12180         struct ctl_lun *lun;
12181         const struct ctl_cmd_entry *entry;
12182         uint32_t targ_lun;
12183
12184         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12185         switch (io->io_hdr.msg_type) {
12186         case CTL_MSG_SERIALIZE:
12187                 ctl_serialize_other_sc_cmd(&io->scsiio);
12188                 break;
12189         case CTL_MSG_R2R:               /* Only used in SER_ONLY mode. */
12190                 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
12191                 if (targ_lun >= ctl_max_luns ||
12192                     (lun = softc->ctl_luns[targ_lun]) == NULL) {
12193                         ctl_done(io);
12194                         break;
12195                 }
12196                 mtx_lock(&lun->lun_lock);
12197                 if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) {
12198                         mtx_unlock(&lun->lun_lock);
12199                         ctl_done(io);
12200                         break;
12201                 }
12202                 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
12203                 mtx_unlock(&lun->lun_lock);
12204                 ctl_enqueue_rtr(io);
12205                 break;
12206         case CTL_MSG_FINISH_IO:
12207                 if (softc->ha_mode == CTL_HA_MODE_XFER) {
12208                         ctl_done(io);
12209                         break;
12210                 }
12211                 if (targ_lun >= ctl_max_luns ||
12212                     (lun = softc->ctl_luns[targ_lun]) == NULL) {
12213                         ctl_free_io(io);
12214                         break;
12215                 }
12216                 mtx_lock(&lun->lun_lock);
12217                 ctl_try_unblock_others(lun, io, TRUE);
12218                 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12219                 mtx_unlock(&lun->lun_lock);
12220                 ctl_free_io(io);
12221                 break;
12222         case CTL_MSG_PERS_ACTION:
12223                 ctl_hndl_per_res_out_on_other_sc(io);
12224                 ctl_free_io(io);
12225                 break;
12226         case CTL_MSG_BAD_JUJU:
12227                 ctl_done(io);
12228                 break;
12229         case CTL_MSG_DATAMOVE:          /* Only used in XFER mode */
12230                 ctl_datamove_remote(io);
12231                 break;
12232         case CTL_MSG_DATAMOVE_DONE:     /* Only used in XFER mode */
12233                 io->scsiio.be_move_done(io);
12234                 break;
12235         case CTL_MSG_FAILOVER:
12236                 ctl_failover_lun(io);
12237                 ctl_free_io(io);
12238                 break;
12239         default:
12240                 printf("%s: Invalid message type %d\n",
12241                        __func__, io->io_hdr.msg_type);
12242                 ctl_free_io(io);
12243                 break;
12244         }
12245
12246 }
12247
12248
12249 /*
12250  * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
12251  * there is no match.
12252  */
12253 static ctl_lun_error_pattern
12254 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
12255 {
12256         const struct ctl_cmd_entry *entry;
12257         ctl_lun_error_pattern filtered_pattern, pattern;
12258
12259         pattern = desc->error_pattern;
12260
12261         /*
12262          * XXX KDM we need more data passed into this function to match a
12263          * custom pattern, and we actually need to implement custom pattern
12264          * matching.
12265          */
12266         if (pattern & CTL_LUN_PAT_CMD)
12267                 return (CTL_LUN_PAT_CMD);
12268
12269         if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
12270                 return (CTL_LUN_PAT_ANY);
12271
12272         entry = ctl_get_cmd_entry(ctsio, NULL);
12273
12274         filtered_pattern = entry->pattern & pattern;
12275
12276         /*
12277          * If the user requested specific flags in the pattern (e.g.
12278          * CTL_LUN_PAT_RANGE), make sure the command supports all of those
12279          * flags.
12280          *
12281          * If the user did not specify any flags, it doesn't matter whether
12282          * or not the command supports the flags.
12283          */
12284         if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
12285              (pattern & ~CTL_LUN_PAT_MASK))
12286                 return (CTL_LUN_PAT_NONE);
12287
12288         /*
12289          * If the user asked for a range check, see if the requested LBA
12290          * range overlaps with this command's LBA range.
12291          */
12292         if (filtered_pattern & CTL_LUN_PAT_RANGE) {
12293                 uint64_t lba1;
12294                 uint64_t len1;
12295                 ctl_action action;
12296                 int retval;
12297
12298                 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
12299                 if (retval != 0)
12300                         return (CTL_LUN_PAT_NONE);
12301
12302                 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
12303                                               desc->lba_range.len, FALSE);
12304                 /*
12305                  * A "pass" means that the LBA ranges don't overlap, so
12306                  * this doesn't match the user's range criteria.
12307                  */
12308                 if (action == CTL_ACTION_PASS)
12309                         return (CTL_LUN_PAT_NONE);
12310         }
12311
12312         return (filtered_pattern);
12313 }
12314
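/*
 * Apply any error injection descriptors configured on this LUN that match
 * the given I/O, faking the corresponding SCSI error.  Descriptors not
 * marked continuous are removed once they fire.
 */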
12315 static void
12316 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
12317 {
12318         struct ctl_error_desc *desc, *desc2;
12319
12320         mtx_assert(&lun->lun_lock, MA_OWNED);
12321
12322         STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
12323                 ctl_lun_error_pattern pattern;
12324                 /*
12325                  * Check to see whether this particular command matches
12326                  * the pattern in the descriptor.
12327                  */
12328                 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
12329                 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
12330                         continue;
12331
12332                 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
12333                 case CTL_LUN_INJ_ABORTED:
12334                         ctl_set_aborted(&io->scsiio);
12335                         break;
12336                 case CTL_LUN_INJ_MEDIUM_ERR:
12337                         ctl_set_medium_error(&io->scsiio,
12338                             (io->io_hdr.flags & CTL_FLAG_DATA_MASK) !=
12339                              CTL_FLAG_DATA_OUT);
12340                         break;
12341                 case CTL_LUN_INJ_UA:
12342                         /* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
12343                          * OCCURRED */
12344                         ctl_set_ua(&io->scsiio, 0x29, 0x00);
12345                         break;
12346                 case CTL_LUN_INJ_CUSTOM:
12347                         /*
12348                          * We're assuming the user knows what he is doing.
12349                          * Just copy the sense information without doing
12350                          * checks.
12351                          */
12352                         bcopy(&desc->custom_sense, &io->scsiio.sense_data,
12353                               MIN(sizeof(desc->custom_sense),
12354                                   sizeof(io->scsiio.sense_data)));
12355                         io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
12356                         io->scsiio.sense_len = SSD_FULL_SIZE;
12357                         io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
12358                         break;
12359                 case CTL_LUN_INJ_NONE:
12360                 default:
12361                         /*
12362                          * If this is an error injection type we don't know
12363                          * about, clear the continuous flag (if it is set)
12364                          * so it will get deleted below.
12365                          */
12366                         desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
12367                         break;
12368                 }
12369                 /*
12370                  * By default, each error injection action is a one-shot
12371                  */
12372                 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
12373                         continue;
12374
12375                 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
12376
12377                 free(desc, M_CTL);
12378         }
12379 }
12380
12381 #ifdef CTL_IO_DELAY
12382 static void
12383 ctl_datamove_timer_wakeup(void *arg)
12384 {
12385         union ctl_io *io;
12386
12387         io = (union ctl_io *)arg;
12388
12389         ctl_datamove(io);
12390 }
12391 #endif /* CTL_IO_DELAY */
12392
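/*
 * Start data movement for an I/O: set up the residual count, optionally
 * apply a configured datamove delay, fail the transfer if the I/O has
 * been aborted, and otherwise hand the I/O to the owning frontend port's
 * fe_datamove callback.  Zero-length moves are completed immediately.
 */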
12393 void
12394 ctl_datamove(union ctl_io *io)
12395 {
12396         void (*fe_datamove)(union ctl_io *io);
12397
12398         mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
12399
12400         CTL_DEBUG_PRINT(("ctl_datamove\n"));
12401
12402         /* No data transferred yet.  Frontend must update this when done. */
12403         io->scsiio.kern_data_resid = io->scsiio.kern_data_len;
12404
12405 #ifdef CTL_TIME_IO
12406         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12407                 char str[256];
12408                 char path_str[64];
12409                 struct sbuf sb;
12410
12411                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12412                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12413
12414                 sbuf_cat(&sb, path_str);
12415                 switch (io->io_hdr.io_type) {
12416                 case CTL_IO_SCSI:
12417                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12418                         sbuf_printf(&sb, "\n");
12419                         sbuf_cat(&sb, path_str);
12420                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12421                                     io->scsiio.tag_num, io->scsiio.tag_type);
12422                         break;
12423                 case CTL_IO_TASK:
12424                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12425                                     "Tag Type: %d\n", io->taskio.task_action,
12426                                     io->taskio.tag_num, io->taskio.tag_type);
12427                         break;
12428                 default:
12429                         panic("%s: Invalid CTL I/O type %d\n",
12430                             __func__, io->io_hdr.io_type);
12431                 }
12432                 sbuf_cat(&sb, path_str);
12433                 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
12434                             (intmax_t)time_uptime - io->io_hdr.start_time);
12435                 sbuf_finish(&sb);
12436                 printf("%s", sbuf_data(&sb));
12437         }
12438 #endif /* CTL_TIME_IO */
12439
12440 #ifdef CTL_IO_DELAY
12441         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12442                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12443         } else {
12444                 struct ctl_lun *lun;
12445
12446                 lun = CTL_LUN(io);
12447                 if ((lun != NULL)
12448                  && (lun->delay_info.datamove_delay > 0)) {
12449
12450                         callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
12451                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12452                         callout_reset(&io->io_hdr.delay_callout,
12453                                       lun->delay_info.datamove_delay * hz,
12454                                       ctl_datamove_timer_wakeup, io);
12455                         if (lun->delay_info.datamove_type ==
12456                             CTL_DELAY_TYPE_ONESHOT)
12457                                 lun->delay_info.datamove_delay = 0;
12458                         return;
12459                 }
12460         }
12461 #endif
12462
12463         /*
12464          * This command has been aborted.  Set the port status, so we fail
12465          * the data move.
12466          */
12467         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12468                 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
12469                        io->scsiio.tag_num, io->io_hdr.nexus.initid,
12470                        io->io_hdr.nexus.targ_port,
12471                        io->io_hdr.nexus.targ_lun);
12472                 io->io_hdr.port_status = 31337;
12473                 /*
12474                  * Note that the backend, in this case, will get the
12475                  * callback in its context.  In other cases it may get
12476                  * called in the frontend's interrupt thread context.
12477                  */
12478                 io->scsiio.be_move_done(io);
12479                 return;
12480         }
12481
12482         /* Don't confuse frontend with zero length data move. */
12483         if (io->scsiio.kern_data_len == 0) {
12484                 io->scsiio.be_move_done(io);
12485                 return;
12486         }
12487
12488         fe_datamove = CTL_PORT(io)->fe_datamove;
12489         fe_datamove(io);
12490 }
12491
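/*
 * Notify the peer controller that a remote datamove has finished, passing
 * back the data residual, SCSI status, and sense data.  If the I/O is in
 * failover, it is diverted to ctl_failover_io() instead.
 */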
12492 static void
12493 ctl_send_datamove_done(union ctl_io *io, int have_lock)
12494 {
12495         union ctl_ha_msg msg;
12496 #ifdef CTL_TIME_IO
12497         struct bintime cur_bt;
12498 #endif
12499
12500         memset(&msg, 0, sizeof(msg));
12501         msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12502         msg.hdr.original_sc = io;
12503         msg.hdr.serializing_sc = io->io_hdr.remote_io;
12504         msg.hdr.nexus = io->io_hdr.nexus;
12505         msg.hdr.status = io->io_hdr.status;
12506         msg.scsi.kern_data_resid = io->scsiio.kern_data_resid;
12507         msg.scsi.tag_num = io->scsiio.tag_num;
12508         msg.scsi.tag_type = io->scsiio.tag_type;
12509         msg.scsi.scsi_status = io->scsiio.scsi_status;
12510         memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12511                io->scsiio.sense_len);
12512         msg.scsi.sense_len = io->scsiio.sense_len;
12513         msg.scsi.port_status = io->io_hdr.port_status;
12514         io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12515         if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12516                 ctl_failover_io(io, /*have_lock*/ have_lock);
12517                 return;
12518         }
12519         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12520             sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
12521             msg.scsi.sense_len, M_WAITOK);
12522
12523 #ifdef CTL_TIME_IO
12524         getbinuptime(&cur_bt);
12525         bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
12526         bintime_add(&io->io_hdr.dma_bt, &cur_bt);
12527 #endif
12528         io->io_hdr.num_dmas++;
12529 }
12530
12531 /*
12532  * The DMA to the remote side is done, now we need to tell the other side
12533  * we're done so it can continue with its data movement.
12534  */
12535 static void
12536 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12537 {
12538         union ctl_io *io;
12539         uint32_t i;
12540
12541         io = rq->context;
12542
12543         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12544                 printf("%s: ISC DMA write failed with error %d", __func__,
12545                        rq->ret);
12546                 ctl_set_internal_failure(&io->scsiio,
12547                                          /*sks_valid*/ 1,
12548                                          /*retry_count*/ rq->ret);
12549         }
12550
12551         ctl_dt_req_free(rq);
12552
12553         for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12554                 free(CTL_LSGLT(io)[i].addr, M_CTL);
12555         free(CTL_RSGL(io), M_CTL);
12556         CTL_RSGL(io) = NULL;
12557         CTL_LSGL(io) = NULL;
12558
12559         /*
12560          * The data is in local and remote memory, so now we need to send
12561          * status (good or bad) back to the other side.
12562          */
12563         ctl_send_datamove_done(io, /*have_lock*/ 0);
12564 }
12565
12566 /*
12567  * We've moved the data from the host/controller into local memory.  Now we
12568  * need to push it over to the remote controller's memory.
12569  */
12570 static int
12571 ctl_datamove_remote_dm_write_cb(union ctl_io *io)
12572 {
12573         int retval;
12574
12575         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
12576                                           ctl_datamove_remote_write_cb);
12577         return (retval);
12578 }
12579
12580 static void
12581 ctl_datamove_remote_write(union ctl_io *io)
12582 {
12583         int retval;
12584         void (*fe_datamove)(union ctl_io *io);
12585
12586         /*
12587          * - Get the data from the host/HBA into local memory.
12588          * - DMA memory from the local controller to the remote controller.
12589          * - Send status back to the remote controller.
12590          */
12591
12592         retval = ctl_datamove_remote_sgl_setup(io);
12593         if (retval != 0)
12594                 return;
12595
12596         /* Switch the pointer over so the FETD knows what to do */
12597         io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);
12598
12599         /*
12600          * Use a custom move done callback, since we need to send completion
12601          * back to the other controller, not to the backend on this side.
12602          */
12603         io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12604
12605         fe_datamove = CTL_PORT(io)->fe_datamove;
12606         fe_datamove(io);
12607 }
12608
12609 static int
12610 ctl_datamove_remote_dm_read_cb(union ctl_io *io)
12611 {
12612         uint32_t i;
12613
12614         for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12615                 free(CTL_LSGLT(io)[i].addr, M_CTL);
12616         free(CTL_RSGL(io), M_CTL);
12617         CTL_RSGL(io) = NULL;
12618         CTL_LSGL(io) = NULL;
12619
12620         /*
12621          * The read is done, now we need to send status (good or bad) back
12622          * to the other side.
12623          */
12624         ctl_send_datamove_done(io, /*have_lock*/ 0);
12625
12626         return (0);
12627 }
12628
12629 static void
12630 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
12631 {
12632         union ctl_io *io;
12633         void (*fe_datamove)(union ctl_io *io);
12634
12635         io = rq->context;
12636
12637         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12638                 printf("%s: ISC DMA read failed with error %d\n", __func__,
12639                        rq->ret);
12640                 ctl_set_internal_failure(&io->scsiio,
12641                                          /*sks_valid*/ 1,
12642                                          /*retry_count*/ rq->ret);
12643         }
12644
12645         ctl_dt_req_free(rq);
12646
12647         /* Switch the pointer over so the FETD knows what to do */
12648         io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io);
12649
12650         /*
12651          * Use a custom move done callback, since we need to send completion
12652          * back to the other controller, not to the backend on this side.
12653          */
12654         io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
12655
12656         /* XXX KDM add checks like the ones in ctl_datamove? */
12657
12658         fe_datamove = CTL_PORT(io)->fe_datamove;
12659         fe_datamove(io);
12660 }
12661
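/*
 * Build the local S/G list for a remote datamove by carving kern_data_len
 * into CTL_HA_DATAMOVE_SEGMENT-sized buffers.  The local segment sizes may
 * differ from the remote controller's; ctl_datamove_remote_xfer() handles
 * that mismatch.
 */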
12662 static int
12663 ctl_datamove_remote_sgl_setup(union ctl_io *io)
12664 {
12665         struct ctl_sg_entry *local_sglist;
12666         uint32_t len_to_go;
12667         int retval;
12668         int i;
12669
12670         retval = 0;
12671         local_sglist = CTL_LSGL(io);
12672         len_to_go = io->scsiio.kern_data_len;
12673
12674         /*
12675          * The difficult thing here is that the size of the various
12676          * S/G segments may be different from the sizes used by the
12677          * remote controller.  That will make it harder when DMAing
12678          * the data back to the other side.
12679          */
12680         for (i = 0; len_to_go > 0; i++) {
12681                 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
12682                 local_sglist[i].addr =
12683                     malloc(local_sglist[i].len, M_CTL, M_WAITOK);
12684
12685                 len_to_go -= local_sglist[i].len;
12686         }
12687         /*
12688          * Reset the number of S/G entries accordingly.  The original
12689          * number of S/G entries is available in rem_sg_entries.
12690          */
12691         io->scsiio.kern_sg_entries = i;
12692
12693         return (retval);
12694 }
12695
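/*
 * Move the data between the local and remote S/G lists, walking both
 * lists in lock step and splitting each DMA request at whichever segment
 * boundary comes first.  The completion callback is attached only to the
 * final request, so it runs once the whole transfer is done.
 */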
12696 static int
12697 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
12698                          ctl_ha_dt_cb callback)
12699 {
12700         struct ctl_ha_dt_req *rq;
12701         struct ctl_sg_entry *remote_sglist, *local_sglist;
12702         uint32_t local_used, remote_used, total_used;
12703         int i, j, isc_ret;
12704
12705         rq = ctl_dt_req_alloc();
12706
12707         /*
12708          * If we failed to allocate the request, and if the DMA didn't fail
12709          * anyway, set busy status.  This is just a resource allocation
12710          * failure.
12711          */
12712         if ((rq == NULL)
12713          && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
12714              (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS))
12715                 ctl_set_busy(&io->scsiio);
12716
12717         if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
12718             (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
12719
12720                 if (rq != NULL)
12721                         ctl_dt_req_free(rq);
12722
12723                 /*
12724                  * The data move failed.  We need to return status back
12725                  * to the other controller.  No point in trying to DMA
12726                  * data to the remote controller.
12727                  */
12728
12729                 ctl_send_datamove_done(io, /*have_lock*/ 0);
12730
12731                 return (1);
12732         }
12733
12734         local_sglist = CTL_LSGL(io);
12735         remote_sglist = CTL_RSGL(io);
12736         local_used = 0;
12737         remote_used = 0;
12738         total_used = 0;
12739
12740         /*
12741          * Pull/push the data over the wire from/to the other controller.
12742          * This takes into account the possibility that the local and
12743          * remote sglists may not be identical in terms of the size of
12744          * the elements and the number of elements.
12745          *
12746          * One fundamental assumption here is that the length allocated for
12747          * both the local and remote sglists is identical.  Otherwise, we've
12748          * essentially got a coding error of some sort.
12749          */
12750         isc_ret = CTL_HA_STATUS_SUCCESS;
12751         for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
12752                 uint32_t cur_len;
12753                 uint8_t *tmp_ptr;
12754
12755                 rq->command = command;
12756                 rq->context = io;
12757
12758                 /*
12759                  * Both pointers should be aligned.  But it is possible
12760                  * that the allocation length is not.  They should both
12761                  * also have enough slack left over at the end, though,
12762                  * to round up to the next 8 byte boundary.
12763                  */
12764                 cur_len = MIN(local_sglist[i].len - local_used,
12765                               remote_sglist[j].len - remote_used);
12766                 rq->size = cur_len;
12767
12768                 tmp_ptr = (uint8_t *)local_sglist[i].addr;
12769                 tmp_ptr += local_used;
12770
12771 #if 0
12772                 /* Use physical addresses when talking to ISC hardware */
12773                 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
12774                         /* XXX KDM use busdma */
12775                         rq->local = vtophys(tmp_ptr);
12776                 } else
12777                         rq->local = tmp_ptr;
12778 #else
12779                 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
12780                     ("HA does not support BUS_ADDR"));
12781                 rq->local = tmp_ptr;
12782 #endif
12783
12784                 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
12785                 tmp_ptr += remote_used;
12786                 rq->remote = tmp_ptr;
12787
12788                 rq->callback = NULL;
12789
12790                 local_used += cur_len;
12791                 if (local_used >= local_sglist[i].len) {
12792                         i++;
12793                         local_used = 0;
12794                 }
12795
12796                 remote_used += cur_len;
12797                 if (remote_used >= remote_sglist[j].len) {
12798                         j++;
12799                         remote_used = 0;
12800                 }
12801                 total_used += cur_len;
12802
12803                 if (total_used >= io->scsiio.kern_data_len)
12804                         rq->callback = callback;
12805
12806                 isc_ret = ctl_dt_single(rq);
12807                 if (isc_ret > CTL_HA_STATUS_SUCCESS)
12808                         break;
12809         }
12810         if (isc_ret != CTL_HA_STATUS_WAIT) {
12811                 rq->ret = isc_ret;
12812                 callback(rq);
12813         }
12814
12815         return (0);
12816 }
12817
12818 static void
12819 ctl_datamove_remote_read(union ctl_io *io)
12820 {
12821         int retval;
12822         uint32_t i;
12823
12824         /*
12825          * This will send an error to the other controller in the case of a
12826          * failure.
12827          */
12828         retval = ctl_datamove_remote_sgl_setup(io);
12829         if (retval != 0)
12830                 return;
12831
12832         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
12833                                           ctl_datamove_remote_read_cb);
12834         if (retval != 0) {
12835                 /*
12836                  * Make sure we free memory if there was an error.  The
12837                  * ctl_datamove_remote_xfer() function will either send
12838                  * the datamove done message or call the callback with
12839                  * an error if there is a problem.
12840                  */
12841                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12842                         free(CTL_LSGLT(io)[i].addr, M_CTL);
12843                 free(CTL_RSGL(io), M_CTL);
12844                 CTL_RSGL(io) = NULL;
12845                 CTL_LSGL(io) = NULL;
12846         }
12847 }
12848
12849 /*
12850  * Process a datamove request from the other controller.  This is used for
12851  * XFER mode only, not SER_ONLY mode.  For writes, we DMA into local memory
12852  * first.  Once that is complete, the data gets DMAed into the remote
12853  * controller's memory.  For reads, we DMA from the remote controller's
12854  * memory into our memory first, and then move it out to the FETD.
12855  */
12856 static void
12857 ctl_datamove_remote(union ctl_io *io)
12858 {
12859
12860         mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
12861
12862         if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12863                 ctl_failover_io(io, /*have_lock*/ 0);
12864                 return;
12865         }
12866
12867         /*
12868          * Note that we look for an aborted I/O here, but don't do some of
12869          * the other checks that ctl_datamove() normally does.
12870          * We don't need to run the datamove delay code, since that should
12871          * have been done if need be on the other controller.
12872          */
12873         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12874                 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
12875                        io->scsiio.tag_num, io->io_hdr.nexus.initid,
12876                        io->io_hdr.nexus.targ_port,
12877                        io->io_hdr.nexus.targ_lun);
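                /*
                 * Arbitrary, recognizable nonzero port_status value used
                 * to mark this failure path (31339 below is analogous).
                 */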
12878                 io->io_hdr.port_status = 31338;
12879                 ctl_send_datamove_done(io, /*have_lock*/ 0);
12880                 return;
12881         }
12882
12883         if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
12884                 ctl_datamove_remote_write(io);
12885         else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
12886                 ctl_datamove_remote_read(io);
12887         else {
12888                 io->io_hdr.port_status = 31339;
12889                 ctl_send_datamove_done(io, /*have_lock*/ 0);
12890         }
12891 }
12892
12893 static void
12894 ctl_process_done(union ctl_io *io)
12895 {
12896         struct ctl_softc *softc = CTL_SOFTC(io);
12897         struct ctl_port *port = CTL_PORT(io);
12898         struct ctl_lun *lun = CTL_LUN(io);
12899         void (*fe_done)(union ctl_io *io);
12900         union ctl_ha_msg msg;
12901
12902         CTL_DEBUG_PRINT(("ctl_process_done\n"));
12903         fe_done = port->fe_done;
12904
12905 #ifdef CTL_TIME_IO
12906         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12907                 char str[256];
12908                 char path_str[64];
12909                 struct sbuf sb;
12910
12911                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12912                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12913
12914                 sbuf_cat(&sb, path_str);
12915                 switch (io->io_hdr.io_type) {
12916                 case CTL_IO_SCSI:
12917                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12918                         sbuf_printf(&sb, "\n");
12919                         sbuf_cat(&sb, path_str);
12920                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12921                                     io->scsiio.tag_num, io->scsiio.tag_type);
12922                         break;
12923                 case CTL_IO_TASK:
12924                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12925                                     "Tag Type: %d\n", io->taskio.task_action,
12926                                     io->taskio.tag_num, io->taskio.tag_type);
12927                         break;
12928                 default:
12929                         panic("%s: Invalid CTL I/O type %d\n",
12930                             __func__, io->io_hdr.io_type);
12931                 }
12932                 sbuf_cat(&sb, path_str);
12933                 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
12934                             (intmax_t)time_uptime - io->io_hdr.start_time);
12935                 sbuf_finish(&sb);
12936                 printf("%s", sbuf_data(&sb));
12937         }
12938 #endif /* CTL_TIME_IO */
12939
12940         switch (io->io_hdr.io_type) {
12941         case CTL_IO_SCSI:
12942                 break;
12943         case CTL_IO_TASK:
12944                 if (ctl_debug & CTL_DEBUG_INFO)
12945                         ctl_io_error_print(io, NULL);
12946                 fe_done(io);
12947                 return;
12948         default:
12949                 panic("%s: Invalid CTL I/O type %d\n",
12950                     __func__, io->io_hdr.io_type);
12951         }
12952
12953         if (lun == NULL) {
12954                 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
12955                                  io->io_hdr.nexus.targ_mapped_lun));
12956                 goto bailout;
12957         }
12958
12959         mtx_lock(&lun->lun_lock);
12960
12961         /*
12962          * Check to see if we have an informational exception to report,
12963          * and whether this command's status can be modified to report
12964          * it as RECOVERED ERROR or NO SENSE, per the MRIE mode page field.
12965          */
12966         if (lun->ie_reported == 0 && lun->ie_asc != 0 &&
12967             io->io_hdr.status == CTL_SUCCESS &&
12968             (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) {
12969                 uint8_t mrie = lun->MODE_IE.mrie;
12970                 uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) ||
12971                     (lun->MODE_VER.byte3 & SMS_VER_PER));
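                /*
                 * Report the exception as RECOVERED ERROR when MRIE asks
                 * for conditional reporting (and PER is set in the R/W or
                 * Verify error recovery pages) or unconditional reporting,
                 * or as NO SENSE when MRIE selects that, unless the command
                 * entry is flagged CTL_CMD_FLAG_NO_SENSE.
                 */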
12972                 if (((mrie == SIEP_MRIE_REC_COND && per) ||
12973                      mrie == SIEP_MRIE_REC_UNCOND ||
12974                      mrie == SIEP_MRIE_NO_SENSE) &&
12975                     (ctl_get_cmd_entry(&io->scsiio, NULL)->flags &
12976                      CTL_CMD_FLAG_NO_SENSE) == 0) {
12977                         ctl_set_sense(&io->scsiio,
12978                               /*current_error*/ 1,
12979                               /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ?
12980                                 SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR,
12981                               /*asc*/ lun->ie_asc,
12982                               /*ascq*/ lun->ie_ascq,
12983                               SSD_ELEM_NONE);
12984                         lun->ie_reported = 1;
12985                 }
12986         } else if (lun->ie_reported < 0)
12987                 lun->ie_reported = 0;
12988
12989         /*
12990          * Check to see if we have any errors to inject here.  We only
12991          * inject errors for commands that don't already have errors set.
12992          */
12993         if (!STAILQ_EMPTY(&lun->error_list) &&
12994             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
12995             ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
12996                 ctl_inject_error(lun, io);
12997
12998         /*
12999          * XXX KDM how do we treat commands that aren't completed
13000          * successfully?
13001          *
13002          * XXX KDM should we also track I/O latency?
13003          */
13004         if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
13005             io->io_hdr.io_type == CTL_IO_SCSI) {
13006                 int type;
13007 #ifdef CTL_TIME_IO
13008                 struct bintime bt;
13009
13010                 getbinuptime(&bt);
13011                 bintime_sub(&bt, &io->io_hdr.start_bt);
13012 #endif
13013                 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13014                     CTL_FLAG_DATA_IN)
13015                         type = CTL_STATS_READ;
13016                 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13017                     CTL_FLAG_DATA_OUT)
13018                         type = CTL_STATS_WRITE;
13019                 else
13020                         type = CTL_STATS_NO_IO;
13021
13022                 lun->stats.bytes[type] += io->scsiio.kern_total_len;
13023                 lun->stats.operations[type]++;
13024                 lun->stats.dmas[type] += io->io_hdr.num_dmas;
13025 #ifdef CTL_TIME_IO
13026                 bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt);
13027                 bintime_add(&lun->stats.time[type], &bt);
13028 #endif
13029
13030                 mtx_lock(&port->port_lock);
13031                 port->stats.bytes[type] += io->scsiio.kern_total_len;
13032                 port->stats.operations[type]++;
13033                 port->stats.dmas[type] += io->io_hdr.num_dmas;
13034 #ifdef CTL_TIME_IO
13035                 bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt);
13036                 bintime_add(&port->stats.time[type], &bt);
13037 #endif
13038                 mtx_unlock(&port->port_lock);
13039         }
13040
13041         /*
13042          * Run through the blocked queue of this I/O and see if anything
13043          * can be unblocked, now that this I/O is done and will be removed.
13044          * We must do this before removal so we still have its OOA position.
13045          */
13046         ctl_try_unblock_others(lun, io, TRUE);
13047
13048         /*
13049          * Remove this from the OOA queue.
13050          */
13051         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
13052 #ifdef CTL_TIME_IO
13053         if (TAILQ_EMPTY(&lun->ooa_queue))
13054                 lun->last_busy = getsbinuptime();
13055 #endif
13056
13057         /*
13058          * If the LUN has been invalidated, free it if there is nothing
13059          * left on its OOA queue.
13060          */
13061         if ((lun->flags & CTL_LUN_INVALID)
13062          && TAILQ_EMPTY(&lun->ooa_queue)) {
13063                 mtx_unlock(&lun->lun_lock);
13064                 ctl_free_lun(lun);
13065         } else
13066                 mtx_unlock(&lun->lun_lock);
13067
13068 bailout:
13069
13070         /*
13071          * If this command has been aborted, make sure we set the status
13072          * properly.  The FETD is responsible for freeing the I/O and doing
13073          * whatever it needs to do to clean up its state.
13074          */
13075         if (io->io_hdr.flags & CTL_FLAG_ABORT)
13076                 ctl_set_task_aborted(&io->scsiio);
13077
13078         /*
13079          * If enabled, print command error status.
13080          */
13081         if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
13082             (ctl_debug & CTL_DEBUG_INFO) != 0)
13083                 ctl_io_error_print(io, NULL);
13084
13085         /*
13086          * Tell the FETD or the other shelf controller we're done with this
13087          * command.  Note that only SCSI commands get to this point.  Task
13088          * management commands are completed above.
13089          */
13090         if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
13091             (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
13092                 memset(&msg, 0, sizeof(msg));
13093                 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
13094                 msg.hdr.serializing_sc = io->io_hdr.remote_io;
13095                 msg.hdr.nexus = io->io_hdr.nexus;
13096                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13097                     sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
13098                     M_WAITOK);
13099         }
13100
13101         fe_done(io);
13102 }
13103
13104 /*
13105  * The front end should call this if it doesn't do autosense.  When a
13106  * REQUEST SENSE comes in from the initiator, we'll dequeue and return it.
13107  */
13108 int
13109 ctl_queue_sense(union ctl_io *io)
13110 {
13111         struct ctl_softc *softc = CTL_SOFTC(io);
13112         struct ctl_port *port = CTL_PORT(io);
13113         struct ctl_lun *lun;
13114         struct scsi_sense_data *ps;
13115         uint32_t initidx, p, targ_lun;
13116
13117         CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13118
13119         targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13120
13121         /*
13122          * LUN lookup will likely move to the ctl_work_thread() once we
13123          * have our new queueing infrastructure (that doesn't put things on
13124          * a per-LUN queue initially).  That is so that we can handle
13125          * things like an INQUIRY to a LUN that we don't have enabled.  We
13126          * can't deal with that right now.
13127          * If we don't have a LUN for this, just toss the sense information.
13128          */
13129         mtx_lock(&softc->ctl_lock);
13130         if (targ_lun >= ctl_max_luns ||
13131             (lun = softc->ctl_luns[targ_lun]) == NULL) {
13132                 mtx_unlock(&softc->ctl_lock);
13133                 goto bailout;
13134         }
13135         mtx_lock(&lun->lun_lock);
13136         mtx_unlock(&softc->ctl_lock);
13137
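        /*
         * Pending sense is kept as one lazily allocated array of sense
         * buffers per port's worth of initiators; if the M_NOWAIT
         * allocation fails, the sense data is silently dropped.
         */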
13138         initidx = ctl_get_initindex(&io->io_hdr.nexus);
13139         p = initidx / CTL_MAX_INIT_PER_PORT;
13140         if (lun->pending_sense[p] == NULL) {
13141                 lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT,
13142                     M_CTL, M_NOWAIT | M_ZERO);
13143         }
13144         if ((ps = lun->pending_sense[p]) != NULL) {
13145                 ps += initidx % CTL_MAX_INIT_PER_PORT;
13146                 memset(ps, 0, sizeof(*ps));
13147                 memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len);
13148         }
13149         mtx_unlock(&lun->lun_lock);
13150
13151 bailout:
13152         ctl_free_io(io);
13153         return (CTL_RETVAL_COMPLETE);
13154 }
13155
13156 /*
13157  * Primary command inlet from frontend ports.  All SCSI and task I/O
13158  * requests must go through this function.
13159  */
13160 int
13161 ctl_queue(union ctl_io *io)
13162 {
13163         struct ctl_port *port = CTL_PORT(io);
13164
13165         CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
13166
13167 #ifdef CTL_TIME_IO
13168         io->io_hdr.start_time = time_uptime;
13169         getbinuptime(&io->io_hdr.start_bt);
13170 #endif /* CTL_TIME_IO */
13171
13172         /* Map FE-specific LUN ID into global one. */
13173         io->io_hdr.nexus.targ_mapped_lun =
13174             ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13175
13176         switch (io->io_hdr.io_type) {
13177         case CTL_IO_SCSI:
13178         case CTL_IO_TASK:
13179                 if (ctl_debug & CTL_DEBUG_CDB)
13180                         ctl_io_print(io);
13181                 ctl_enqueue_incoming(io);
13182                 break;
13183         default:
13184                 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
13185                 return (EINVAL);
13186         }
13187
13188         return (CTL_RETVAL_COMPLETE);
13189 }
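
/*
 * A minimal sketch of how a frontend might feed a command into ctl_queue();
 * the pool reference and the initiator_id/lun_id/tag/cdb values below are
 * placeholders, and details vary by FETD:
 *
 *	union ctl_io *io = ctl_alloc_io(port->ctl_pool_ref);
 *
 *	ctl_zero_io(io);
 *	io->io_hdr.io_type = CTL_IO_SCSI;
 *	io->io_hdr.nexus.initid = initiator_id;
 *	io->io_hdr.nexus.targ_port = port->targ_port;
 *	io->io_hdr.nexus.targ_lun = lun_id;
 *	io->scsiio.tag_num = tag;
 *	io->scsiio.tag_type = CTL_TAG_SIMPLE;
 *	io->scsiio.cdb_len = cdb_len;
 *	memcpy(io->scsiio.cdb, cdb, cdb_len);
 *	if (ctl_queue(io) != CTL_RETVAL_COMPLETE)
 *		ctl_free_io(io);
 */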
13190
13191 #ifdef CTL_IO_DELAY
13192 static void
13193 ctl_done_timer_wakeup(void *arg)
13194 {
13195         union ctl_io *io;
13196
13197         io = (union ctl_io *)arg;
13198         ctl_done(io);
13199 }
13200 #endif /* CTL_IO_DELAY */
13201
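/*
 * Called by a backend when the portion of a command that other commands
 * serialize against has completed, so that blocked commands may be
 * released before this one returns final status.
 */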
13202 void
13203 ctl_serseq_done(union ctl_io *io)
13204 {
13205         struct ctl_lun *lun = CTL_LUN(io);
13206
13207         if (lun->be_lun == NULL ||
13208             lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
13209                 return;
13210         mtx_lock(&lun->lun_lock);
13211         io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
13212         ctl_try_unblock_others(lun, io, FALSE);
13213         mtx_unlock(&lun->lun_lock);
13214 }
13215
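/*
 * Completion entry point: called once command processing is finished.
 * Unless completion is being delayed for testing, the I/O is queued to
 * a worker thread, which finishes it up in ctl_process_done().
 */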
13216 void
13217 ctl_done(union ctl_io *io)
13218 {
13219
13220         /*
13221          * Enable this to catch duplicate completion issues.
13222          */
13223 #if 0
13224         if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
13225                 printf("%s: type %d msg %d cdb %x iptl: "
13226                        "%u:%u:%u tag 0x%04x "
13227                        "flag %#x status %x\n",
13228                         __func__,
13229                         io->io_hdr.io_type,
13230                         io->io_hdr.msg_type,
13231                         io->scsiio.cdb[0],
13232                         io->io_hdr.nexus.initid,
13233                         io->io_hdr.nexus.targ_port,
13234                         io->io_hdr.nexus.targ_lun,
13235                         (io->io_hdr.io_type ==
13236                         CTL_IO_TASK) ?
13237                         io->taskio.tag_num :
13238                         io->scsiio.tag_num,
13239                         io->io_hdr.flags,
13240                         io->io_hdr.status);
13241         } else
13242                 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
13243 #endif
13244
13245         /*
13246          * This is an internal copy of an I/O, and should not go through
13247          * the normal done processing logic.
13248          */
13249         if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
13250                 return;
13251
13252 #ifdef CTL_IO_DELAY
13253         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13254                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13255         } else {
13256                 struct ctl_lun *lun = CTL_LUN(io);
13257
13258                 if ((lun != NULL)
13259                  && (lun->delay_info.done_delay > 0)) {
13261                         callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
13262                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13263                         callout_reset(&io->io_hdr.delay_callout,
13264                                       lun->delay_info.done_delay * hz,
13265                                       ctl_done_timer_wakeup, io);
13266                         if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
13267                                 lun->delay_info.done_delay = 0;
13268                         return;
13269                 }
13270         }
13271 #endif /* CTL_IO_DELAY */
13272
13273         ctl_enqueue_done(io);
13274 }
13275
13276 static void
13277 ctl_work_thread(void *arg)
13278 {
13279         struct ctl_thread *thr = (struct ctl_thread *)arg;
13280         struct ctl_softc *softc = thr->ctl_softc;
13281         union ctl_io *io;
13282         int retval;
13283
13284         CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
13285         thread_lock(curthread);
13286         sched_prio(curthread, PUSER - 1);
13287         thread_unlock(curthread);
13288
13289         while (!softc->shutdown) {
13290                 /*
13291                  * We handle the queues in this order:
13292                  * - ISC
13293                  * - done queue (to free up resources, unblock other commands)
13294                  * - incoming queue
13295                  * - RtR queue
13296                  *
13297                  * If all of those queues are empty, we sleep until new
13298                  * work arrives on one of them.
13299                  */
13300                 mtx_lock(&thr->queue_lock);
13301                 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
13302                 if (io != NULL) {
13303                         STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
13304                         mtx_unlock(&thr->queue_lock);
13305                         ctl_handle_isc(io);
13306                         continue;
13307                 }
13308                 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
13309                 if (io != NULL) {
13310                         STAILQ_REMOVE_HEAD(&thr->done_queue, links);
13311                         /* clear any blocked commands, call fe_done */
13312                         mtx_unlock(&thr->queue_lock);
13313                         ctl_process_done(io);
13314                         continue;
13315                 }
13316                 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
13317                 if (io != NULL) {
13318                         STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
13319                         mtx_unlock(&thr->queue_lock);
13320                         if (io->io_hdr.io_type == CTL_IO_TASK)
13321                                 ctl_run_task(io);
13322                         else
13323                                 ctl_scsiio_precheck(softc, &io->scsiio);
13324                         continue;
13325                 }
13326                 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
13327                 if (io != NULL) {
13328                         STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
13329                         mtx_unlock(&thr->queue_lock);
13330                         retval = ctl_scsiio(&io->scsiio);
13331                         if (retval != CTL_RETVAL_COMPLETE)
13332                                 CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
13333                         continue;
13334                 }
13335
13336                 /* Sleep until we have something to do. */
13337                 mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0);
13338         }
13339         thr->thread = NULL;
13340         kthread_exit();
13341 }
13342
13343 static void
13344 ctl_thresh_thread(void *arg)
13345 {
13346         struct ctl_softc *softc = (struct ctl_softc *)arg;
13347         struct ctl_lun *lun;
13348         struct ctl_logical_block_provisioning_page *page;
13349         const char *attr;
13350         union ctl_ha_msg msg;
13351         uint64_t thres, val;
13352         int i, e, set;
13353
13354         CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
13355         thread_lock(curthread);
13356         sched_prio(curthread, PUSER - 1);
13357         thread_unlock(curthread);
13358
13359         while (!softc->shutdown) {
13360                 mtx_lock(&softc->ctl_lock);
13361                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
13362                         if ((lun->flags & CTL_LUN_DISABLED) ||
13363                             (lun->flags & CTL_LUN_NO_MEDIA) ||
13364                             lun->backend->lun_attr == NULL)
13365                                 continue;
13366                         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
13367                             softc->ha_mode == CTL_HA_MODE_XFER)
13368                                 continue;
13369                         if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0)
13370                                 continue;
13371                         e = 0;
13372                         page = &lun->MODE_LBP;
13373                         for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
13374                                 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
13375                                         continue;
13376                                 thres = scsi_4btoul(page->descr[i].count);
13377                                 thres <<= CTL_LBP_EXPONENT;
13378                                 switch (page->descr[i].resource) {
13379                                 case 0x01:
13380                                         attr = "blocksavail";
13381                                         break;
13382                                 case 0x02:
13383                                         attr = "blocksused";
13384                                         break;
13385                                 case 0xf1:
13386                                         attr = "poolblocksavail";
13387                                         break;
13388                                 case 0xf2:
13389                                         attr = "poolblocksused";
13390                                         break;
13391                                 default:
13392                                         continue;
13393                                 }
13394                                 mtx_unlock(&softc->ctl_lock); /* XXX */
13395                                 val = lun->backend->lun_attr(lun->be_lun, attr);
13396                                 mtx_lock(&softc->ctl_lock);
13397                                 if (val == UINT64_MAX)
13398                                         continue;
13399                                 if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
13400                                     == SLBPPD_ARMING_INC)
13401                                         e = (val >= thres);
13402                                 else
13403                                         e = (val <= thres);
13404                                 if (e)
13405                                         break;
13406                         }
13407                         mtx_lock(&lun->lun_lock);
13408                         if (e) {
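                                /*
                                 * Record the byte offset of the triggering
                                 * descriptor within the LBP mode page; it
                                 * is later reported in the UA information.
                                 */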
13409                                 scsi_u64to8b((uint8_t *)&page->descr[i] -
13410                                     (uint8_t *)page, lun->ua_tpt_info);
13411                                 if (lun->lasttpt == 0 ||
13412                                     time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
13413                                         lun->lasttpt = time_uptime;
13414                                         ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13415                                         set = 1;
13416                                 } else
13417                                         set = 0;
13418                         } else {
13419                                 lun->lasttpt = 0;
13420                                 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13421                                 set = -1;
13422                         }
13423                         mtx_unlock(&lun->lun_lock);
13424                         if (set != 0 &&
13425                             lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
13426                                 /* Send msg to other side. */
13427                                 bzero(&msg.ua, sizeof(msg.ua));
13428                                 msg.hdr.msg_type = CTL_MSG_UA;
13429                                 msg.hdr.nexus.initid = -1;
13430                                 msg.hdr.nexus.targ_port = -1;
13431                                 msg.hdr.nexus.targ_lun = lun->lun;
13432                                 msg.hdr.nexus.targ_mapped_lun = lun->lun;
13433                                 msg.ua.ua_all = 1;
13434                                 msg.ua.ua_set = (set > 0);
13435                                 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
13436                                 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
13437                                 mtx_unlock(&softc->ctl_lock); /* XXX */
13438                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13439                                     sizeof(msg.ua), M_WAITOK);
13440                                 mtx_lock(&softc->ctl_lock);
13441                         }
13442                 }
13443                 mtx_sleep(&softc->thresh_thread, &softc->ctl_lock,
13444                     PDROP, "-", CTL_LBP_PERIOD * hz);
13445         }
13446         softc->thresh_thread = NULL;
13447         kthread_exit();
13448 }
13449
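/*
 * Periodically poll the backends' logical block provisioning attributes
 * and set or clear the thin provisioning threshold UA on each LUN,
 * according to the armed descriptors in its LBP mode page.
 */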
13450 static void
13451 ctl_enqueue_incoming(union ctl_io *io)
13452 {
13453         struct ctl_softc *softc = CTL_SOFTC(io);
13454         struct ctl_thread *thr;
13455         u_int idx;
13456
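        /*
         * Spread incoming I/Os over the worker threads by nexus, so that
         * requests from a given initiator and port stay ordered on a
         * single thread.
         */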
13457         idx = (io->io_hdr.nexus.targ_port * 127 +
13458                io->io_hdr.nexus.initid) % worker_threads;
13459         thr = &softc->threads[idx];
13460         mtx_lock(&thr->queue_lock);
13461         STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
13462         mtx_unlock(&thr->queue_lock);
13463         wakeup(thr);
13464 }
13465
13466 static void
13467 ctl_enqueue_rtr(union ctl_io *io)
13468 {
13469         struct ctl_softc *softc = CTL_SOFTC(io);
13470         struct ctl_thread *thr;
13471
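        /*
         * The RtR, done and ISC queues all hash on the mapped LUN, so
         * that everything for a given LUN is handled by the same thread.
         */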
13472         thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13473         mtx_lock(&thr->queue_lock);
13474         STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
13475         mtx_unlock(&thr->queue_lock);
13476         wakeup(thr);
13477 }
13478
13479 static void
13480 ctl_enqueue_done(union ctl_io *io)
13481 {
13482         struct ctl_softc *softc = CTL_SOFTC(io);
13483         struct ctl_thread *thr;
13484
13485         thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13486         mtx_lock(&thr->queue_lock);
13487         STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
13488         mtx_unlock(&thr->queue_lock);
13489         wakeup(thr);
13490 }
13491
13492 static void
13493 ctl_enqueue_isc(union ctl_io *io)
13494 {
13495         struct ctl_softc *softc = CTL_SOFTC(io);
13496         struct ctl_thread *thr;
13497
13498         thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13499         mtx_lock(&thr->queue_lock);
13500         STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13501         mtx_unlock(&thr->queue_lock);
13502         wakeup(thr);
13503 }
13504
13505 /*
13506  *  vim: ts=8
13507  */