/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
        DBGCNF_PAGE_CODE | SMPH_SPF,    /* page_code */
        DBGCNF_SUBPAGE_CODE,            /* subpage */
        {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
         (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
        DBGCNF_VERSION,                 /* page_version */
        {CTL_TIME_IO_DEFAULT_SECS>>8,
         CTL_TIME_IO_DEFAULT_SECS>>0},  /* ctl_time_io_secs */
};

const static struct copan_debugconf_subpage debugconf_page_changeable = {
        DBGCNF_PAGE_CODE | SMPH_SPF,    /* page_code */
        DBGCNF_SUBPAGE_CODE,            /* subpage */
        {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
         (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
        0,                              /* page_version */
        {0xff,0xff},                    /* ctl_time_io_secs */
};
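
/*
 * Editor's note: the {x >> 8, x >> 0} pairs above hand-encode 16-bit
 * values in the big-endian byte order SCSI uses on the wire, because a
 * const initializer cannot call the runtime helpers.  A sketch of the
 * equivalent runtime conversions using the scsi_all.h helpers:
 */
#if 0
static void
example_time_io_encoding(void)
{
        uint8_t secs[2];

        scsi_ulto2b(CTL_TIME_IO_DEFAULT_SECS, secs);    /* encode */
        KASSERT(scsi_2btoul(secs) == CTL_TIME_IO_DEFAULT_SECS,
            ("byte-pair round trip"));                  /* decode */
}
#endif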

const static struct scsi_da_rw_recovery_page rw_er_page_default = {
        /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
        /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
        /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
        /*read_retry_count*/0,
        /*correction_span*/0,
        /*head_offset_count*/0,
        /*data_strobe_offset_cnt*/0,
        /*byte8*/SMS_RWER_LBPERE,
        /*write_retry_count*/0,
        /*reserved2*/0,
        /*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
        /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
        /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
        /*byte3*/0,
        /*read_retry_count*/0,
        /*correction_span*/0,
        /*head_offset_count*/0,
        /*data_strobe_offset_cnt*/0,
        /*byte8*/0,
        /*write_retry_count*/0,
        /*reserved2*/0,
        /*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
                                CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ SFP_HSEC,
        /*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {0, 0},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ 0,
        /*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ CTL_DEFAULT_HEADS,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ SRDP_RPL_DISABLED,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
                           CTL_DEFAULT_ROTATION_RATE & 0xff},
        /*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ 0,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ 0,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {0, 0},
        /*reserved2*/ {0, 0}
};

const static struct scsi_caching_page caching_page_default = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_DISC | SCP_WCE,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0xff, 0xff},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0xff, 0xff},
        /*max_pf_ceiling*/ {0xff, 0xff},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_WCE | SCP_RCD,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0, 0},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0, 0},
        /*max_pf_ceiling*/ {0, 0},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/0,
        /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
        /*eca_and_aen*/0,
        /*flags4*/SCP_TAS,
        /*aen_holdoff_period*/{0, 0},
        /*busy_timeout_period*/{0, 0},
        /*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/SCP_DSENSE,
        /*queue_flags*/SCP_QUEUE_ALG_MASK,
        /*eca_and_aen*/SCP_SWP,
        /*flags4*/0,
        /*aen_holdoff_period*/{0, 0},
        /*busy_timeout_period*/{0, 0},
        /*extended_selftest_completion_time*/{0, 0}
};

#define CTL_CEM_LEN     (sizeof(struct scsi_control_ext_page) - 4)

const static struct scsi_control_ext_page control_ext_page_default = {
        /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
        /*subpage_code*/0x01,
        /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
        /*flags*/0,
        /*prio*/0,
        /*max_sense*/0
};

const static struct scsi_control_ext_page control_ext_page_changeable = {
        /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF,
        /*subpage_code*/0x01,
        /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN},
        /*flags*/0,
        /*prio*/0,
        /*max_sense*/0
};

const static struct scsi_info_exceptions_page ie_page_default = {
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
        /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
        /*info_flags*/SIEP_FLAGS_DEXCPT,
        /*mrie*/0,
        /*interval_timer*/{0, 0, 0, 0},
        /*report_count*/{0, 0, 0, 0}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
        /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
        /*info_flags*/0,
        /*mrie*/0,
        /*interval_timer*/{0, 0, 0, 0},
        /*report_count*/{0, 0, 0, 0}
};

#define CTL_LBPM_LEN    (sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
        /*subpage_code*/0x02,
        /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
        /*flags*/0,
        /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
        /*descr*/{}},
        {{/*flags*/0,
          /*resource*/0x01,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0x02,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0xf1,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0xf2,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}}
        }
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
        /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
        /*subpage_code*/0x02,
        /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
        /*flags*/0,
        /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
        /*descr*/{}},
        {{/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}},
         {/*flags*/0,
          /*resource*/0,
          /*reserved*/{0, 0},
          /*count*/{0, 0, 0, 0}}
        }
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
        /*page_code*/SMS_CDDVD_CAPS_PAGE,
        /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
        /*caps1*/0x3f,
        /*caps2*/0x00,
        /*caps3*/0xf0,
        /*caps4*/0x00,
        /*caps5*/0x29,
        /*caps6*/0x00,
        /*obsolete*/{0, 0},
        /*nvol_levels*/{0, 0},
        /*buffer_size*/{8, 0},
        /*obsolete2*/{0, 0},
        /*reserved*/0,
        /*digital*/0,
        /*obsolete3*/0,
        /*copy_management*/0,
        /*reserved2*/0,
        /*rotation_control*/0,
        /*cur_write_speed*/0,
        /*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
        /*page_code*/SMS_CDDVD_CAPS_PAGE,
        /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
        /*caps1*/0,
        /*caps2*/0,
        /*caps3*/0,
        /*caps4*/0,
        /*caps5*/0,
        /*caps6*/0,
        /*obsolete*/{0, 0},
        /*nvol_levels*/{0, 0},
        /*buffer_size*/{0, 0},
        /*obsolete2*/{0, 0},
        /*reserved*/0,
        /*digital*/0,
        /*obsolete3*/0,
        /*copy_management*/0,
        /*reserved2*/0,
        /*rotation_control*/0,
        /*cur_write_speed*/0,
        /*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
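
/*
 * Editor's note: both knobs above are settable as loader tunables, and
 * the debug flags also at run time (CTLFLAG_RWTUN), e.g.:
 *
 *      # /boot/loader.conf
 *      kern.cam.ctl.worker_threads="4"
 *
 *      # at run time
 *      sysctl kern.cam.ctl.debug=1
 */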

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES   10
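
/*
 * Editor's illustration (a sketch, not driver code): the wire image of
 * the Supported VPD Pages (0x00) response that these ten page codes
 * produce, per the SPC-4 layout.  The real response is assembled in
 * ctl_inquiry_evpd_supported(); T_DIRECT for byte 0 is an assumption.
 */
#if 0
static const uint8_t vpd00_example[4 + SCSI_EVPD_NUM_SUPPORTED_PAGES] = {
        T_DIRECT,                       /* peripheral qualifier/type */
        0x00,                           /* page code: supported pages */
        0x00, SCSI_EVPD_NUM_SUPPORTED_PAGES, /* page length, big-endian */
        0x00, 0x80, 0x83, 0x86, 0x87,   /* the page codes, ascending */
        0x88, 0x8f, 0xb0, 0xb1, 0xb2,
};
#endif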

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
                                  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
                              struct ctl_ooa *ooa_hdr,
                              struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
                     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
                         struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
                           uint64_t res_key, uint64_t sa_res_key,
                           uint8_t type, uint32_t residx,
                           struct ctl_scsiio *ctsio,
                           struct scsi_per_res_out *cdb,
                           struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
                                  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
                                         int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
                                         int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
                                union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
                                const struct ctl_cmd_entry *entry,
                                struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
                               struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
                            ctl_ua_type ua_type);
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
                         ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
                                    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static void ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       ctl_open,
        .d_close =      ctl_close,
        .d_ioctl =      ctl_ioctl,
        .d_name =       "ctl",
};
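
/*
 * Editor's illustration: userland reaches these entry points through
 * the character device node that ctl_init() creates; the node path
 * below is an assumption for this sketch, and the real ioctl commands
 * live in ctl_ioctl.h.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

static int
example_open_ctl(void)
{
        int fd = open("/dev/cam/ctl", O_RDWR);  /* path assumed */

        if (fd < 0) {
                perror("open");
                return (-1);
        }
        /* ioctl(fd, <CTL_* command>, &arg) requests land in ctl_ioctl(). */
        return (fd);
}
#endif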


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
        "ctl",
        ctl_module_event_handler,
        NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);
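
/*
 * Editor's note: with the declarations above, CTL can also be built as
 * a kernel module and loaded at boot or run time (e.g. "kldload ctl"),
 * assuming it was not already compiled into the kernel.
 */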

static struct ctl_frontend ha_frontend =
{
        .name = "ha",
};

static void
ctl_ha_datamove(union ctl_io *io)
{
        struct ctl_lun *lun;
        struct ctl_sg_entry *sgl;
        union ctl_ha_msg msg;
        uint32_t sg_entries_sent;
        int do_sg_copy, i, j;

        lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
        memset(&msg.dt, 0, sizeof(msg.dt));
        msg.hdr.msg_type = CTL_MSG_DATAMOVE;
        msg.hdr.original_sc = io->io_hdr.original_sc;
        msg.hdr.serializing_sc = io;
        msg.hdr.nexus = io->io_hdr.nexus;
        msg.hdr.status = io->io_hdr.status;
        msg.dt.flags = io->io_hdr.flags;

        /*
         * We convert everything into a S/G list here.  We can't
         * pass by reference, only by value between controllers.
         * So we can't pass a pointer to the S/G list, only as many
         * S/G entries as we can fit in here.  If it's possible for
         * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
         * then we need to break this up into multiple transfers.
         */
        if (io->scsiio.kern_sg_entries == 0) {
                msg.dt.kern_sg_entries = 1;
#if 0
                if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
                        msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
                } else {
                        /* XXX KDM use busdma here! */
                        msg.dt.sg_list[0].addr =
                            (void *)vtophys(io->scsiio.kern_data_ptr);
                }
#else
                KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
                    ("HA does not support BUS_ADDR"));
                msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr;
#endif
                msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
                do_sg_copy = 0;
        } else {
                msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
                do_sg_copy = 1;
        }

        msg.dt.kern_data_len = io->scsiio.kern_data_len;
        msg.dt.kern_total_len = io->scsiio.kern_total_len;
        msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
        msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
        msg.dt.sg_sequence = 0;

        /*
         * Loop until we've sent all of the S/G entries.  On the
         * other end, we'll recompose these S/G entries into one
         * contiguous list before processing.
         */
        for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries;
            msg.dt.sg_sequence++) {
                msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) /
                    sizeof(msg.dt.sg_list[0])),
                    msg.dt.kern_sg_entries - sg_entries_sent);
                if (do_sg_copy != 0) {
                        sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                        for (i = sg_entries_sent, j = 0;
                             j < msg.dt.cur_sg_entries; i++, j++) {
#if 0
                                if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
                                        msg.dt.sg_list[j].addr = sgl[i].addr;
                                } else {
                                        /* XXX KDM use busdma here! */
                                        msg.dt.sg_list[j].addr =
                                            (void *)vtophys(sgl[i].addr);
                                }
#else
                                KASSERT((io->io_hdr.flags &
                                    CTL_FLAG_BUS_ADDR) == 0,
                                    ("HA does not support BUS_ADDR"));
                                msg.dt.sg_list[j].addr = sgl[i].addr;
#endif
                                msg.dt.sg_list[j].len = sgl[i].len;
                        }
                }

                sg_entries_sent += msg.dt.cur_sg_entries;
                msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries);
                if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
                    sizeof(msg.dt) - sizeof(msg.dt.sg_list) +
                    sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries,
                    M_WAITOK) > CTL_HA_STATUS_SUCCESS) {
                        io->io_hdr.port_status = 31341;
                        io->scsiio.be_move_done(io);
                        return;
                }
                msg.dt.sent_sg_entries = sg_entries_sent;
        }

        /*
         * Officially hand over the request from us to the peer.
         * If failover has just happened, then we must return an error.
         * If failover happens just after, then it is not our problem.
         */
        if (lun)
                mtx_lock(&lun->lun_lock);
        if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
                if (lun)
                        mtx_unlock(&lun->lun_lock);
                io->io_hdr.port_status = 31342;
                io->scsiio.be_move_done(io);
                return;
        }
        io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
        io->io_hdr.flags |= CTL_FLAG_DMA_INPROG;
        if (lun)
                mtx_unlock(&lun->lun_lock);
}
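
/*
 * Editor's sketch of the chunking idiom used in ctl_ha_datamove()
 * above: copy an arbitrary S/G list into fixed-size messages, MIN()
 * entries at a time, flagging the final chunk.  CHUNK_MAX and the
 * send_chunk() callback are hypothetical stand-ins for the sg_list
 * capacity and ctl_ha_msg_send().
 */
#if 0
#define CHUNK_MAX       16

static int
example_send_sg_chunked(const struct ctl_sg_entry *sgl, int nentries,
    int (*send_chunk)(const struct ctl_sg_entry *, int cnt, int last))
{
        int sent, cur;

        for (sent = 0; sent < nentries; sent += cur) {
                cur = MIN(CHUNK_MAX, nentries - sent);
                if (send_chunk(&sgl[sent], cur, sent + cur >= nentries) != 0)
                        return (EIO);   /* caller reports port_status */
        }
        return (0);
}
#endif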

static void
ctl_ha_done(union ctl_io *io)
{
        union ctl_ha_msg msg;

        if (io->io_hdr.io_type == CTL_IO_SCSI) {
                memset(&msg, 0, sizeof(msg));
                msg.hdr.msg_type = CTL_MSG_FINISH_IO;
                msg.hdr.original_sc = io->io_hdr.original_sc;
                msg.hdr.nexus = io->io_hdr.nexus;
                msg.hdr.status = io->io_hdr.status;
                msg.scsi.scsi_status = io->scsiio.scsi_status;
                msg.scsi.tag_num = io->scsiio.tag_num;
                msg.scsi.tag_type = io->scsiio.tag_type;
                msg.scsi.sense_len = io->scsiio.sense_len;
                msg.scsi.sense_residual = io->scsiio.sense_residual;
                msg.scsi.residual = io->scsiio.residual;
                memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
                    io->scsiio.sense_len);
                ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
                    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
                    msg.scsi.sense_len, M_WAITOK);
        }
        ctl_free_io(io);
}

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
                            union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.original_sc == NULL) {
                printf("%s: original_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.original_sc->scsiio;
        ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctsio->io_hdr.status = msg_info->hdr.status;
        ctsio->scsi_status = msg_info->scsi.scsi_status;
        ctsio->sense_len = msg_info->scsi.sense_len;
        ctsio->sense_residual = msg_info->scsi.sense_residual;
        ctsio->residual = msg_info->scsi.residual;
        memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
               msg_info->scsi.sense_len);
        ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
                                union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.serializing_sc == NULL) {
                printf("%s: serializing_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.serializing_sc->scsiio;
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctl_enqueue_isc((union ctl_io *)ctsio);
}

void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
        struct ctl_softc *softc = lun->ctl_softc;
        union ctl_ha_msg *msg;
        struct ctl_ha_msg_lun_pr_key pr_key;
        int i, k;

        if (softc->ha_link != CTL_HA_LINK_ONLINE)
                return;
        mtx_lock(&lun->lun_lock);
        i = sizeof(msg->lun);
        if (lun->lun_devid)
                i += lun->lun_devid->len;
        i += sizeof(pr_key) * lun->pr_key_count;
alloc:
        mtx_unlock(&lun->lun_lock);
        msg = malloc(i, M_CTL, M_WAITOK);
        mtx_lock(&lun->lun_lock);
        k = sizeof(msg->lun);
        if (lun->lun_devid)
                k += lun->lun_devid->len;
        k += sizeof(pr_key) * lun->pr_key_count;
        if (i < k) {
                free(msg, M_CTL);
                i = k;
                goto alloc;
        }
        bzero(&msg->lun, sizeof(msg->lun));
        msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
        msg->hdr.nexus.targ_lun = lun->lun;
        msg->hdr.nexus.targ_mapped_lun = lun->lun;
        msg->lun.flags = lun->flags;
        msg->lun.pr_generation = lun->pr_generation;
        msg->lun.pr_res_idx = lun->pr_res_idx;
        msg->lun.pr_res_type = lun->pr_res_type;
        msg->lun.pr_key_count = lun->pr_key_count;
        i = 0;
        if (lun->lun_devid) {
                msg->lun.lun_devid_len = lun->lun_devid->len;
                memcpy(&msg->lun.data[i], lun->lun_devid->data,
                    msg->lun.lun_devid_len);
                i += msg->lun.lun_devid_len;
        }
        for (k = 0; k < CTL_MAX_INITIATORS; k++) {
                if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0)
                        continue;
                pr_key.pr_iid = k;
                memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key));
                i += sizeof(pr_key);
        }
        mtx_unlock(&lun->lun_lock);
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->lun, sizeof(msg->lun) + i,
            M_WAITOK);
        free(msg, M_CTL);

        if (lun->flags & CTL_LUN_PRIMARY_SC) {
                for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
                        ctl_isc_announce_mode(lun, -1,
                            lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
                            lun->mode_pages.index[i].subpage);
                }
        }
}
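
/*
 * Editor's sketch of the allocate-and-revalidate pattern used in
 * ctl_isc_announce_lun() above: the message size is computed under the
 * lock, the lock is dropped for the sleepable M_WAITOK allocation, and
 * the size is recomputed after relocking in case the object grew while
 * unlocked.  "struct obj" and compute_size() are hypothetical.
 */
#if 0
static void *
example_alloc_sized_locked(struct obj *o, int *sizep)
{
        void *buf;
        int size, newsize;

        mtx_assert(&o->lock, MA_OWNED);
        size = compute_size(o);
again:
        mtx_unlock(&o->lock);
        buf = malloc(size, M_CTL, M_WAITOK);    /* may sleep */
        mtx_lock(&o->lock);
        newsize = compute_size(o);
        if (size < newsize) {           /* grew while we slept */
                free(buf, M_CTL);
                size = newsize;
                goto again;
        }
        *sizep = size;
        return (buf);
}
#endif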

void
ctl_isc_announce_port(struct ctl_port *port)
{
        struct ctl_softc *softc = port->ctl_softc;
        union ctl_ha_msg *msg;
        int i;

        if (port->targ_port < softc->port_min ||
            port->targ_port >= softc->port_max ||
            softc->ha_link != CTL_HA_LINK_ONLINE)
                return;
        i = sizeof(msg->port) + strlen(port->port_name) + 1;
        if (port->lun_map)
                i += sizeof(uint32_t) * CTL_MAX_LUNS;
        if (port->port_devid)
                i += port->port_devid->len;
        if (port->target_devid)
                i += port->target_devid->len;
        if (port->init_devid)
                i += port->init_devid->len;
        msg = malloc(i, M_CTL, M_WAITOK);
        bzero(&msg->port, sizeof(msg->port));
        msg->hdr.msg_type = CTL_MSG_PORT_SYNC;
        msg->hdr.nexus.targ_port = port->targ_port;
        msg->port.port_type = port->port_type;
        msg->port.physical_port = port->physical_port;
        msg->port.virtual_port = port->virtual_port;
        msg->port.status = port->status;
        i = 0;
        msg->port.name_len = sprintf(&msg->port.data[i],
            "%d:%s", softc->ha_id, port->port_name) + 1;
        i += msg->port.name_len;
        if (port->lun_map) {
                msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS;
                memcpy(&msg->port.data[i], port->lun_map,
                    msg->port.lun_map_len);
                i += msg->port.lun_map_len;
        }
        if (port->port_devid) {
                msg->port.port_devid_len = port->port_devid->len;
                memcpy(&msg->port.data[i], port->port_devid->data,
                    msg->port.port_devid_len);
                i += msg->port.port_devid_len;
        }
        if (port->target_devid) {
                msg->port.target_devid_len = port->target_devid->len;
                memcpy(&msg->port.data[i], port->target_devid->data,
                    msg->port.target_devid_len);
                i += msg->port.target_devid_len;
        }
        if (port->init_devid) {
                msg->port.init_devid_len = port->init_devid->len;
                memcpy(&msg->port.data[i], port->init_devid->data,
                    msg->port.init_devid_len);
                i += msg->port.init_devid_len;
        }
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i,
            M_WAITOK);
        free(msg, M_CTL);
}

void
ctl_isc_announce_iid(struct ctl_port *port, int iid)
{
        struct ctl_softc *softc = port->ctl_softc;
        union ctl_ha_msg *msg;
        int i, l;

        if (port->targ_port < softc->port_min ||
            port->targ_port >= softc->port_max ||
            softc->ha_link != CTL_HA_LINK_ONLINE)
                return;
        mtx_lock(&softc->ctl_lock);
        i = sizeof(msg->iid);
        l = 0;
        if (port->wwpn_iid[iid].name)
                l = strlen(port->wwpn_iid[iid].name) + 1;
        i += l;
        msg = malloc(i, M_CTL, M_NOWAIT);
        if (msg == NULL) {
                mtx_unlock(&softc->ctl_lock);
                return;
        }
        bzero(&msg->iid, sizeof(msg->iid));
        msg->hdr.msg_type = CTL_MSG_IID_SYNC;
        msg->hdr.nexus.targ_port = port->targ_port;
        msg->hdr.nexus.initid = iid;
        msg->iid.in_use = port->wwpn_iid[iid].in_use;
        msg->iid.name_len = l;
        msg->iid.wwpn = port->wwpn_iid[iid].wwpn;
        if (port->wwpn_iid[iid].name)
                strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l);
        mtx_unlock(&softc->ctl_lock);
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT);
        free(msg, M_CTL);
}

void
ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx,
    uint8_t page, uint8_t subpage)
{
        struct ctl_softc *softc = lun->ctl_softc;
        union ctl_ha_msg msg;
        int i;

        if (softc->ha_link != CTL_HA_LINK_ONLINE)
                return;
        for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
                if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
                    page && lun->mode_pages.index[i].subpage == subpage)
                        break;
        }
        if (i == CTL_NUM_MODE_PAGES)
                return;

        /* Don't try to replicate pages not present on this device. */
        if (lun->mode_pages.index[i].page_data == NULL)
                return;

        bzero(&msg.mode, sizeof(msg.mode));
        msg.hdr.msg_type = CTL_MSG_MODE_SYNC;
        msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT;
        msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT;
        msg.hdr.nexus.targ_lun = lun->lun;
        msg.hdr.nexus.targ_mapped_lun = lun->lun;
        msg.mode.page_code = page;
        msg.mode.subpage = subpage;
        msg.mode.page_len = lun->mode_pages.index[i].page_len;
        memcpy(msg.mode.data, lun->mode_pages.index[i].page_data,
            msg.mode.page_len);
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode),
            M_WAITOK);
}

static void
ctl_isc_ha_link_up(struct ctl_softc *softc)
{
        struct ctl_port *port;
        struct ctl_lun *lun;
        union ctl_ha_msg msg;
        int i;

        /* Announce this node's parameters to the peer for validation. */
        msg.login.msg_type = CTL_MSG_LOGIN;
        msg.login.version = CTL_HA_VERSION;
        msg.login.ha_mode = softc->ha_mode;
        msg.login.ha_id = softc->ha_id;
        msg.login.max_luns = CTL_MAX_LUNS;
        msg.login.max_ports = CTL_MAX_PORTS;
        msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
        ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
            M_WAITOK);

        STAILQ_FOREACH(port, &softc->port_list, links) {
                ctl_isc_announce_port(port);
                for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
                        if (port->wwpn_iid[i].in_use)
                                ctl_isc_announce_iid(port, i);
                }
        }
        STAILQ_FOREACH(lun, &softc->lun_list, links)
                ctl_isc_announce_lun(lun);
}

static void
ctl_isc_ha_link_down(struct ctl_softc *softc)
{
        struct ctl_port *port;
        struct ctl_lun *lun;
        union ctl_io *io;
        int i;

        mtx_lock(&softc->ctl_lock);
        STAILQ_FOREACH(lun, &softc->lun_list, links) {
                mtx_lock(&lun->lun_lock);
                if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
                        lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
                        ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
                }
                mtx_unlock(&lun->lun_lock);

                mtx_unlock(&softc->ctl_lock);
                io = ctl_alloc_io(softc->othersc_pool);
                mtx_lock(&softc->ctl_lock);
                ctl_zero_io(io);
                io->io_hdr.msg_type = CTL_MSG_FAILOVER;
                io->io_hdr.nexus.targ_mapped_lun = lun->lun;
                ctl_enqueue_isc(io);
        }

        STAILQ_FOREACH(port, &softc->port_list, links) {
                if (port->targ_port >= softc->port_min &&
                    port->targ_port < softc->port_max)
                        continue;
                port->status &= ~CTL_PORT_STATUS_ONLINE;
                for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
                        port->wwpn_iid[i].in_use = 0;
                        free(port->wwpn_iid[i].name, M_CTL);
                        port->wwpn_iid[i].name = NULL;
                }
        }
        mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
        struct ctl_lun *lun;
        uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);

        mtx_lock(&softc->ctl_lock);
        if (msg->hdr.nexus.targ_mapped_lun < CTL_MAX_LUNS &&
            (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) {
                mtx_lock(&lun->lun_lock);
                mtx_unlock(&softc->ctl_lock);
                if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES &&
                    msg->ua.ua_set)
                        memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8);
                if (msg->ua.ua_all) {
                        if (msg->ua.ua_set)
                                ctl_est_ua_all(lun, iid, msg->ua.ua_type);
                        else
                                ctl_clr_ua_all(lun, iid, msg->ua.ua_type);
                } else {
                        if (msg->ua.ua_set)
                                ctl_est_ua(lun, iid, msg->ua.ua_type);
                        else
                                ctl_clr_ua(lun, iid, msg->ua.ua_type);
                }
                mtx_unlock(&lun->lun_lock);
        } else
                mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
        struct ctl_lun *lun;
        struct ctl_ha_msg_lun_pr_key pr_key;
        int i, k;
        ctl_lun_flags oflags;
        uint32_t targ_lun;

        targ_lun = msg->hdr.nexus.targ_mapped_lun;
        mtx_lock(&softc->ctl_lock);
        if ((targ_lun >= CTL_MAX_LUNS) ||
            ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
                mtx_unlock(&softc->ctl_lock);
                return;
        }
        mtx_lock(&lun->lun_lock);
        mtx_unlock(&softc->ctl_lock);
        if (lun->flags & CTL_LUN_DISABLED) {
                mtx_unlock(&lun->lun_lock);
                return;
        }
        i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
        if (msg->lun.lun_devid_len != i || (i > 0 &&
            memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
                mtx_unlock(&lun->lun_lock);
                printf("%s: Received conflicting HA LUN %d\n",
                    __func__, msg->hdr.nexus.targ_lun);
                return;
        } else {
                /* Record whether peer is primary. */
                oflags = lun->flags;
                if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
                    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
                        lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
                else
                        lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
                if (oflags != lun->flags)
                        ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

                /* If peer is primary and we are not -- use data */
                if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
                    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
                        lun->pr_generation = msg->lun.pr_generation;
                        lun->pr_res_idx = msg->lun.pr_res_idx;
                        lun->pr_res_type = msg->lun.pr_res_type;
                        lun->pr_key_count = msg->lun.pr_key_count;
                        for (k = 0; k < CTL_MAX_INITIATORS; k++)
                                ctl_clr_prkey(lun, k);
                        for (k = 0; k < msg->lun.pr_key_count; k++) {
                                memcpy(&pr_key, &msg->lun.data[i],
                                    sizeof(pr_key));
                                ctl_alloc_prkey(lun, pr_key.pr_iid);
                                ctl_set_prkey(lun, pr_key.pr_iid,
                                    pr_key.pr_key);
                                i += sizeof(pr_key);
                        }
                }

                mtx_unlock(&lun->lun_lock);
                CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
                    __func__, msg->hdr.nexus.targ_lun,
                    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
                    "primary" : "secondary"));

                /* If we are primary but peer doesn't know -- notify */
                if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
                    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
                        ctl_isc_announce_lun(lun);
        }
}

static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
        struct ctl_port *port;
        struct ctl_lun *lun;
        int i, new;

        port = softc->ctl_ports[msg->hdr.nexus.targ_port];
        if (port == NULL) {
                CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
                    msg->hdr.nexus.targ_port));
                new = 1;
                port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
                port->frontend = &ha_frontend;
                port->targ_port = msg->hdr.nexus.targ_port;
                port->fe_datamove = ctl_ha_datamove;
                port->fe_done = ctl_ha_done;
        } else if (port->frontend == &ha_frontend) {
                CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
                    msg->hdr.nexus.targ_port));
                new = 0;
        } else {
                printf("%s: Received conflicting HA port %d\n",
                    __func__, msg->hdr.nexus.targ_port);
                return;
        }
        port->port_type = msg->port.port_type;
        port->physical_port = msg->port.physical_port;
        port->virtual_port = msg->port.virtual_port;
        port->status = msg->port.status;
        i = 0;
        free(port->port_name, M_CTL);
        port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
            M_CTL);
        i += msg->port.name_len;
        if (msg->port.lun_map_len != 0) {
                if (port->lun_map == NULL)
                        port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
                            M_CTL, M_WAITOK);
                memcpy(port->lun_map, &msg->port.data[i],
                    sizeof(uint32_t) * CTL_MAX_LUNS);
                i += msg->port.lun_map_len;
        } else {
                free(port->lun_map, M_CTL);
                port->lun_map = NULL;
        }
        if (msg->port.port_devid_len != 0) {
                if (port->port_devid == NULL ||
                    port->port_devid->len != msg->port.port_devid_len) {
                        free(port->port_devid, M_CTL);
                        port->port_devid = malloc(sizeof(struct ctl_devid) +
                            msg->port.port_devid_len, M_CTL, M_WAITOK);
                }
                memcpy(port->port_devid->data, &msg->port.data[i],
                    msg->port.port_devid_len);
                port->port_devid->len = msg->port.port_devid_len;
                i += msg->port.port_devid_len;
        } else {
                free(port->port_devid, M_CTL);
                port->port_devid = NULL;
        }
        if (msg->port.target_devid_len != 0) {
                if (port->target_devid == NULL ||
                    port->target_devid->len != msg->port.target_devid_len) {
                        free(port->target_devid, M_CTL);
                        port->target_devid = malloc(sizeof(struct ctl_devid) +
                            msg->port.target_devid_len, M_CTL, M_WAITOK);
                }
                memcpy(port->target_devid->data, &msg->port.data[i],
                    msg->port.target_devid_len);
                port->target_devid->len = msg->port.target_devid_len;
                i += msg->port.target_devid_len;
        } else {
                free(port->target_devid, M_CTL);
                port->target_devid = NULL;
        }
        if (msg->port.init_devid_len != 0) {
                if (port->init_devid == NULL ||
                    port->init_devid->len != msg->port.init_devid_len) {
                        free(port->init_devid, M_CTL);
                        port->init_devid = malloc(sizeof(struct ctl_devid) +
                            msg->port.init_devid_len, M_CTL, M_WAITOK);
                }
                memcpy(port->init_devid->data, &msg->port.data[i],
                    msg->port.init_devid_len);
                port->init_devid->len = msg->port.init_devid_len;
                i += msg->port.init_devid_len;
        } else {
                free(port->init_devid, M_CTL);
                port->init_devid = NULL;
        }
        if (new) {
                if (ctl_port_register(port) != 0) {
                        printf("%s: ctl_port_register() failed with error\n",
                            __func__);
                }
        }
        mtx_lock(&softc->ctl_lock);
        STAILQ_FOREACH(lun, &softc->lun_list, links) {
                if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
                        continue;
                mtx_lock(&lun->lun_lock);
                ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
                mtx_unlock(&lun->lun_lock);
        }
        mtx_unlock(&softc->ctl_lock);
}

static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
        struct ctl_port *port;
        int iid;

        port = softc->ctl_ports[msg->hdr.nexus.targ_port];
        if (port == NULL) {
                printf("%s: Received IID for unknown port %d\n",
                    __func__, msg->hdr.nexus.targ_port);
                return;
        }
        iid = msg->hdr.nexus.initid;
        port->wwpn_iid[iid].in_use = msg->iid.in_use;
        port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
        free(port->wwpn_iid[iid].name, M_CTL);
        if (msg->iid.name_len) {
                port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
                    msg->iid.name_len, M_CTL);
        } else
                port->wwpn_iid[iid].name = NULL;
}

static void
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

        if (msg->login.version != CTL_HA_VERSION) {
                printf("CTL HA peers have different versions %d != %d\n",
                    msg->login.version, CTL_HA_VERSION);
                ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
                return;
        }
        if (msg->login.ha_mode != softc->ha_mode) {
                printf("CTL HA peers have different ha_mode %d != %d\n",
                    msg->login.ha_mode, softc->ha_mode);
                ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
                return;
        }
        if (msg->login.ha_id == softc->ha_id) {
                printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
                ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
                return;
        }
        if (msg->login.max_luns != CTL_MAX_LUNS ||
            msg->login.max_ports != CTL_MAX_PORTS ||
            msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
                printf("CTL HA peers have different limits\n");
                ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
                return;
        }
}

static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
        struct ctl_lun *lun;
        int i;
        uint32_t initidx, targ_lun;

        targ_lun = msg->hdr.nexus.targ_mapped_lun;
        mtx_lock(&softc->ctl_lock);
        if ((targ_lun >= CTL_MAX_LUNS) ||
            ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
                mtx_unlock(&softc->ctl_lock);
                return;
        }
        mtx_lock(&lun->lun_lock);
        mtx_unlock(&softc->ctl_lock);
        if (lun->flags & CTL_LUN_DISABLED) {
                mtx_unlock(&lun->lun_lock);
                return;
        }
        for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
                if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
                    msg->mode.page_code &&
                    lun->mode_pages.index[i].subpage == msg->mode.subpage)
                        break;
        }
        if (i == CTL_NUM_MODE_PAGES) {
                mtx_unlock(&lun->lun_lock);
                return;
        }
        memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
            lun->mode_pages.index[i].page_len);
        initidx = ctl_get_initindex(&msg->hdr.nexus);
        if (initidx != -1)
                ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
        mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
        struct ctl_softc *softc = control_softc;
        union ctl_io *io;
        struct ctl_prio *presio;
        ctl_ha_status isc_status;

        CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
        if (event == CTL_HA_EVT_MSG_RECV) {
                union ctl_ha_msg *msg, msgbuf;

                if (param > sizeof(msgbuf))
                        msg = malloc(param, M_CTL, M_WAITOK);
                else
                        msg = &msgbuf;
                isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
                    M_WAITOK);
                if (isc_status != CTL_HA_STATUS_SUCCESS) {
                        printf("%s: Error receiving message: %d\n",
                            __func__, isc_status);
                        if (msg != &msgbuf)
                                free(msg, M_CTL);
                        return;
                }

1353                 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type));
1354                 switch (msg->hdr.msg_type) {
1355                 case CTL_MSG_SERIALIZE:
1356                         io = ctl_alloc_io(softc->othersc_pool);
1357                         ctl_zero_io(io);
1358                         /* Populate the ctsio from the msg. */
1359                         io->io_hdr.io_type = CTL_IO_SCSI;
1360                         io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
1361                         io->io_hdr.original_sc = msg->hdr.original_sc;
1362                         io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
1363                                             CTL_FLAG_IO_ACTIVE;
1364                         /*
1365                          * If we're in serialization-only mode, we don't
1366                          * want to go through full done processing.  Thus
1367                          * the COPY flag.
1368                          *
1369                          * XXX KDM add another flag that is more specific.
1370                          */
1371                         if (softc->ha_mode != CTL_HA_MODE_XFER)
1372                                 io->io_hdr.flags |= CTL_FLAG_INT_COPY;
1373                         io->io_hdr.nexus = msg->hdr.nexus;
1374 #if 0
1375                         printf("port %u, iid %u, lun %u\n",
1376                                io->io_hdr.nexus.targ_port,
1377                                io->io_hdr.nexus.initid,
1378                                io->io_hdr.nexus.targ_lun);
1379 #endif
1380                         io->scsiio.tag_num = msg->scsi.tag_num;
1381                         io->scsiio.tag_type = msg->scsi.tag_type;
1382 #ifdef CTL_TIME_IO
1383                         io->io_hdr.start_time = time_uptime;
1384                         getbinuptime(&io->io_hdr.start_bt);
1385 #endif /* CTL_TIME_IO */
1386                         io->scsiio.cdb_len = msg->scsi.cdb_len;
1387                         memcpy(io->scsiio.cdb, msg->scsi.cdb,
1388                                CTL_MAX_CDBLEN);
1389                         if (softc->ha_mode == CTL_HA_MODE_XFER) {
1390                                 const struct ctl_cmd_entry *entry;
1391
1392                                 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
1393                                 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
1394                                 io->io_hdr.flags |=
1395                                         entry->flags & CTL_FLAG_DATA_MASK;
1396                         }
1397                         ctl_enqueue_isc(io);
1398                         break;
1399
1400                 /* Performed on the Originating SC, XFER mode only */
1401                 case CTL_MSG_DATAMOVE: {
1402                         struct ctl_sg_entry *sgl;
1403                         int i, j;
1404
1405                         io = msg->hdr.original_sc;
1406                         if (io == NULL) {
1407                                 printf("%s: original_sc == NULL!\n", __func__);
1408                                 /* XXX KDM do something here */
1409                                 break;
1410                         }
1411                         io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
1412                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1413                         /*
1414                          * Keep track of this; we need to send it back over
1415                          * when the datamove is complete.
1416                          */
1417                         io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
1418                         if (msg->hdr.status == CTL_SUCCESS)
1419                                 io->io_hdr.status = msg->hdr.status;
1420
1421                         if (msg->dt.sg_sequence == 0) {
1422 #ifdef CTL_TIME_IO
1423                                 getbinuptime(&io->io_hdr.dma_start_bt);
1424 #endif
1425                                 i = msg->dt.kern_sg_entries +
1426                                     msg->dt.kern_data_len /
1427                                     CTL_HA_DATAMOVE_SEGMENT + 1;
1428                                 sgl = malloc(sizeof(*sgl) * i, M_CTL,
1429                                     M_WAITOK | M_ZERO);
1430                                 io->io_hdr.remote_sglist = sgl;
1431                                 io->io_hdr.local_sglist =
1432                                     &sgl[msg->dt.kern_sg_entries];
1433
1434                                 io->scsiio.kern_data_ptr = (uint8_t *)sgl;
1435
1436                                 io->scsiio.kern_sg_entries =
1437                                         msg->dt.kern_sg_entries;
1438                                 io->scsiio.rem_sg_entries =
1439                                         msg->dt.kern_sg_entries;
1440                                 io->scsiio.kern_data_len =
1441                                         msg->dt.kern_data_len;
1442                                 io->scsiio.kern_total_len =
1443                                         msg->dt.kern_total_len;
1444                                 io->scsiio.kern_data_resid =
1445                                         msg->dt.kern_data_resid;
1446                                 io->scsiio.kern_rel_offset =
1447                                         msg->dt.kern_rel_offset;
1448                                 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
1449                                 io->io_hdr.flags |= msg->dt.flags &
1450                                     CTL_FLAG_BUS_ADDR;
1451                         } else
1452                                 sgl = (struct ctl_sg_entry *)
1453                                         io->scsiio.kern_data_ptr;
1454
1455                         for (i = msg->dt.sent_sg_entries, j = 0;
1456                              i < (msg->dt.sent_sg_entries +
1457                              msg->dt.cur_sg_entries); i++, j++) {
1458                                 sgl[i].addr = msg->dt.sg_list[j].addr;
1459                                 sgl[i].len = msg->dt.sg_list[j].len;
1460
1461 #if 0
1462                                 printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
1463                                     __func__, sgl[i].addr, sgl[i].len, j, i);
1464 #endif
1465                         }
1466
1467                         /*
1468                          * If this is the last piece of the I/O, we've got
1469                          * the full S/G list.  Queue processing in the thread.
1470                          * Otherwise wait for the next piece.
1471                          */
1472                         if (msg->dt.sg_last != 0)
1473                                 ctl_enqueue_isc(io);
1474                         break;
1475                 }
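
                /*
                 * Worked example of the S/G bookkeeping above (sizes are
                 * assumed for illustration; CTL_HA_DATAMOVE_SEGMENT is
                 * defined in ctl_ha.h).  With a 128 KB segment size, a
                 * 1 MB transfer arriving as 4 remote entries allocates:
                 *
                 *	i = 4 + 1048576 / 131072 + 1 = 13 entries
                 *
                 * sgl[0..3] is the remote list, filled in piecewise by
                 * each CTL_MSG_DATAMOVE message, and &sgl[4] onward is
                 * room for the local list.  Only the message with
                 * sg_last set completes the remote list and queues the
                 * I/O for processing.
                 */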
1476                 /* Performed on the Serializing (primary) SC, XFER mode only */
1477                 case CTL_MSG_DATAMOVE_DONE: {
1478                         if (msg->hdr.serializing_sc == NULL) {
1479                                 printf("%s: serializing_sc == NULL!\n",
1480                                        __func__);
1481                                 /* XXX KDM now what? */
1482                                 break;
1483                         }
1484                         /*
1485                          * We grab the sense information here in case
1486                          * there was a failure, so we can return status
1487                          * back to the initiator.
1488                          */
1489                         io = msg->hdr.serializing_sc;
1490                         io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
1491                         io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
1492                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1493                         io->io_hdr.port_status = msg->scsi.fetd_status;
1494                         io->scsiio.residual = msg->scsi.residual;
1495                         if (msg->hdr.status != CTL_STATUS_NONE) {
1496                                 io->io_hdr.status = msg->hdr.status;
1497                                 io->scsiio.scsi_status = msg->scsi.scsi_status;
1498                                 io->scsiio.sense_len = msg->scsi.sense_len;
1499                         io->scsiio.sense_residual = msg->scsi.sense_residual;
1500                                 memcpy(&io->scsiio.sense_data,
1501                                     &msg->scsi.sense_data,
1502                                     msg->scsi.sense_len);
1503                                 if (msg->hdr.status == CTL_SUCCESS)
1504                                         io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
1505                         }
1506                         ctl_enqueue_isc(io);
1507                         break;
1508                 }
1509
1510                 /* Performed on the Originating SC, SER_ONLY mode */
1511                 case CTL_MSG_R2R:
1512                         io = msg->hdr.original_sc;
1513                         if (io == NULL) {
1514                                 printf("%s: original_sc == NULL!\n",
1515                                     __func__);
1516                                 break;
1517                         }
1518                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1519                         io->io_hdr.msg_type = CTL_MSG_R2R;
1520                         io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
1521                         ctl_enqueue_isc(io);
1522                         break;
1523
1524                 /*
1525                  * Performed on the Serializing (i.e. primary) SC in
1526                  * SER_ONLY mode.
1527                  * Performed on the Originating (i.e. secondary) SC in
1528                  * XFER mode.
1529                  */
1530                 case CTL_MSG_FINISH_IO:
1531                         if (softc->ha_mode == CTL_HA_MODE_XFER)
1532                                 ctl_isc_handler_finish_xfer(softc, msg);
1533                         else
1534                                 ctl_isc_handler_finish_ser_only(softc, msg);
1535                         break;
1536
1537                 /* Performed on the Originating SC */
1538                 case CTL_MSG_BAD_JUJU:
1539                         io = msg->hdr.original_sc;
1540                         if (io == NULL) {
1541                                 printf("%s: Bad JUJU! original_sc is NULL!\n",
1542                                        __func__);
1543                                 break;
1544                         }
1545                         ctl_copy_sense_data(msg, io);
1546                         /*
1547                          * The I/O should already have been cleaned up on
1548                          * the other SC, so clear this flag to avoid
1549                          * sending a message back to finish it there.
1550                          */
1551                         io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1552                         io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
1553
1554                         /* io = msg->hdr.serializing_sc; */
1555                         io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
1556                         ctl_enqueue_isc(io);
1557                         break;
1558
1559                 /* Handle resets sent from the other side */
1560                 case CTL_MSG_MANAGE_TASKS: {
1561                         struct ctl_taskio *taskio;
1562                         taskio = (struct ctl_taskio *)ctl_alloc_io(
1563                             softc->othersc_pool);
1564                         ctl_zero_io((union ctl_io *)taskio);
1565                         taskio->io_hdr.io_type = CTL_IO_TASK;
1566                         taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1567                         taskio->io_hdr.nexus = msg->hdr.nexus;
1568                         taskio->task_action = msg->task.task_action;
1569                         taskio->tag_num = msg->task.tag_num;
1570                         taskio->tag_type = msg->task.tag_type;
1571 #ifdef CTL_TIME_IO
1572                         taskio->io_hdr.start_time = time_uptime;
1573                         getbinuptime(&taskio->io_hdr.start_bt);
1574 #endif /* CTL_TIME_IO */
1575                         ctl_run_task((union ctl_io *)taskio);
1576                         break;
1577                 }
1578                 /* Persistent Reserve action which needs attention */
1579                 case CTL_MSG_PERS_ACTION:
1580                         presio = (struct ctl_prio *)ctl_alloc_io(
1581                             softc->othersc_pool);
1582                         ctl_zero_io((union ctl_io *)presio);
1583                         presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
1584                         presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
1585                         presio->io_hdr.nexus = msg->hdr.nexus;
1586                         presio->pr_msg = msg->pr;
1587                         ctl_enqueue_isc((union ctl_io *)presio);
1588                         break;
1589                 case CTL_MSG_UA:
1590                         ctl_isc_ua(softc, msg, param);
1591                         break;
1592                 case CTL_MSG_PORT_SYNC:
1593                         ctl_isc_port_sync(softc, msg, param);
1594                         break;
1595                 case CTL_MSG_LUN_SYNC:
1596                         ctl_isc_lun_sync(softc, msg, param);
1597                         break;
1598                 case CTL_MSG_IID_SYNC:
1599                         ctl_isc_iid_sync(softc, msg, param);
1600                         break;
1601                 case CTL_MSG_LOGIN:
1602                         ctl_isc_login(softc, msg, param);
1603                         break;
1604                 case CTL_MSG_MODE_SYNC:
1605                         ctl_isc_mode_sync(softc, msg, param);
1606                         break;
1607                 default:
1608                         printf("Received HA message of unknown type %d\n",
1609                             msg->hdr.msg_type);
1610                         ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
1611                         break;
1612                 }
1613                 if (msg != &msgbuf)
1614                         free(msg, M_CTL);
1615         } else if (event == CTL_HA_EVT_LINK_CHANGE) {
1616                 printf("CTL: HA link status changed from %d to %d\n",
1617                     softc->ha_link, param);
1618                 if (param == softc->ha_link)
1619                         return;
1620                 if (softc->ha_link == CTL_HA_LINK_ONLINE) {
1621                         softc->ha_link = param;
1622                         ctl_isc_ha_link_down(softc);
1623                 } else {
1624                         softc->ha_link = param;
1625                         if (softc->ha_link == CTL_HA_LINK_ONLINE)
1626                                 ctl_isc_ha_link_up(softc);
1627                 }
1628                 return;
1629         } else {
1630                 printf("ctl_isc_event_handler: Unknown event %d\n", event);
1631                 return;
1632         }
1633 }
1634
1635 static void
1636 ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
1637 {
1638
1639         memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
1640             src->scsi.sense_len);
1641         dest->scsiio.scsi_status = src->scsi.scsi_status;
1642         dest->scsiio.sense_len = src->scsi.sense_len;
1643         dest->io_hdr.status = src->hdr.status;
1644 }
1645
1646 static void
1647 ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
1648 {
1649
1650         memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
1651             src->scsiio.sense_len);
1652         dest->scsi.scsi_status = src->scsiio.scsi_status;
1653         dest->scsi.sense_len = src->scsiio.sense_len;
1654         dest->hdr.status = src->io_hdr.status;
1655 }
1656
1657 void
1658 ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1659 {
1660         struct ctl_softc *softc = lun->ctl_softc;
1661         ctl_ua_type *pu;
1662
1663         if (initidx < softc->init_min || initidx >= softc->init_max)
1664                 return;
1665         mtx_assert(&lun->lun_lock, MA_OWNED);
1666         pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1667         if (pu == NULL)
1668                 return;
1669         pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
1670 }
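
/*
 * Illustration: pending_ua is a two-level table, indexed first by port
 * and then by initiator within the port, so a flat initiator index is
 * split with / and %.  Assuming (for this example only) that
 * CTL_MAX_INIT_PER_PORT is 2048, initidx 4099 resolves as:
 *
 *	pu = lun->pending_ua[4099 / 2048];	port slot 2
 *	pu[4099 % 2048] |= ua;			initiator slot 3
 *
 * A NULL first-level pointer means no initiator has logged in through
 * that port, so there is nobody to report a unit attention to.
 */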
1671
1672 void
1673 ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
1674 {
1675         int i;
1676
1677         mtx_assert(&lun->lun_lock, MA_OWNED);
1678         if (lun->pending_ua[port] == NULL)
1679                 return;
1680         for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1681                 if (port * CTL_MAX_INIT_PER_PORT + i == except)
1682                         continue;
1683                 lun->pending_ua[port][i] |= ua;
1684         }
1685 }
1686
1687 void
1688 ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1689 {
1690         struct ctl_softc *softc = lun->ctl_softc;
1691         int i;
1692
1693         mtx_assert(&lun->lun_lock, MA_OWNED);
1694         for (i = softc->port_min; i < softc->port_max; i++)
1695                 ctl_est_ua_port(lun, i, except, ua);
1696 }
1697
1698 void
1699 ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
1700 {
1701         struct ctl_softc *softc = lun->ctl_softc;
1702         ctl_ua_type *pu;
1703
1704         if (initidx < softc->init_min || initidx >= softc->init_max)
1705                 return;
1706         mtx_assert(&lun->lun_lock, MA_OWNED);
1707         pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
1708         if (pu == NULL)
1709                 return;
1710         pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
1711 }
1712
1713 void
1714 ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
1715 {
1716         struct ctl_softc *softc = lun->ctl_softc;
1717         int i, j;
1718
1719         mtx_assert(&lun->lun_lock, MA_OWNED);
1720         for (i = softc->port_min; i < softc->port_max; i++) {
1721                 if (lun->pending_ua[i] == NULL)
1722                         continue;
1723                 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
1724                         if (i * CTL_MAX_INIT_PER_PORT + j == except)
1725                                 continue;
1726                         lun->pending_ua[i][j] &= ~ua;
1727                 }
1728         }
1729 }
1730
1731 void
1732 ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
1733     ctl_ua_type ua_type)
1734 {
1735         struct ctl_lun *lun;
1736
1737         mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
1738         STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
1739                 mtx_lock(&lun->lun_lock);
1740                 ctl_clr_ua(lun, initidx, ua_type);
1741                 mtx_unlock(&lun->lun_lock);
1742         }
1743 }
1744
1745 static int
1746 ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
1747 {
1748         struct ctl_softc *softc = (struct ctl_softc *)arg1;
1749         struct ctl_lun *lun;
1750         struct ctl_lun_req ireq;
1751         int error, value;
1752
1753         value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
1754         error = sysctl_handle_int(oidp, &value, 0, req);
1755         if ((error != 0) || (req->newptr == NULL))
1756                 return (error);
1757
1758         mtx_lock(&softc->ctl_lock);
1759         if (value == 0)
1760                 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1761         else
1762                 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
1763         STAILQ_FOREACH(lun, &softc->lun_list, links) {
1764                 mtx_unlock(&softc->ctl_lock);
1765                 bzero(&ireq, sizeof(ireq));
1766                 ireq.reqtype = CTL_LUNREQ_MODIFY;
1767                 ireq.reqdata.modify.lun_id = lun->lun;
1768                 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
1769                     curthread);
1770                 if (ireq.status != CTL_LUN_OK) {
1771                         printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
1772                             __func__, ireq.status, ireq.error_str);
1773                 }
1774                 mtx_lock(&softc->ctl_lock);
1775         }
1776         mtx_unlock(&softc->ctl_lock);
1777         return (0);
1778 }
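
/*
 * This handler backs the kern.cam.ctl.ha_role sysctl registered in
 * ctl_init() below.  Usage sketch from userland (values per the
 * handler above; note that 0 means "primary"):
 *
 *	sysctl kern.cam.ctl.ha_role=0	make this head primary
 *	sysctl kern.cam.ctl.ha_role=1	make this head secondary
 *
 * Writing the value also issues a CTL_LUNREQ_MODIFY to every LUN so
 * each backend can react to the role change.
 */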
1779
1780 static int
1781 ctl_init(void)
1782 {
1783         struct make_dev_args args;
1784         struct ctl_softc *softc;
1785         void *other_pool;
1786         int i, error;
1787
1788         softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
1789                                M_WAITOK | M_ZERO);
1790
1791         make_dev_args_init(&args);
1792         args.mda_devsw = &ctl_cdevsw;
1793         args.mda_uid = UID_ROOT;
1794         args.mda_gid = GID_OPERATOR;
1795         args.mda_mode = 0600;
1796         args.mda_si_drv1 = softc;
1797         error = make_dev_s(&args, &softc->dev, "cam/ctl");
1798         if (error != 0) {
1799                 free(control_softc, M_DEVBUF);
1800                 return (error);
1801         }
1802
1803         sysctl_ctx_init(&softc->sysctl_ctx);
1804         softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1805                 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
1806                 CTLFLAG_RD, 0, "CAM Target Layer");
1807
1808         if (softc->sysctl_tree == NULL) {
1809                 printf("%s: unable to allocate sysctl tree\n", __func__);
1810                 destroy_dev(softc->dev);
1811                 free(control_softc, M_DEVBUF);
1812                 control_softc = NULL;
1813                 return (ENOMEM);
1814         }
1815
1816         mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
1817         softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
1818             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1819         softc->flags = 0;
1820
1821         SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1822             OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
1823             "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
1824
1825         /*
1826          * In Copan's HA scheme, the "master" and "slave" roles are
1827          * figured out through the slot the controller is in.  Although it
1828          * is an active/active system, someone has to be in charge.
1829          */
1830         SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1831             OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
1832             "HA head ID (0 - no HA)");
1833         if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) {
1834                 softc->flags |= CTL_FLAG_ACTIVE_SHELF;
1835                 softc->is_single = 1;
1836                 softc->port_cnt = CTL_MAX_PORTS;
1837                 softc->port_min = 0;
1838         } else {
1839                 softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES;
1840                 softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
1841         }
1842         softc->port_max = softc->port_min + softc->port_cnt;
1843         softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
1844         softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;
1845
1846         SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1847             OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
1848             "HA link state (0 - offline, 1 - unknown, 2 - online)");
1849
1850         STAILQ_INIT(&softc->lun_list);
1851         STAILQ_INIT(&softc->pending_lun_queue);
1852         STAILQ_INIT(&softc->fe_list);
1853         STAILQ_INIT(&softc->port_list);
1854         STAILQ_INIT(&softc->be_list);
1855         ctl_tpc_init(softc);
1856
1857         if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
1858                             &other_pool) != 0)
1859         {
1860                 printf("ctl: can't allocate %d entry other SC pool, "
1861                        "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
1862                 return (ENOMEM);
1863         }
1864         softc->othersc_pool = other_pool;
1865
1866         if (worker_threads <= 0)
1867                 worker_threads = max(1, mp_ncpus / 4);
1868         if (worker_threads > CTL_MAX_THREADS)
1869                 worker_threads = CTL_MAX_THREADS;
1870
1871         for (i = 0; i < worker_threads; i++) {
1872                 struct ctl_thread *thr = &softc->threads[i];
1873
1874                 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
1875                 thr->ctl_softc = softc;
1876                 STAILQ_INIT(&thr->incoming_queue);
1877                 STAILQ_INIT(&thr->rtr_queue);
1878                 STAILQ_INIT(&thr->done_queue);
1879                 STAILQ_INIT(&thr->isc_queue);
1880
1881                 error = kproc_kthread_add(ctl_work_thread, thr,
1882                     &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
1883                 if (error != 0) {
1884                         printf("error creating CTL work thread!\n");
1885                         ctl_pool_free(other_pool);
1886                         return (error);
1887                 }
1888         }
1889         error = kproc_kthread_add(ctl_lun_thread, softc,
1890             &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
1891         if (error != 0) {
1892                 printf("error creating CTL lun thread!\n");
1893                 ctl_pool_free(other_pool);
1894                 return (error);
1895         }
1896         error = kproc_kthread_add(ctl_thresh_thread, softc,
1897             &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
1898         if (error != 0) {
1899                 printf("error creating CTL threshold thread!\n");
1900                 ctl_pool_free(other_pool);
1901                 return (error);
1902         }
1903
1904         SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1905             OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
1906             softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");
1907
1908         if (softc->is_single == 0) {
1909                 ctl_frontend_register(&ha_frontend);
1910                 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
1911                         printf("ctl_init: ctl_ha_msg_init failed.\n");
1912                         softc->is_single = 1;
1913                 } else
1914                 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
1915                     != CTL_HA_STATUS_SUCCESS) {
1916                         printf("ctl_init: ctl_ha_msg_register failed.\n");
1917                         softc->is_single = 1;
1918                 }
1919         }
1920         return (0);
1921 }
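
/*
 * Configuration sketch: ha_mode and ha_id above are CTLFLAG_RDTUN,
 * i.e. read-only sysctls seeded from loader tunables, so a two-head
 * setup might be configured from /boot/loader.conf with, e.g.
 * (illustrative values; meanings per the description strings above):
 *
 *	kern.cam.ctl.ha_id=1		1 or 2; 0 disables HA
 *	kern.cam.ctl.ha_mode=2		0 act/stby, 1 ser only, 2 xfer
 */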
1922
1923 void
1924 ctl_shutdown(void)
1925 {
1926         struct ctl_softc *softc = control_softc;
1927         struct ctl_lun *lun, *next_lun;
1928
1929         if (softc->is_single == 0) {
1930                 ctl_ha_msg_shutdown(softc);
1931                 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL)
1932                     != CTL_HA_STATUS_SUCCESS)
1933                         printf("%s: ctl_ha_msg_deregister failed.\n", __func__);
1934                 if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS)
1935                         printf("%s: ctl_ha_msg_destroy failed.\n", __func__);
1936                 ctl_frontend_deregister(&ha_frontend);
1937         }
1938
1939         mtx_lock(&softc->ctl_lock);
1940
1941         STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun)
1942                 ctl_free_lun(lun);
1943
1944         mtx_unlock(&softc->ctl_lock);
1945
1946 #if 0
1947         ctl_shutdown_thread(softc->work_thread);
1948         mtx_destroy(&softc->queue_lock);
1949 #endif
1950
1951         ctl_tpc_shutdown(softc);
1952         uma_zdestroy(softc->io_zone);
1953         mtx_destroy(&softc->ctl_lock);
1954
1955         destroy_dev(softc->dev);
1956
1957         sysctl_ctx_free(&softc->sysctl_ctx);
1958
1959         free(control_softc, M_DEVBUF);
1960         control_softc = NULL;
1961 }
1962
1963 static int
1964 ctl_module_event_handler(module_t mod, int what, void *arg)
1965 {
1966
1967         switch (what) {
1968         case MOD_LOAD:
1969                 return (ctl_init());
1970         case MOD_UNLOAD:
1971                 return (EBUSY);
1972         default:
1973                 return (EOPNOTSUPP);
1974         }
1975 }
1976
1977 /*
1978  * XXX KDM should we do some access checks here?  Bump a reference count to
1979  * prevent a CTL module from being unloaded while someone has it open?
1980  */
1981 static int
1982 ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
1983 {
1984         return (0);
1985 }
1986
1987 static int
1988 ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
1989 {
1990         return (0);
1991 }
1992
1993 /*
1994  * Remove an initiator by port number and initiator ID.
1995  * Returns 0 for success, -1 for failure.
1996  */
1997 int
1998 ctl_remove_initiator(struct ctl_port *port, int iid)
1999 {
2000         struct ctl_softc *softc = port->ctl_softc;
2001
2002         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2003
2004         if (iid >= CTL_MAX_INIT_PER_PORT) {
2005                 printf("%s: initiator ID %u >= maximum %u!\n",
2006                        __func__, iid, CTL_MAX_INIT_PER_PORT);
2007                 return (-1);
2008         }
2009
2010         mtx_lock(&softc->ctl_lock);
2011         port->wwpn_iid[iid].in_use--;
2012         port->wwpn_iid[iid].last_use = time_uptime;
2013         mtx_unlock(&softc->ctl_lock);
2014         ctl_isc_announce_iid(port, iid);
2015
2016         return (0);
2017 }
2018
2019 /*
2020  * Add an initiator to the initiator map.
2021  * Returns iid for success, < 0 for failure.
2022  */
2023 int
2024 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
2025 {
2026         struct ctl_softc *softc = port->ctl_softc;
2027         time_t best_time;
2028         int i, best;
2029
2030         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
2031
2032         if (iid >= CTL_MAX_INIT_PER_PORT) {
2033                 printf("%s: WWPN %#jx initiator ID %u >= maximum %u!\n",
2034                        __func__, (uintmax_t)wwpn, iid, CTL_MAX_INIT_PER_PORT);
2035                 free(name, M_CTL);
2036                 return (-1);
2037         }
2038
2039         mtx_lock(&softc->ctl_lock);
2040
2041         if (iid < 0 && (wwpn != 0 || name != NULL)) {
2042                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2043                         if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
2044                                 iid = i;
2045                                 break;
2046                         }
2047                         if (name != NULL && port->wwpn_iid[i].name != NULL &&
2048                             strcmp(name, port->wwpn_iid[i].name) == 0) {
2049                                 iid = i;
2050                                 break;
2051                         }
2052                 }
2053         }
2054
2055         if (iid < 0) {
2056                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2057                         if (port->wwpn_iid[i].in_use == 0 &&
2058                             port->wwpn_iid[i].wwpn == 0 &&
2059                             port->wwpn_iid[i].name == NULL) {
2060                                 iid = i;
2061                                 break;
2062                         }
2063                 }
2064         }
2065
2066         if (iid < 0) {
2067                 best = -1;
2068                 best_time = INT32_MAX;
2069                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
2070                         if (port->wwpn_iid[i].in_use == 0) {
2071                                 if (port->wwpn_iid[i].last_use < best_time) {
2072                                         best = i;
2073                                         best_time = port->wwpn_iid[i].last_use;
2074                                 }
2075                         }
2076                 }
2077                 iid = best;
2078         }
2079
2080         if (iid < 0) {
2081                 mtx_unlock(&softc->ctl_lock);
2082                 free(name, M_CTL);
2083                 return (-2);
2084         }
2085
2086         if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
2087                 /*
2088                  * This is not an error yet.
2089                  */
2090                 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
2091 #if 0
2092                         printf("%s: port %d iid %u WWPN %#jx arrived"
2093                             " again\n", __func__, port->targ_port,
2094                             iid, (uintmax_t)wwpn);
2095 #endif
2096                         goto take;
2097                 }
2098                 if (name != NULL && port->wwpn_iid[iid].name != NULL &&
2099                     strcmp(name, port->wwpn_iid[iid].name) == 0) {
2100 #if 0
2101                         printf("%s: port %d iid %u name '%s' arrived"
2102                             " again\n", __func__, port->targ_port,
2103                             iid, name);
2104 #endif
2105                         goto take;
2106                 }
2107
2108                 /*
2109                  * This is an error, but what do we do about it?  The
2110                  * driver is telling us we have a new WWPN for this
2111                  * initiator ID, so we pretty much need to use it.
2112                  */
2113                 printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
2114                     " but WWPN %#jx '%s' is still at that address\n",
2115                     __func__, port->targ_port, iid, (uintmax_t)wwpn, name,
2116                     (uintmax_t)port->wwpn_iid[iid].wwpn,
2117                     port->wwpn_iid[iid].name);
2118
2119                 /*
2120                  * XXX KDM clear have_ca and ua_pending on each LUN for
2121                  * this initiator.
2122                  */
2123         }
2124 take:
2125         free(port->wwpn_iid[iid].name, M_CTL);
2126         port->wwpn_iid[iid].name = name;
2127         port->wwpn_iid[iid].wwpn = wwpn;
2128         port->wwpn_iid[iid].in_use++;
2129         mtx_unlock(&softc->ctl_lock);
2130         ctl_isc_announce_iid(port, iid);
2131
2132         return (iid);
2133 }
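
/*
 * Calling-convention sketch for a frontend (hypothetical code; only
 * the ownership rule is taken from the function above).  Passing
 * iid < 0 asks CTL to pick a slot: first by WWPN/name match, then a
 * free slot, then the least recently used idle slot.  The name string
 * must be allocated with M_CTL because CTL takes ownership of it:
 *
 *	char *name = strdup("iqn.1994-09.org.freebsd:example", M_CTL);
 *	int iid = ctl_add_initiator(port, -1, 0, name);
 *	if (iid < 0)
 *		return;			name was already freed for us
 *	...
 *	ctl_remove_initiator(port, iid);
 */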
2134
2135 static int
2136 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
2137 {
2138         int len;
2139
2140         switch (port->port_type) {
2141         case CTL_PORT_FC:
2142         {
2143                 struct scsi_transportid_fcp *id =
2144                     (struct scsi_transportid_fcp *)buf;
2145                 if (port->wwpn_iid[iid].wwpn == 0)
2146                         return (0);
2147                 memset(id, 0, sizeof(*id));
2148                 id->format_protocol = SCSI_PROTO_FC;
2149                 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
2150                 return (sizeof(*id));
2151         }
2152         case CTL_PORT_ISCSI:
2153         {
2154                 struct scsi_transportid_iscsi_port *id =
2155                     (struct scsi_transportid_iscsi_port *)buf;
2156                 if (port->wwpn_iid[iid].name == NULL)
2157                         return (0);
2158                 memset(id, 0, 256);
2159                 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
2160                     SCSI_PROTO_ISCSI;
2161                 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
2162                 len = roundup2(min(len, 252), 4);
2163                 scsi_ulto2b(len, id->additional_length);
2164                 return (sizeof(*id) + len);
2165         }
2166         case CTL_PORT_SAS:
2167         {
2168                 struct scsi_transportid_sas *id =
2169                     (struct scsi_transportid_sas *)buf;
2170                 if (port->wwpn_iid[iid].wwpn == 0)
2171                         return (0);
2172                 memset(id, 0, sizeof(*id));
2173                 id->format_protocol = SCSI_PROTO_SAS;
2174                 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
2175                 return (sizeof(*id));
2176         }
2177         default:
2178         {
2179                 struct scsi_transportid_spi *id =
2180                     (struct scsi_transportid_spi *)buf;
2181                 memset(id, 0, sizeof(*id));
2182                 id->format_protocol = SCSI_PROTO_SPI;
2183                 scsi_ulto2b(iid, id->scsi_addr);
2184                 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
2185                 return (sizeof(*id));
2186         }
2187         }
2188 }
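
/*
 * Worked example of the iSCSI TransportID sizing above, for the
 * hypothetical initiator name "iqn.1994-09.org.freebsd:x" (25
 * characters):
 *
 *	len = strlcpy(...) + 1 = 26	count the terminating NUL
 *	len = roundup2(min(26, 252), 4) = 28
 *
 * so additional_length is 28 and the TransportID occupies
 * sizeof(*id) + 28 bytes, padded to the multiple of four that the
 * SPC TransportID format requires.
 */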
2189
2190 /*
2191  * Serialize a command that went down the "wrong" side, and so was sent to
2192  * this controller for execution.  The logic is a little different than the
2193  * standard case in ctl_scsiio_precheck().  Errors in this case need to get
2194  * sent back to the other side, but in the success case, we execute the
2195  * command on this side (XFER mode) or tell the other side to execute it
2196  * (SER_ONLY mode).
2197  */
2198 static int
2199 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
2200 {
2201         struct ctl_softc *softc = control_softc;
2202         union ctl_ha_msg msg_info;
2203         struct ctl_port *port;
2204         struct ctl_lun *lun;
2205         const struct ctl_cmd_entry *entry;
2206         int retval = 0;
2207         uint32_t targ_lun;
2208
2209         targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
2210         mtx_lock(&softc->ctl_lock);
2211
2212         /* Make sure that we know about this port. */
2213         port = ctl_io_port(&ctsio->io_hdr);
2214         if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
2215                 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2216                                          /*retry_count*/ 1);
2217                 goto badjuju;
2218         }
2219
2220         /* Make sure that we know about this LUN. */
2221         if ((targ_lun < CTL_MAX_LUNS) &&
2222             ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
2223                 mtx_lock(&lun->lun_lock);
2224                 mtx_unlock(&softc->ctl_lock);
2225                 /*
2226                  * If the LUN is invalid, pretend that it doesn't exist.
2227                  * It will go away as soon as all pending I/O has been
2228                  * completed.
2229                  */
2230                 if (lun->flags & CTL_LUN_DISABLED) {
2231                         mtx_unlock(&lun->lun_lock);
2232                         lun = NULL;
2233                 }
2234         } else {
2235                 mtx_unlock(&softc->ctl_lock);
2236                 lun = NULL;
2237         }
2238         if (lun == NULL) {
2239                 /*
2240                  * The other node would not send this request to us unless it had
2241                  * received an announcement that we are the primary node for this
2242                  * LUN.  If this LUN does not exist now, it is probably the result
2243                  * of a race, so respond to the initiator in the most opaque way.
2244                  */
2245                 ctl_set_busy(ctsio);
2246                 goto badjuju;
2247         }
2248
2249         entry = ctl_get_cmd_entry(ctsio, NULL);
2250         if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
2251                 mtx_unlock(&lun->lun_lock);
2252                 goto badjuju;
2253         }
2254
2255         ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
2256         ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;
2257
2258         /*
2259          * Every I/O goes into the OOA queue for a
2260          * particular LUN, and stays there until completion.
2261          */
2262 #ifdef CTL_TIME_IO
2263         if (TAILQ_EMPTY(&lun->ooa_queue))
2264                 lun->idle_time += getsbinuptime() - lun->last_busy;
2265 #endif
2266         TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2267
2268         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
2269                 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
2270                  ooa_links))) {
2271         case CTL_ACTION_BLOCK:
2272                 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
2273                 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
2274                                   blocked_links);
2275                 mtx_unlock(&lun->lun_lock);
2276                 break;
2277         case CTL_ACTION_PASS:
2278         case CTL_ACTION_SKIP:
2279                 if (softc->ha_mode == CTL_HA_MODE_XFER) {
2280                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
2281                         ctl_enqueue_rtr((union ctl_io *)ctsio);
2282                         mtx_unlock(&lun->lun_lock);
2283                 } else {
2284                         ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
2285                         mtx_unlock(&lun->lun_lock);
2286
2287                         /* send msg back to other side */
2288                         msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
2289                         msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
2290                         msg_info.hdr.msg_type = CTL_MSG_R2R;
2291                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2292                             sizeof(msg_info.hdr), M_WAITOK);
2293                 }
2294                 break;
2295         case CTL_ACTION_OVERLAP:
2296                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2297                 mtx_unlock(&lun->lun_lock);
2298                 ctl_set_overlapped_cmd(ctsio);
2299                 goto badjuju;
2300         case CTL_ACTION_OVERLAP_TAG:
2301                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2302                 mtx_unlock(&lun->lun_lock);
2303                 ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
2304                 goto badjuju;
2305         case CTL_ACTION_ERROR:
2306         default:
2307                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
2308                 mtx_unlock(&lun->lun_lock);
2309
2310                 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2311                                          /*retry_count*/ 0);
2312 badjuju:
2313                 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
2314                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
2315                 msg_info.hdr.serializing_sc = NULL;
2316                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
2317                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
2318                     sizeof(msg_info.scsi), M_WAITOK);
2319                 retval = 1;
2320                 break;
2321         }
2322         return (retval);
2323 }
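
/*
 * Message-flow summary for the function above (a reading aid, not
 * normative).  "A" is the node that received the command from the
 * initiator; "B" is this node:
 *
 *	A -> B	CTL_MSG_SERIALIZE	always
 *	B -> A	CTL_MSG_R2R		SER_ONLY success; A runs the command
 *	B -> A	CTL_MSG_BAD_JUJU	any error (bad port/LUN, overlap,
 *					LUN check failure)
 *
 * In XFER mode the success path does not reply at all; the I/O is
 * queued to this node's RTR queue because the command executes here.
 */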
2324
2325 /*
2326  * Fill in the given entry array from this LUN's OOA (Order Of Arrival) queue.
2327  */
2328 static void
2329 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
2330                    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
2331 {
2332         union ctl_io *io;
2333
2334         mtx_lock(&lun->lun_lock);
2335         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
2336              (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2337              ooa_links)) {
2338                 struct ctl_ooa_entry *entry;
2339
2340                 /*
2341                  * If we've got more than we can fit, just count the
2342                  * remaining entries.
2343                  */
2344                 if (*cur_fill_num >= ooa_hdr->alloc_num)
2345                         continue;
2346
2347                 entry = &kern_entries[*cur_fill_num];
2348
2349                 entry->tag_num = io->scsiio.tag_num;
2350                 entry->lun_num = lun->lun;
2351 #ifdef CTL_TIME_IO
2352                 entry->start_bt = io->io_hdr.start_bt;
2353 #endif
2354                 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
2355                 entry->cdb_len = io->scsiio.cdb_len;
2356                 if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
2357                         entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
2358
2359                 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
2360                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
2361
2362                 if (io->io_hdr.flags & CTL_FLAG_ABORT)
2363                         entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
2364
2365                 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
2366                         entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
2367
2368                 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
2369                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
2370         }
2371         mtx_unlock(&lun->lun_lock);
2372 }
2373
2374 static void *
2375 ctl_copyin_alloc(void *user_addr, int len, char *error_str,
2376                  size_t error_str_len)
2377 {
2378         void *kptr;
2379
2380         kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
2381
2382         if (copyin(user_addr, kptr, len) != 0) {
2383                 snprintf(error_str, error_str_len, "Error copying %d bytes "
2384                          "from user address %p to kernel address %p", len,
2385                          user_addr, kptr);
2386                 free(kptr, M_CTL);
2387                 return (NULL);
2388         }
2389
2390         return (kptr);
2391 }
2392
2393 static void
2394 ctl_free_args(int num_args, struct ctl_be_arg *args)
2395 {
2396         int i;
2397
2398         if (args == NULL)
2399                 return;
2400
2401         for (i = 0; i < num_args; i++) {
2402                 free(args[i].kname, M_CTL);
2403                 free(args[i].kvalue, M_CTL);
2404         }
2405
2406         free(args, M_CTL);
2407 }
2408
2409 static struct ctl_be_arg *
2410 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
2411                 char *error_str, size_t error_str_len)
2412 {
2413         struct ctl_be_arg *args;
2414         int i;
2415
2416         args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
2417                                 error_str, error_str_len);
2418
2419         if (args == NULL)
2420                 goto bailout;
2421
2422         for (i = 0; i < num_args; i++) {
2423                 args[i].kname = NULL;
2424                 args[i].kvalue = NULL;
2425         }
2426
2427         for (i = 0; i < num_args; i++) {
2428                 uint8_t *tmpptr;
2429
2430                 args[i].kname = ctl_copyin_alloc(args[i].name,
2431                         args[i].namelen, error_str, error_str_len);
2432                 if (args[i].kname == NULL)
2433                         goto bailout;
2434
2435                 if (args[i].kname[args[i].namelen - 1] != '\0') {
2436                         snprintf(error_str, error_str_len, "Argument %d "
2437                                  "name is not NUL-terminated", i);
2438                         goto bailout;
2439                 }
2440
2441                 if (args[i].flags & CTL_BEARG_RD) {
2442                         tmpptr = ctl_copyin_alloc(args[i].value,
2443                                 args[i].vallen, error_str, error_str_len);
2444                         if (tmpptr == NULL)
2445                                 goto bailout;
2446                         if ((args[i].flags & CTL_BEARG_ASCII)
2447                          && (tmpptr[args[i].vallen - 1] != '\0')) {
2448                                 snprintf(error_str, error_str_len, "Argument "
2449                                     "%d value is not NUL-terminated", i);
2450                                 goto bailout;
2451                         }
2452                         args[i].kvalue = tmpptr;
2453                 } else {
2454                         args[i].kvalue = malloc(args[i].vallen,
2455                             M_CTL, M_WAITOK | M_ZERO);
2456                 }
2457         }
2458
2459         return (args);
2460 bailout:
2461
2462         ctl_free_args(num_args, args);
2463
2464         return (NULL);
2465 }
2466
2467 static void
2468 ctl_copyout_args(int num_args, struct ctl_be_arg *args)
2469 {
2470         int i;
2471
2472         for (i = 0; i < num_args; i++) {
2473                 if (args[i].flags & CTL_BEARG_WR)
2474                         copyout(args[i].kvalue, args[i].value, args[i].vallen);
2475         }
2476 }
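
/*
 * The three helpers above form the usual ioctl argument round trip.
 * Hedged sketch of a caller (field names assumed from struct
 * ctl_lun_req; error handling elided):
 *
 *	args = ctl_copyin_args(req->num_be_args, req->be_args,
 *	    req->error_str, sizeof(req->error_str));
 *	if (args == NULL)
 *		return;			copyin failed, error_str is set
 *	... backend fills values flagged CTL_BEARG_WR ...
 *	ctl_copyout_args(req->num_be_args, args);
 *	ctl_free_args(req->num_be_args, args);
 */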
2477
2478 /*
2479  * Escape characters that are illegal or not recommended in XML.
2480  */
2481 int
2482 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
2483 {
2484         char *end = str + size;
2485         int retval;
2486
2487         retval = 0;
2488
2489         for (; str < end && *str; str++) {
2490                 switch (*str) {
2491                 case '&':
2492                         retval = sbuf_printf(sb, "&amp;");
2493                         break;
2494                 case '>':
2495                         retval = sbuf_printf(sb, "&gt;");
2496                         break;
2497                 case '<':
2498                         retval = sbuf_printf(sb, "&lt;");
2499                         break;
2500                 default:
2501                         retval = sbuf_putc(sb, *str);
2502                         break;
2503                 }
2504
2505                 if (retval != 0)
2506                         break;
2507
2508         }
2509
2510         return (retval);
2511 }
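
/*
 * Example input/output for the escaping above, e.g. a device serial
 * number emitted as XML element text:
 *
 *	"disk<1> & co"  ->  "disk&lt;1&gt; &amp; co"
 *
 * Only '&', '<' and '>' are rewritten; quotes pass through unchanged,
 * so this is suitable for element content but not attribute values.
 */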
2512
2513 static void
2514 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
2515 {
2516         struct scsi_vpd_id_descriptor *desc;
2517         int i;
2518
2519         if (id == NULL || id->len < 4)
2520                 return;
2521         desc = (struct scsi_vpd_id_descriptor *)id->data;
2522         switch (desc->id_type & SVPD_ID_TYPE_MASK) {
2523         case SVPD_ID_TYPE_T10:
2524                 sbuf_printf(sb, "t10.");
2525                 break;
2526         case SVPD_ID_TYPE_EUI64:
2527                 sbuf_printf(sb, "eui.");
2528                 break;
2529         case SVPD_ID_TYPE_NAA:
2530                 sbuf_printf(sb, "naa.");
2531                 break;
2532         case SVPD_ID_TYPE_SCSI_NAME:
2533                 break;
2534         }
2535         switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
2536         case SVPD_ID_CODESET_BINARY:
2537                 for (i = 0; i < desc->length; i++)
2538                         sbuf_printf(sb, "%02x", desc->identifier[i]);
2539                 break;
2540         case SVPD_ID_CODESET_ASCII:
2541                 sbuf_printf(sb, "%.*s", (int)desc->length,
2542                     (char *)desc->identifier);
2543                 break;
2544         case SVPD_ID_CODESET_UTF8:
2545                 sbuf_printf(sb, "%s", (char *)desc->identifier);
2546                 break;
2547         }
2548 }
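
/*
 * Example rendering: an NAA descriptor in the binary codeset with the
 * (made up) 8-byte identifier 60 0a 0b 80 00 11 22 33 is printed as
 *
 *	naa.600a0b8000112233
 *
 * i.e. the prefix chosen by the type switch followed by the hex dump
 * chosen by the codeset switch.
 */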
2549
2550 static int
2551 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2552           struct thread *td)
2553 {
2554         struct ctl_softc *softc = dev->si_drv1;
2555         struct ctl_lun *lun;
2556         int retval;
2557
2558         retval = 0;
2559
2560         switch (cmd) {
2561         case CTL_IO:
2562                 retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
2563                 break;
2564         case CTL_ENABLE_PORT:
2565         case CTL_DISABLE_PORT:
2566         case CTL_SET_PORT_WWNS: {
2567                 struct ctl_port *port;
2568                 struct ctl_port_entry *entry;
2569
2570                 entry = (struct ctl_port_entry *)addr;
2571
2572                 mtx_lock(&softc->ctl_lock);
2573                 STAILQ_FOREACH(port, &softc->port_list, links) {
2574                         int action, done;
2575
2576                         if (port->targ_port < softc->port_min ||
2577                             port->targ_port >= softc->port_max)
2578                                 continue;
2579
2580                         action = 0;
2581                         done = 0;
2582                         if ((entry->port_type == CTL_PORT_NONE)
2583                          && (entry->targ_port == port->targ_port)) {
2584                                 /*
2585                                  * If the user only wants to enable or
2586                                  * disable or set WWNs on a specific port,
2587                                  * do the operation and we're done.
2588                                  */
2589                                 action = 1;
2590                                 done = 1;
2591                         } else if (entry->port_type & port->port_type) {
2592                                 /*
2593                                  * Compare the user's type mask with the
2594                                  * particular frontend type to see if we
2595                                  * have a match.
2596                                  */
2597                                 action = 1;
2598                                 done = 0;
2599
2600                                 /*
2601                                  * Make sure the user isn't trying to set
2602                                  * WWNs on multiple ports at the same time.
2603                                  */
2604                                 if (cmd == CTL_SET_PORT_WWNS) {
2605                                         printf("%s: Can't set WWNs on "
2606                                                "multiple ports\n", __func__);
2607                                         retval = EINVAL;
2608                                         break;
2609                                 }
2610                         }
2611                         if (action == 0)
2612                                 continue;
2613
2614                         /*
2615                          * XXX KDM we have to drop the lock here, because
2616                          * the online/offline operations can potentially
2617                          * block.  We need to reference count the frontends
2618                          * so they can't go away.
2619                          */
2620                         if (cmd == CTL_ENABLE_PORT) {
2621                                 mtx_unlock(&softc->ctl_lock);
2622                                 ctl_port_online(port);
2623                                 mtx_lock(&softc->ctl_lock);
2624                         } else if (cmd == CTL_DISABLE_PORT) {
2625                                 mtx_unlock(&softc->ctl_lock);
2626                                 ctl_port_offline(port);
2627                                 mtx_lock(&softc->ctl_lock);
2628                         } else if (cmd == CTL_SET_PORT_WWNS) {
2629                                 ctl_port_set_wwns(port,
2630                                     (entry->flags & CTL_PORT_WWNN_VALID) ?
2631                                     1 : 0, entry->wwnn,
2632                                     (entry->flags & CTL_PORT_WWPN_VALID) ?
2633                                     1 : 0, entry->wwpn);
2634                         }
2635                         if (done != 0)
2636                                 break;
2637                 }
2638                 mtx_unlock(&softc->ctl_lock);
2639                 break;
2640         }
2641         case CTL_GET_OOA: {
2642                 struct ctl_ooa *ooa_hdr;
2643                 struct ctl_ooa_entry *entries;
2644                 uint32_t cur_fill_num;
2645
2646                 ooa_hdr = (struct ctl_ooa *)addr;
2647
2648                 if ((ooa_hdr->alloc_len == 0)
2649                  || (ooa_hdr->alloc_num == 0)) {
2650                         printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2651                                "must be non-zero\n", __func__,
2652                                ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2653                         retval = EINVAL;
2654                         break;
2655                 }
2656
2657                 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2658                     sizeof(struct ctl_ooa_entry))) {
2659                         printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2660                                "num %u * sizeof(struct ctl_ooa_entry) %zu\n",
2661                                __func__, ooa_hdr->alloc_len,
2662                                ooa_hdr->alloc_num, sizeof(struct ctl_ooa_entry));
2663                         retval = EINVAL;
2664                         break;
2665                 }
2666
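                /*
                 * Illustrative only: a userland caller would typically size
                 * the request to satisfy the checks above along these lines
                 * (hypothetical values, error handling elided):
                 *
                 *	struct ctl_ooa hdr;
                 *
                 *	bzero(&hdr, sizeof(hdr));
                 *	hdr.alloc_num = 256;
                 *	hdr.alloc_len = hdr.alloc_num *
                 *	    sizeof(struct ctl_ooa_entry);
                 *	hdr.entries = malloc(hdr.alloc_len);
                 */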
2667                 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2668                 /*
2669                  * malloc(9) with M_WAITOK cannot return NULL, so no
2670                  * error check is needed here.
2671                  */
2674
2675                 mtx_lock(&softc->ctl_lock);
2676                 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
2677                  && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
2678                   || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
2679                         mtx_unlock(&softc->ctl_lock);
2680                         free(entries, M_CTL);
2681                         printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2682                                __func__, (uintmax_t)ooa_hdr->lun_num);
2683                         retval = EINVAL;
2684                         break;
2685                 }
2686
2687                 cur_fill_num = 0;
2688
2689                 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2690                         STAILQ_FOREACH(lun, &softc->lun_list, links) {
2691                                 ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2692                                     ooa_hdr, entries);
2693                         }
2694                 } else {
2695                         lun = softc->ctl_luns[ooa_hdr->lun_num];
2696                         ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
2697                             entries);
2698                 }
2699                 mtx_unlock(&softc->ctl_lock);
2700
2701                 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2702                 ooa_hdr->fill_len = ooa_hdr->fill_num *
2703                         sizeof(struct ctl_ooa_entry);
2704                 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2705                 if (retval != 0) {
2706                         printf("%s: error copying out %u bytes for OOA dump\n",
2707                                __func__, ooa_hdr->fill_len);
2708                 }
2709
2710                 getbinuptime(&ooa_hdr->cur_bt);
2711
2712                 if (cur_fill_num > ooa_hdr->alloc_num) {
2713                         ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
2714                         ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2715                 } else {
2716                         ooa_hdr->dropped_num = 0;
2717                         ooa_hdr->status = CTL_OOA_OK;
2718                 }
2719
2720                 free(entries, M_CTL);
2721                 break;
2722         }
2723         case CTL_DELAY_IO: {
2724                 struct ctl_io_delay_info *delay_info;
2725
2726                 delay_info = (struct ctl_io_delay_info *)addr;
2727
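                /*
                 * A minimal usage sketch (hypothetical values; "fd" is an
                 * assumed descriptor for the CTL control device), valid only
                 * when the kernel is built with CTL_IO_DELAY: inject a
                 * one-shot 5 second delay at the datamove stage of LUN 0.
                 *
                 *	struct ctl_io_delay_info di;
                 *
                 *	bzero(&di, sizeof(di));
                 *	di.lun_id = 0;
                 *	di.delay_type = CTL_DELAY_TYPE_ONESHOT;
                 *	di.delay_loc = CTL_DELAY_LOC_DATAMOVE;
                 *	di.delay_secs = 5;
                 *	ioctl(fd, CTL_DELAY_IO, &di);
                 */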
2728 #ifdef CTL_IO_DELAY
2729                 mtx_lock(&softc->ctl_lock);
2730
2731                 if ((delay_info->lun_id >= CTL_MAX_LUNS)
2732                  || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2733                         delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2734                 } else {
2735                         lun = softc->ctl_luns[delay_info->lun_id];
2736                         mtx_lock(&lun->lun_lock);
2737
2738                         delay_info->status = CTL_DELAY_STATUS_OK;
2739
2740                         switch (delay_info->delay_type) {
2741                         case CTL_DELAY_TYPE_CONT:
2742                                 break;
2743                         case CTL_DELAY_TYPE_ONESHOT:
2744                                 break;
2745                         default:
2746                                 delay_info->status =
2747                                         CTL_DELAY_STATUS_INVALID_TYPE;
2748                                 break;
2749                         }
2750
2751                         switch (delay_info->delay_loc) {
2752                         case CTL_DELAY_LOC_DATAMOVE:
2753                                 lun->delay_info.datamove_type =
2754                                         delay_info->delay_type;
2755                                 lun->delay_info.datamove_delay =
2756                                         delay_info->delay_secs;
2757                                 break;
2758                         case CTL_DELAY_LOC_DONE:
2759                                 lun->delay_info.done_type =
2760                                         delay_info->delay_type;
2761                                 lun->delay_info.done_delay =
2762                                         delay_info->delay_secs;
2763                                 break;
2764                         default:
2765                                 delay_info->status =
2766                                         CTL_DELAY_STATUS_INVALID_LOC;
2767                                 break;
2768                         }
2769                         mtx_unlock(&lun->lun_lock);
2770                 }
2771
2772                 mtx_unlock(&softc->ctl_lock);
2773 #else
2774                 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2775 #endif /* CTL_IO_DELAY */
2776                 break;
2777         }
2778         case CTL_GETSTATS: {
2779                 struct ctl_stats *stats;
2780                 int i;
2781
2782                 stats = (struct ctl_stats *)addr;
2783
2784                 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2785                      stats->alloc_len) {
2786                         stats->status = CTL_SS_NEED_MORE_SPACE;
2787                         stats->num_luns = softc->num_luns;
2788                         break;
2789                 }
2790                 /*
2791                  * XXX KDM no locking here.  If the LUN list changes,
2792                  * things can blow up.
2793                  */
2794                 i = 0;
2795                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2796                         retval = copyout(&lun->stats, &stats->lun_stats[i++],
2797                                          sizeof(lun->stats));
2798                         if (retval != 0)
2799                                 break;
2800                 }
2801                 stats->num_luns = softc->num_luns;
2802                 stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2803                                  softc->num_luns;
2804                 stats->status = CTL_SS_OK;
2805 #ifdef CTL_TIME_IO
2806                 stats->flags = CTL_STATS_FLAG_TIME_VALID;
2807 #else
2808                 stats->flags = CTL_STATS_FLAG_NONE;
2809 #endif
2810                 getnanouptime(&stats->timestamp);
2811                 break;
2812         }
2813         case CTL_ERROR_INJECT: {
2814                 struct ctl_error_desc *err_desc, *new_err_desc;
2815
2816                 err_desc = (struct ctl_error_desc *)addr;
2817
2818                 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2819                                       M_WAITOK | M_ZERO);
2820                 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2821
2822                 mtx_lock(&softc->ctl_lock);
2823                 if (err_desc->lun_id >= CTL_MAX_LUNS ||
2824                     (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
2825                         mtx_unlock(&softc->ctl_lock);
2826                         free(new_err_desc, M_CTL);
2827                         printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2828                                __func__, (uintmax_t)err_desc->lun_id);
2829                         retval = EINVAL;
2830                         break;
2831                 }
2832                 mtx_lock(&lun->lun_lock);
2833                 mtx_unlock(&softc->ctl_lock);
2834
2835                 /*
2836                  * We could do some checking here to verify the validity
2837                  * of the request, but given the complexity of error
2838                  * injection requests, the checking logic would be fairly
2839                  * complex.
2840                  *
2841                  * For now, if the request is invalid, it just won't get
2842                  * executed and might get deleted.
2843                  */
2844                 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2845
2846                 /*
2847                  * XXX KDM check to make sure the serial number is unique,
2848                  * in case we somehow manage to wrap.  That shouldn't
2849                  * happen for a very long time, but it's the right thing to
2850                  * do.
2851                  */
2852                 new_err_desc->serial = lun->error_serial;
2853                 err_desc->serial = lun->error_serial;
2854                 lun->error_serial++;
2855
2856                 mtx_unlock(&lun->lun_lock);
2857                 break;
2858         }
2859         case CTL_ERROR_INJECT_DELETE: {
2860                 struct ctl_error_desc *delete_desc, *desc, *desc2;
2861                 int delete_done;
2862
2863                 delete_desc = (struct ctl_error_desc *)addr;
2864                 delete_done = 0;
2865
2866                 mtx_lock(&softc->ctl_lock);
2867                 if (delete_desc->lun_id >= CTL_MAX_LUNS ||
2868                     (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
2869                         mtx_unlock(&softc->ctl_lock);
2870                         printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2871                                __func__, (uintmax_t)delete_desc->lun_id);
2872                         retval = EINVAL;
2873                         break;
2874                 }
2875                 mtx_lock(&lun->lun_lock);
2876                 mtx_unlock(&softc->ctl_lock);
2877                 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2878                         if (desc->serial != delete_desc->serial)
2879                                 continue;
2880
2881                         STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2882                                       links);
2883                         free(desc, M_CTL);
2884                         delete_done = 1;
2885                 }
2886                 mtx_unlock(&lun->lun_lock);
2887                 if (delete_done == 0) {
2888                         printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2889                                "error serial %ju on LUN %u\n", __func__,
2890                                (uintmax_t)delete_desc->serial, delete_desc->lun_id);
2891                         retval = EINVAL;
2892                         break;
2893                 }
2894                 break;
2895         }
2896         case CTL_DUMP_STRUCTS: {
2897                 int i, j, k;
2898                 struct ctl_port *port;
2899                 struct ctl_frontend *fe;
2900
2901                 mtx_lock(&softc->ctl_lock);
2902                 printf("CTL Persistent Reservation information start:\n");
2903                 for (i = 0; i < CTL_MAX_LUNS; i++) {
2904                         lun = softc->ctl_luns[i];
2905
2906                         if ((lun == NULL)
2907                          || ((lun->flags & CTL_LUN_DISABLED) != 0))
2908                                 continue;
2909
2910                         for (j = 0; j < CTL_MAX_PORTS; j++) {
2911                                 if (lun->pr_keys[j] == NULL)
2912                                         continue;
2913                                 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++) {
2914                                         if (lun->pr_keys[j][k] == 0)
2915                                                 continue;
2916                                         printf("  LUN %d port %d iid %d key "
2917                                                "%#jx\n", i, j, k,
2918                                                (uintmax_t)lun->pr_keys[j][k]);
2919                                 }
2920                         }
2921                 }
2922                 printf("CTL Persistent Reservation information end\n");
2923                 printf("CTL Ports:\n");
2924                 STAILQ_FOREACH(port, &softc->port_list, links) {
2925                         printf("  Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
2926                                "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
2927                                port->frontend->name, port->port_type,
2928                                port->physical_port, port->virtual_port,
2929                                (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
2930                         for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2931                                 if (port->wwpn_iid[j].in_use == 0 &&
2932                                     port->wwpn_iid[j].wwpn == 0 &&
2933                                     port->wwpn_iid[j].name == NULL)
2934                                         continue;
2935
2936                                 printf("    iid %u use %d WWPN %#jx '%s'\n",
2937                                     j, port->wwpn_iid[j].in_use,
2938                                     (uintmax_t)port->wwpn_iid[j].wwpn,
2939                                     port->wwpn_iid[j].name);
2940                         }
2941                 }
2942                 printf("CTL Port information end\n");
2943                 mtx_unlock(&softc->ctl_lock);
2944                 /*
2945                  * XXX KDM calling this without a lock.  We'd likely want
2946                  * to drop the lock before calling the frontend's dump
2947                  * routine anyway.
2948                  */
2949                 printf("CTL Frontends:\n");
2950                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2951                         printf("  Frontend '%s'\n", fe->name);
2952                         if (fe->fe_dump != NULL)
2953                                 fe->fe_dump();
2954                 }
2955                 printf("CTL Frontend information end\n");
2956                 break;
2957         }
2958         case CTL_LUN_REQ: {
2959                 struct ctl_lun_req *lun_req;
2960                 struct ctl_backend_driver *backend;
2961
2962                 lun_req = (struct ctl_lun_req *)addr;
2963
2964                 backend = ctl_backend_find(lun_req->backend);
2965                 if (backend == NULL) {
2966                         lun_req->status = CTL_LUN_ERROR;
2967                         snprintf(lun_req->error_str,
2968                                  sizeof(lun_req->error_str),
2969                                  "Backend \"%s\" not found.",
2970                                  lun_req->backend);
2971                         break;
2972                 }
2973                 if (lun_req->num_be_args > 0) {
2974                         lun_req->kern_be_args = ctl_copyin_args(
2975                                 lun_req->num_be_args,
2976                                 lun_req->be_args,
2977                                 lun_req->error_str,
2978                                 sizeof(lun_req->error_str));
2979                         if (lun_req->kern_be_args == NULL) {
2980                                 lun_req->status = CTL_LUN_ERROR;
2981                                 break;
2982                         }
2983                 }
2984
2985                 retval = backend->ioctl(dev, cmd, addr, flag, td);
2986
2987                 if (lun_req->num_be_args > 0) {
2988                         ctl_copyout_args(lun_req->num_be_args,
2989                                       lun_req->kern_be_args);
2990                         ctl_free_args(lun_req->num_be_args,
2991                                       lun_req->kern_be_args);
2992                 }
2993                 break;
2994         }
2995         case CTL_LUN_LIST: {
2996                 struct sbuf *sb;
2997                 struct ctl_lun_list *list;
2998                 struct ctl_option *opt;
2999
3000                 list = (struct ctl_lun_list *)addr;
3001
3002                 /*
3003                  * Allocate a fixed length sbuf here, based on the length
3004                  * of the user's buffer.  We could allocate an auto-extending
3005                  * buffer, and then tell the user how much larger our
3006                  * data is than his buffer, but that presents
3007                  * some problems:
3008                  *
3009                  * 1.  The sbuf(9) routines use a blocking malloc, and so
3010                  *     we can't hold a lock while calling them with an
3011                  *     auto-extending buffer.
3012                  *
3013                  * 2.  There is not currently a LUN reference counting
3014                  *     mechanism, outside of outstanding transactions on
3015                  *     the LUN's OOA queue.  So a LUN could go away on us
3016                  *     while we're getting the LUN number, backend-specific
3017                  *     information, etc.  Thus, given the way things
3018                  *     currently work, we need to hold the CTL lock while
3019                  *     grabbing LUN information.
3020                  *
3021                  * So, from the user's standpoint, the best thing to do is
3022                  * allocate what he thinks is a reasonable buffer length,
3023                  * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
3024                  * double the buffer length and try again.  (And repeat
3025                  * that until he succeeds.)
3026                  */
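                /*
                 * A sketch of that retry loop (illustrative only; "fd" is an
                 * assumed descriptor for the CTL control device):
                 *
                 *	struct ctl_lun_list list;
                 *	uint32_t len = 4096;
                 *	char *buf;
                 *
                 *	for (;;) {
                 *		buf = malloc(len);
                 *		bzero(&list, sizeof(list));
                 *		list.alloc_len = len;
                 *		list.lun_xml = buf;
                 *		if (ioctl(fd, CTL_LUN_LIST, &list) == -1)
                 *			err(1, "CTL_LUN_LIST");
                 *		if (list.status != CTL_LUN_LIST_NEED_MORE_SPACE)
                 *			break;
                 *		free(buf);
                 *		len *= 2;
                 *	}
                 */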
3027                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3028                 if (sb == NULL) {
3029                         list->status = CTL_LUN_LIST_ERROR;
3030                         snprintf(list->error_str, sizeof(list->error_str),
3031                                  "Unable to allocate %d bytes for LUN list",
3032                                  list->alloc_len);
3033                         break;
3034                 }
3035
3036                 sbuf_printf(sb, "<ctllunlist>\n");
3037
3038                 mtx_lock(&softc->ctl_lock);
3039                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3040                         mtx_lock(&lun->lun_lock);
3041                         retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3042                                              (uintmax_t)lun->lun);
3043
3044                         /*
3045                          * Bail out as soon as we see that we've overfilled
3046                          * the buffer.
3047                          */
3048                         if (retval != 0)
3049                                 break;
3050
3051                         retval = sbuf_printf(sb, "\t<backend_type>%s"
3052                                              "</backend_type>\n",
3053                                              (lun->backend == NULL) ? "none" :
3054                                              lun->backend->name);
3055
3056                         if (retval != 0)
3057                                 break;
3058
3059                         retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
3060                                              lun->be_lun->lun_type);
3061
3062                         if (retval != 0)
3063                                 break;
3064
3065                         if (lun->backend == NULL) {
3066                                 retval = sbuf_printf(sb, "</lun>\n");
3067                                 if (retval != 0)
3068                                         break;
3069                                 continue;
3070                         }
3071
3072                         retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
3073                                              (lun->be_lun->maxlba > 0) ?
3074                                              lun->be_lun->maxlba + 1 : 0);
3075
3076                         if (retval != 0)
3077                                 break;
3078
3079                         retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
3080                                              lun->be_lun->blocksize);
3081
3082                         if (retval != 0)
3083                                 break;
3084
3085                         retval = sbuf_printf(sb, "\t<serial_number>");
3086
3087                         if (retval != 0)
3088                                 break;
3089
3090                         retval = ctl_sbuf_printf_esc(sb,
3091                             lun->be_lun->serial_num,
3092                             sizeof(lun->be_lun->serial_num));
3093
3094                         if (retval != 0)
3095                                 break;
3096
3097                         retval = sbuf_printf(sb, "</serial_number>\n");
3098
3099                         if (retval != 0)
3100                                 break;
3101
3102                         retval = sbuf_printf(sb, "\t<device_id>");
3103
3104                         if (retval != 0)
3105                                 break;
3106
3107                         retval = ctl_sbuf_printf_esc(sb,
3108                             lun->be_lun->device_id,
3109                             sizeof(lun->be_lun->device_id));
3110
3111                         if (retval != 0)
3112                                 break;
3113
3114                         retval = sbuf_printf(sb, "</device_id>\n");
3115
3116                         if (retval != 0)
3117                                 break;
3118
3119                         if (lun->backend->lun_info != NULL) {
3120                                 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
3121                                 if (retval != 0)
3122                                         break;
3123                         }
3124                         STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
3125                                 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3126                                     opt->name, opt->value, opt->name);
3127                                 if (retval != 0)
3128                                         break;
3129                         }
3130
3131                         retval = sbuf_printf(sb, "</lun>\n");
3132
3133                         if (retval != 0)
3134                                 break;
3135                         mtx_unlock(&lun->lun_lock);
3136                 }
3137                 if (lun != NULL)
3138                         mtx_unlock(&lun->lun_lock);
3139                 mtx_unlock(&softc->ctl_lock);
3140
3141                 if ((retval != 0)
3142                  || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3143                         retval = 0;
3144                         sbuf_delete(sb);
3145                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3146                         snprintf(list->error_str, sizeof(list->error_str),
3147                                  "Out of space, %d bytes is too small",
3148                                  list->alloc_len);
3149                         break;
3150                 }
3151
3152                 sbuf_finish(sb);
3153
3154                 retval = copyout(sbuf_data(sb), list->lun_xml,
3155                                  sbuf_len(sb) + 1);
3156
3157                 list->fill_len = sbuf_len(sb) + 1;
3158                 list->status = CTL_LUN_LIST_OK;
3159                 sbuf_delete(sb);
3160                 break;
3161         }
3162         case CTL_ISCSI: {
3163                 struct ctl_iscsi *ci;
3164                 struct ctl_frontend *fe;
3165
3166                 ci = (struct ctl_iscsi *)addr;
3167
3168                 fe = ctl_frontend_find("iscsi");
3169                 if (fe == NULL) {
3170                         ci->status = CTL_ISCSI_ERROR;
3171                         snprintf(ci->error_str, sizeof(ci->error_str),
3172                             "Frontend \"iscsi\" not found.");
3173                         break;
3174                 }
3175
3176                 retval = fe->ioctl(dev, cmd, addr, flag, td);
3177                 break;
3178         }
3179         case CTL_PORT_REQ: {
3180                 struct ctl_req *req;
3181                 struct ctl_frontend *fe;
3182
3183                 req = (struct ctl_req *)addr;
3184
3185                 fe = ctl_frontend_find(req->driver);
3186                 if (fe == NULL) {
3187                         req->status = CTL_LUN_ERROR;
3188                         snprintf(req->error_str, sizeof(req->error_str),
3189                             "Frontend \"%s\" not found.", req->driver);
3190                         break;
3191                 }
3192                 if (req->num_args > 0) {
3193                         req->kern_args = ctl_copyin_args(req->num_args,
3194                             req->args, req->error_str, sizeof(req->error_str));
3195                         if (req->kern_args == NULL) {
3196                                 req->status = CTL_LUN_ERROR;
3197                                 break;
3198                         }
3199                 }
3200
3201                 if (fe->ioctl)
3202                         retval = fe->ioctl(dev, cmd, addr, flag, td);
3203                 else
3204                         retval = ENODEV;
3205
3206                 if (req->num_args > 0) {
3207                         ctl_copyout_args(req->num_args, req->kern_args);
3208                         ctl_free_args(req->num_args, req->kern_args);
3209                 }
3210                 break;
3211         }
3212         case CTL_PORT_LIST: {
3213                 struct sbuf *sb;
3214                 struct ctl_port *port;
3215                 struct ctl_lun_list *list;
3216                 struct ctl_option *opt;
3217                 int j;
3218                 uint32_t plun;
3219
3220                 list = (struct ctl_lun_list *)addr;
3221
3222                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3223                 if (sb == NULL) {
3224                         list->status = CTL_LUN_LIST_ERROR;
3225                         snprintf(list->error_str, sizeof(list->error_str),
3226                                  "Unable to allocate %d bytes for port list",
3227                                  list->alloc_len);
3228                         break;
3229                 }
3230
3231                 sbuf_printf(sb, "<ctlportlist>\n");
3232
3233                 mtx_lock(&softc->ctl_lock);
3234                 STAILQ_FOREACH(port, &softc->port_list, links) {
3235                         retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
3236                                              (uintmax_t)port->targ_port);
3237
3238                         /*
3239                          * Bail out as soon as we see that we've overfilled
3240                          * the buffer.
3241                          */
3242                         if (retval != 0)
3243                                 break;
3244
3245                         retval = sbuf_printf(sb, "\t<frontend_type>%s"
3246                             "</frontend_type>\n", port->frontend->name);
3247                         if (retval != 0)
3248                                 break;
3249
3250                         retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
3251                                              port->port_type);
3252                         if (retval != 0)
3253                                 break;
3254
3255                         retval = sbuf_printf(sb, "\t<online>%s</online>\n",
3256                             (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
3257                         if (retval != 0)
3258                                 break;
3259
3260                         retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
3261                             port->port_name);
3262                         if (retval != 0)
3263                                 break;
3264
3265                         retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
3266                             port->physical_port);
3267                         if (retval != 0)
3268                                 break;
3269
3270                         retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
3271                             port->virtual_port);
3272                         if (retval != 0)
3273                                 break;
3274
3275                         if (port->target_devid != NULL) {
3276                                 sbuf_printf(sb, "\t<target>");
3277                                 ctl_id_sbuf(port->target_devid, sb);
3278                                 sbuf_printf(sb, "</target>\n");
3279                         }
3280
3281                         if (port->port_devid != NULL) {
3282                                 sbuf_printf(sb, "\t<port>");
3283                                 ctl_id_sbuf(port->port_devid, sb);
3284                                 sbuf_printf(sb, "</port>\n");
3285                         }
3286
3287                         if (port->port_info != NULL) {
3288                                 retval = port->port_info(port->onoff_arg, sb);
3289                                 if (retval != 0)
3290                                         break;
3291                         }
3292                         STAILQ_FOREACH(opt, &port->options, links) {
3293                                 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3294                                     opt->name, opt->value, opt->name);
3295                                 if (retval != 0)
3296                                         break;
3297                         }
3298
3299                         if (port->lun_map != NULL) {
3300                                 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
3301                                 for (j = 0; j < CTL_MAX_LUNS; j++) {
3302                                         plun = ctl_lun_map_from_port(port, j);
3303                                         if (plun >= CTL_MAX_LUNS)
3304                                                 continue;
3305                                         sbuf_printf(sb,
3306                                             "\t<lun id=\"%u\">%u</lun>\n",
3307                                             j, plun);
3308                                 }
3309                         }
3310
3311                         for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3312                                 if (port->wwpn_iid[j].in_use == 0 ||
3313                                     (port->wwpn_iid[j].wwpn == 0 &&
3314                                      port->wwpn_iid[j].name == NULL))
3315                                         continue;
3316
3317                                 if (port->wwpn_iid[j].name != NULL)
3318                                         retval = sbuf_printf(sb,
3319                                             "\t<initiator id=\"%u\">%s</initiator>\n",
3320                                             j, port->wwpn_iid[j].name);
3321                                 else
3322                                         retval = sbuf_printf(sb,
3323                                             "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
3324                                             j, port->wwpn_iid[j].wwpn);
3325                                 if (retval != 0)
3326                                         break;
3327                         }
3328                         if (retval != 0)
3329                                 break;
3330
3331                         retval = sbuf_printf(sb, "</targ_port>\n");
3332                         if (retval != 0)
3333                                 break;
3334                 }
3335                 mtx_unlock(&softc->ctl_lock);
3336
3337                 if ((retval != 0)
3338                  || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
3339                         retval = 0;
3340                         sbuf_delete(sb);
3341                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3342                         snprintf(list->error_str, sizeof(list->error_str),
3343                                  "Out of space, %d bytes is too small",
3344                                  list->alloc_len);
3345                         break;
3346                 }
3347
3348                 sbuf_finish(sb);
3349
3350                 retval = copyout(sbuf_data(sb), list->lun_xml,
3351                                  sbuf_len(sb) + 1);
3352
3353                 list->fill_len = sbuf_len(sb) + 1;
3354                 list->status = CTL_LUN_LIST_OK;
3355                 sbuf_delete(sb);
3356                 break;
3357         }
3358         case CTL_LUN_MAP: {
3359                 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr;
3360                 struct ctl_port *port;
3361
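                /*
                 * Summary of the request encoding handled below: lm->plun is
                 * the port-visible LUN, lm->lun the global LUN, and
                 * UINT32_MAX acts as a wildcard.  Valid plun and lun set one
                 * mapping; valid plun with lun == UINT32_MAX clears one
                 * mapping; plun == UINT32_MAX tears the whole map down when
                 * lun is also UINT32_MAX, and otherwise creates an empty map.
                 */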
3362                 mtx_lock(&softc->ctl_lock);
3363                 if (lm->port < softc->port_min ||
3364                     lm->port >= softc->port_max ||
3365                     (port = softc->ctl_ports[lm->port]) == NULL) {
3366                         mtx_unlock(&softc->ctl_lock);
3367                         return (ENXIO);
3368                 }
3369                 if (port->status & CTL_PORT_STATUS_ONLINE) {
3370                         STAILQ_FOREACH(lun, &softc->lun_list, links) {
3371                                 if (ctl_lun_map_to_port(port, lun->lun) >=
3372                                     CTL_MAX_LUNS)
3373                                         continue;
3374                                 mtx_lock(&lun->lun_lock);
3375                                 ctl_est_ua_port(lun, lm->port, -1,
3376                                     CTL_UA_LUN_CHANGE);
3377                                 mtx_unlock(&lun->lun_lock);
3378                         }
3379                 }
3380                 mtx_unlock(&softc->ctl_lock); /* XXX: port_enable sleeps */
3381                 if (lm->plun < CTL_MAX_LUNS) {
3382                         if (lm->lun == UINT32_MAX)
3383                                 retval = ctl_lun_map_unset(port, lm->plun);
3384                         else if (lm->lun < CTL_MAX_LUNS &&
3385                             softc->ctl_luns[lm->lun] != NULL)
3386                                 retval = ctl_lun_map_set(port, lm->plun, lm->lun);
3387                         else
3388                                 return (ENXIO);
3389                 } else if (lm->plun == UINT32_MAX) {
3390                         if (lm->lun == UINT32_MAX)
3391                                 retval = ctl_lun_map_deinit(port);
3392                         else
3393                                 retval = ctl_lun_map_init(port);
3394                 } else
3395                         return (ENXIO);
3396                 if (port->status & CTL_PORT_STATUS_ONLINE)
3397                         ctl_isc_announce_port(port);
3398                 break;
3399         }
3400         default: {
3401                 /* XXX KDM should we fix this? */
3402 #if 0
3403                 struct ctl_backend_driver *backend;
3404                 unsigned int type;
3405                 int found;
3406
3407                 found = 0;
3408
3409                 /*
3410                  * We encode the backend type as the ioctl type for backend
3411                  * ioctls.  So parse it out here, and then search for a
3412                  * backend of this type.
3413                  */
3414                 type = _IOC_TYPE(cmd);
3415
3416                 STAILQ_FOREACH(backend, &softc->be_list, links) {
3417                         if (backend->type == type) {
3418                                 found = 1;
3419                                 break;
3420                         }
3421                 }
3422                 if (found == 0) {
3423                         printf("ctl: unknown ioctl command %#lx or backend "
3424                                "%d\n", cmd, type);
3425                         retval = EINVAL;
3426                         break;
3427                 }
3428                 retval = backend->ioctl(dev, cmd, addr, flag, td);
3429 #endif
3430                 retval = ENOTTY;
3431                 break;
3432         }
3433         }
3434         return (retval);
3435 }
3436
3437 uint32_t
3438 ctl_get_initindex(struct ctl_nexus *nexus)
3439 {
3440         return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3441 }
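/*
 * For example (illustrative): targ_port 2 and initid 5 yield the flat index
 * 5 + 2 * CTL_MAX_INIT_PER_PORT, giving each (port, initiator) pair a unique
 * slot in per-LUN arrays such as the PR key tables further below.
 */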
3442
3443 int
3444 ctl_lun_map_init(struct ctl_port *port)
3445 {
3446         struct ctl_softc *softc = port->ctl_softc;
3447         struct ctl_lun *lun;
3448         uint32_t i;
3449
3450         if (port->lun_map == NULL)
3451                 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
3452                     M_CTL, M_NOWAIT);
3453         if (port->lun_map == NULL)
3454                 return (ENOMEM);
3455         for (i = 0; i < CTL_MAX_LUNS; i++)
3456                 port->lun_map[i] = UINT32_MAX;
3457         if (port->status & CTL_PORT_STATUS_ONLINE) {
3458                 if (port->lun_disable != NULL) {
3459                         STAILQ_FOREACH(lun, &softc->lun_list, links)
3460                                 port->lun_disable(port->targ_lun_arg, lun->lun);
3461                 }
3462                 ctl_isc_announce_port(port);
3463         }
3464         return (0);
3465 }
3466
3467 int
3468 ctl_lun_map_deinit(struct ctl_port *port)
3469 {
3470         struct ctl_softc *softc = port->ctl_softc;
3471         struct ctl_lun *lun;
3472
3473         if (port->lun_map == NULL)
3474                 return (0);
3475         free(port->lun_map, M_CTL);
3476         port->lun_map = NULL;
3477         if (port->status & CTL_PORT_STATUS_ONLINE) {
3478                 if (port->lun_enable != NULL) {
3479                         STAILQ_FOREACH(lun, &softc->lun_list, links)
3480                                 port->lun_enable(port->targ_lun_arg, lun->lun);
3481                 }
3482                 ctl_isc_announce_port(port);
3483         }
3484         return (0);
3485 }
3486
3487 int
3488 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
3489 {
3490         int status;
3491         uint32_t old;
3492
3493         if (port->lun_map == NULL) {
3494                 status = ctl_lun_map_init(port);
3495                 if (status != 0)
3496                         return (status);
3497         }
3498         old = port->lun_map[plun];
3499         port->lun_map[plun] = glun;
3500         if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
3501                 if (port->lun_enable != NULL)
3502                         port->lun_enable(port->targ_lun_arg, plun);
3503                 ctl_isc_announce_port(port);
3504         }
3505         return (0);
3506 }
3507
3508 int
3509 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
3510 {
3511         uint32_t old;
3512
3513         if (port->lun_map == NULL)
3514                 return (0);
3515         old = port->lun_map[plun];
3516         port->lun_map[plun] = UINT32_MAX;
3517         if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
3518                 if (port->lun_disable != NULL)
3519                         port->lun_disable(port->targ_lun_arg, plun);
3520                 ctl_isc_announce_port(port);
3521         }
3522         return (0);
3523 }
3524
3525 uint32_t
3526 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
3527 {
3528
3529         if (port == NULL)
3530                 return (UINT32_MAX);
3531         if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
3532                 return (lun_id);
3533         return (port->lun_map[lun_id]);
3534 }
3535
3536 uint32_t
3537 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
3538 {
3539         uint32_t i;
3540
3541         if (port == NULL)
3542                 return (UINT32_MAX);
3543         if (port->lun_map == NULL)
3544                 return (lun_id);
3545         for (i = 0; i < CTL_MAX_LUNS; i++) {
3546                 if (port->lun_map[i] == lun_id)
3547                         return (i);
3548         }
3549         return (UINT32_MAX);
3550 }
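/*
 * Note the asymmetry: ctl_lun_map_from_port() is a direct O(1) lookup of the
 * global LUN backing a port-visible LUN, while ctl_lun_map_to_port() must
 * linearly scan the map for the port-visible LUN exposing a given global
 * LUN, returning UINT32_MAX if the LUN is not mapped on that port.
 */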
3551
3552 uint32_t
3553 ctl_decode_lun(uint64_t encoded)
3554 {
3555         uint8_t lun[8];
3556         uint32_t result = 0xffffffff;
3557
3558         be64enc(lun, encoded);
3559         switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
3560         case RPL_LUNDATA_ATYP_PERIPH:
3561                 if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
3562                     lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
3563                         result = lun[1];
3564                 break;
3565         case RPL_LUNDATA_ATYP_FLAT:
3566                 if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
3567                     lun[6] == 0 && lun[7] == 0)
3568                         result = ((lun[0] & 0x3f) << 8) + lun[1];
3569                 break;
3570         case RPL_LUNDATA_ATYP_EXTLUN:
3571                 switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
3572                 case 0x02:
3573                         switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
3574                         case 0x00:
3575                                 result = lun[1];
3576                                 break;
3577                         case 0x10:
3578                                 result = (lun[1] << 16) + (lun[2] << 8) +
3579                                     lun[3];
3580                                 break;
3581                         case 0x20:
3582                                 if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
3583                                         result = (lun[2] << 24) +
3584                                             (lun[3] << 16) + (lun[4] << 8) +
3585                                             lun[5];
3586                                 break;
3587                         }
3588                         break;
3589                 case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
3590                         result = 0xffffffff;
3591                         break;
3592                 }
3593                 break;
3594         }
3595         return (result);
3596 }
3597
3598 uint64_t
3599 ctl_encode_lun(uint32_t decoded)
3600 {
3601         uint64_t l = decoded;
3602
3603         if (l <= 0xff)
3604                 return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
3605         if (l <= 0x3fff)
3606                 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
3607         if (l <= 0xffffff)
3608                 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
3609                     (l << 32));
3610         return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
3611 }
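/*
 * Worked example (illustrative): ctl_encode_lun(1) selects the peripheral
 * device addressing method, yielding 0x0001000000000000, which
 * ctl_decode_lun() maps back to 1.  A flat-addressed encoding such as
 * 0x4001000000000000 likewise decodes to LUN 1.
 */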
3612
3613 static struct ctl_port *
3614 ctl_io_port(struct ctl_io_hdr *io_hdr)
3615 {
3616
3617         return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
3618 }
3619
3620 int
3621 ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
3622 {
3623         int i;
3624
3625         for (i = first; i < last; i++) {
3626                 if ((mask[i / 32] & (1 << (i % 32))) == 0)
3627                         return (i);
3628         }
3629         return (-1);
3630 }
3631
3632 int
3633 ctl_set_mask(uint32_t *mask, uint32_t bit)
3634 {
3635         uint32_t chunk, piece;
3636
3637         chunk = bit >> 5;
3638         piece = bit % (sizeof(uint32_t) * 8);
3639
3640         if ((mask[chunk] & (1 << piece)) != 0)
3641                 return (-1);
3642         else
3643                 mask[chunk] |= (1 << piece);
3644
3645         return (0);
3646 }
3647
3648 int
3649 ctl_clear_mask(uint32_t *mask, uint32_t bit)
3650 {
3651         uint32_t chunk, piece;
3652
3653         chunk = bit >> 5;
3654         piece = bit % (sizeof(uint32_t) * 8);
3655
3656         if ((mask[chunk] & (1 << piece)) == 0)
3657                 return (-1);
3658         else
3659                 mask[chunk] &= ~(1 << piece);
3660
3661         return (0);
3662 }
3663
3664 int
3665 ctl_is_set(uint32_t *mask, uint32_t bit)
3666 {
3667         uint32_t chunk, piece;
3668
3669         chunk = bit >> 5;
3670         piece = bit % (sizeof(uint32_t) * 8);
3671
3672         if ((mask[chunk] & (1 << piece)) == 0)
3673                 return (0);
3674         else
3675                 return (1);
3676 }
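/*
 * These helpers treat the mask as a flat bit array: bit b lives in word
 * b >> 5 (equivalently b / 32) at position b % 32.  For example
 * (illustrative), bit 37 is bit 5 of mask[1], so ctl_set_mask(mask, 37)
 * ORs in (1 << 5) there, and ctl_ffz() skips it when scanning for a free
 * slot.
 */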
3677
3678 static uint64_t
3679 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
3680 {
3681         uint64_t *t;
3682
3683         t = lun->pr_keys[residx / CTL_MAX_INIT_PER_PORT];
3684         if (t == NULL)
3685                 return (0);
3686         return (t[residx % CTL_MAX_INIT_PER_PORT]);
3687 }
3688
3689 static void
3690 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
3691 {
3692         uint64_t *t;
3693
3694         t = lun->pr_keys[residx / CTL_MAX_INIT_PER_PORT];
3695         if (t == NULL)
3696                 return;
3697         t[residx % CTL_MAX_INIT_PER_PORT] = 0;
3698 }
3699
3700 static void
3701 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
3702 {
3703         uint64_t *p;
3704         u_int i;
3705
3706         i = residx / CTL_MAX_INIT_PER_PORT;
3707         if (lun->pr_keys[i] != NULL)
3708                 return;
3709         mtx_unlock(&lun->lun_lock);
3710         p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
3711             M_WAITOK | M_ZERO);
3712         mtx_lock(&lun->lun_lock);
3713         if (lun->pr_keys[i] == NULL)
3714                 lun->pr_keys[i] = p;
3715         else
3716                 free(p, M_CTL);
3717 }
3718
3719 static void
3720 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
3721 {
3722         uint64_t *t;
3723
3724         t = lun->pr_keys[residx / CTL_MAX_INIT_PER_PORT];
3725         KASSERT(t != NULL, ("prkey %d is not allocated", residx));
3726         t[residx % CTL_MAX_INIT_PER_PORT] = key;
3727 }
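/*
 * The PR key table is a two-level sparse array: pr_keys[] holds one slot per
 * port, each lazily allocated by ctl_alloc_prkey() to hold
 * CTL_MAX_INIT_PER_PORT keys, so a key is addressed by the flat residx
 * produced by ctl_get_initindex() above.
 */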
3728
3729 /*
3730  * ctl_softc, pool_name, total_ctl_io are passed in.
3731  * npool is passed out.
3732  */
3733 int
3734 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
3735                 uint32_t total_ctl_io, void **npool)
3736 {
3737 #ifdef IO_POOLS
3738         struct ctl_io_pool *pool;
3739
3740         pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3741                                             M_NOWAIT | M_ZERO);
3742         if (pool == NULL)
3743                 return (ENOMEM);
3744
3745         snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
3746         pool->ctl_softc = ctl_softc;
3747         pool->zone = uma_zsecond_create(pool->name, NULL,
3748             NULL, NULL, NULL, ctl_softc->io_zone);
3749         /* uma_prealloc(pool->zone, total_ctl_io); */
3750
3751         *npool = pool;
3752 #else
3753         *npool = ctl_softc->io_zone;
3754 #endif
3755         return (0);
3756 }
3757
3758 void
3759 ctl_pool_free(struct ctl_io_pool *pool)
3760 {
3761
3762         if (pool == NULL)
3763                 return;
3764
3765 #ifdef IO_POOLS
3766         uma_zdestroy(pool->zone);
3767         free(pool, M_CTL);
3768 #endif
3769 }
3770
3771 union ctl_io *
3772 ctl_alloc_io(void *pool_ref)
3773 {
3774         union ctl_io *io;
3775 #ifdef IO_POOLS
3776         struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3777
3778         io = uma_zalloc(pool->zone, M_WAITOK);
3779 #else
3780         io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
3781 #endif
3782         if (io != NULL)
3783                 io->io_hdr.pool = pool_ref;
3784         return (io);
3785 }
3786
3787 union ctl_io *
3788 ctl_alloc_io_nowait(void *pool_ref)
3789 {
3790         union ctl_io *io;
3791 #ifdef IO_POOLS
3792         struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3793
3794         io = uma_zalloc(pool->zone, M_NOWAIT);
3795 #else
3796         io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
3797 #endif
3798         if (io != NULL)
3799                 io->io_hdr.pool = pool_ref;
3800         return (io);
3801 }
3802
3803 void
3804 ctl_free_io(union ctl_io *io)
3805 {
3806 #ifdef IO_POOLS
3807         struct ctl_io_pool *pool;
3808 #endif
3809
3810         if (io == NULL)
3811                 return;
3812
3813 #ifdef IO_POOLS
3814         pool = (struct ctl_io_pool *)io->io_hdr.pool;
3815         uma_zfree(pool->zone, io);
3816 #else
3817         uma_zfree((uma_zone_t)io->io_hdr.pool, io);
3818 #endif
3819 }
3820
3821 void
3822 ctl_zero_io(union ctl_io *io)
3823 {
3824         void *pool_ref;
3825
3826         if (io == NULL)
3827                 return;
3828
3829         /*
3830          * May need to preserve linked list pointers at some point too.
3831          */
3832         pool_ref = io->io_hdr.pool;
3833         memset(io, 0, sizeof(*io));
3834         io->io_hdr.pool = pool_ref;
3835 }
3836
3837 int
3838 ctl_expand_number(const char *buf, uint64_t *num)
3839 {
3840         char *endptr;
3841         uint64_t number;
3842         unsigned shift;
3843
3844         number = strtoq(buf, &endptr, 0);
3845
3846         switch (tolower((unsigned char)*endptr)) {
3847         case 'e':
3848                 shift = 60;
3849                 break;
3850         case 'p':
3851                 shift = 50;
3852                 break;
3853         case 't':
3854                 shift = 40;
3855                 break;
3856         case 'g':
3857                 shift = 30;
3858                 break;
3859         case 'm':
3860                 shift = 20;
3861                 break;
3862         case 'k':
3863                 shift = 10;
3864                 break;
3865         case 'b':
3866         case '\0': /* No unit. */
3867                 *num = number;
3868                 return (0);
3869         default:
3870                 /* Unrecognized unit. */
3871                 return (-1);
3872         }
3873
3874         if ((number << shift) >> shift != number) {
3875                 /* Overflow */
3876                 return (-1);
3877         }
3878         *num = number << shift;
3879         return (0);
3880 }
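/*
 * Usage sketch (illustrative values): ctl_expand_number("4m", &num) parses
 * the leading number with strtoq(), applies the 'm' suffix as a shift of 20
 * and stores 4194304 in num; a value whose shifted result would overflow
 * 64 bits, or an unrecognized suffix, returns -1 instead.
 */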
3881
3883 /*
3884  * This routine could be used in the future to load default and/or saved
3885  * mode page parameters for a particular LUN.
3886  */
3887 static int
3888 ctl_init_page_index(struct ctl_lun *lun)
3889 {
3890         int i, page_code;
3891         struct ctl_page_index *page_index;
3892         const char *value;
3893         uint64_t ival;
3894
3895         memcpy(&lun->mode_pages.index, page_index_template,
3896                sizeof(page_index_template));
3897
3898         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
3899
3900                 page_index = &lun->mode_pages.index[i];
3901                 if (lun->be_lun->lun_type == T_DIRECT &&
3902                     (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
3903                         continue;
3904                 if (lun->be_lun->lun_type == T_PROCESSOR &&
3905                     (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
3906                         continue;
3907                 if (lun->be_lun->lun_type == T_CDROM &&
3908                     (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
3909                         continue;
3910
3911                 page_code = page_index->page_code & SMPH_PC_MASK;
3912                 switch (page_code) {
3913                 case SMS_RW_ERROR_RECOVERY_PAGE: {
3914                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
3915                             ("subpage %#x for page %#x is incorrect!",
3916                             page_index->subpage, page_code));
3917                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
3918                                &rw_er_page_default,
3919                                sizeof(rw_er_page_default));
3920                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
3921                                &rw_er_page_changeable,
3922                                sizeof(rw_er_page_changeable));
3923                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
3924                                &rw_er_page_default,
3925                                sizeof(rw_er_page_default));
3926                         memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
3927                                &rw_er_page_default,
3928                                sizeof(rw_er_page_default));
3929                         page_index->page_data =
3930                                 (uint8_t *)lun->mode_pages.rw_er_page;
3931                         break;
3932                 }
3933                 case SMS_FORMAT_DEVICE_PAGE: {
3934                         struct scsi_format_page *format_page;
3935
3936                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
3937                             ("subpage %#x for page %#x is incorrect!",
3938                             page_index->subpage, page_code));
3939
3940                         /*
3941                          * Sectors per track are set above.  Bytes per
3942                          * sector need to be set here on a per-LUN basis.
3943                          */
3944                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
3945                                &format_page_default,
3946                                sizeof(format_page_default));
3947                         memcpy(&lun->mode_pages.format_page[
3948                                CTL_PAGE_CHANGEABLE], &format_page_changeable,
3949                                sizeof(format_page_changeable));
3950                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
3951                                &format_page_default,
3952                                sizeof(format_page_default));
3953                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
3954                                &format_page_default,
3955                                sizeof(format_page_default));
3956
3957                         format_page = &lun->mode_pages.format_page[
3958                                 CTL_PAGE_CURRENT];
3959                         scsi_ulto2b(lun->be_lun->blocksize,
3960                                     format_page->bytes_per_sector);
3961
3962                         format_page = &lun->mode_pages.format_page[
3963                                 CTL_PAGE_DEFAULT];
3964                         scsi_ulto2b(lun->be_lun->blocksize,
3965                                     format_page->bytes_per_sector);
3966
3967                         format_page = &lun->mode_pages.format_page[
3968                                 CTL_PAGE_SAVED];
3969                         scsi_ulto2b(lun->be_lun->blocksize,
3970                                     format_page->bytes_per_sector);
3971
3972                         page_index->page_data =
3973                                 (uint8_t *)lun->mode_pages.format_page;
3974                         break;
3975                 }
3976                 case SMS_RIGID_DISK_PAGE: {
3977                         struct scsi_rigid_disk_page *rigid_disk_page;
3978                         uint32_t sectors_per_cylinder;
3979                         uint64_t cylinders;
3980 #ifndef __XSCALE__
3981                         int shift;
3982 #endif /* !__XSCALE__ */
3983
3984                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
3985                             ("subpage %#x for page %#x is incorrect!",
3986                             page_index->subpage, page_code));
3987
3988                         /*
3989                          * Rotation rate and sectors per track are set
3990                          * above.  We calculate the cylinders here based on
3991                          * capacity.  Due to the number of heads and
3992                          * sectors per track we're using, smaller arrays
3993                          * may turn out to have 0 cylinders.  Linux and
3994                          * FreeBSD don't pay attention to these mode pages
3995                          * to figure out capacity, but Solaris does.  It
3996                          * seems to deal with 0 cylinders just fine, and
3997                          * works out a fake geometry based on the capacity.
3998                          */
3999                         memcpy(&lun->mode_pages.rigid_disk_page[
4000                                CTL_PAGE_DEFAULT], &rigid_disk_page_default,
4001                                sizeof(rigid_disk_page_default));
4002                         memcpy(&lun->mode_pages.rigid_disk_page[
4003                                CTL_PAGE_CHANGEABLE], &rigid_disk_page_changeable,
4004                                sizeof(rigid_disk_page_changeable));
4005
4006                         sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
4007                                 CTL_DEFAULT_HEADS;
4008
4009                         /*
4010                          * The divide method below is more accurate, but
4011                          * 64-bit division pulls in the compiler's software
4012                          * helper (__udivdi3()) on 32-bit platforms such as
4013                          * i386, and on the XScale that helper is slow.
4014                          *
4015                          * The shift method for cylinder calculation is
4016                          * exact if sectors_per_cylinder is a power of 2.
4017                          * Otherwise it divides by the largest power of 2
4018                          * not exceeding it, and so slightly overestimates
4019                          * the cylinder count.
4020                          */
4021 #ifdef  __XSCALE__
4022                         cylinders = (lun->be_lun->maxlba + 1) /
4023                                 sectors_per_cylinder;
4024 #else
4025                         for (shift = 31; shift > 0; shift--) {
4026                                 if (sectors_per_cylinder & (1 << shift))
4027                                         break;
4028                         }
4029                         cylinders = (lun->be_lun->maxlba + 1) >> shift;
4030 #endif
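                        /*
                         * Illustrative example: if sectors_per_cylinder
                         * happened to be 32768 (2^15), the shift loop finds
                         * shift = 15 and (maxlba + 1) >> 15 is the exact
                         * quotient.  For a non-power-of-2 value such as 384
                         * (0x180), the highest set bit gives shift = 8, so
                         * we divide by 256 instead of 384 and overestimate
                         * the cylinder count by roughly 50%.
                         */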
4031
4032                         /*
4033                          * We've only got 3 bytes, or 24 bits, for the
4034                          * cylinder count in the mode page.  If we're over,
4035                          * clamp it to the maximum, 0xffffff (2^24 - 1).
4036                          */
4037                         if (cylinders > 0xffffff)
4038                                 cylinders = 0xffffff;
4039
4040                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4041                                 CTL_PAGE_DEFAULT];
4042                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4043
4044                         if ((value = ctl_get_opt(&lun->be_lun->options,
4045                             "rpm")) != NULL) {
4046                                 scsi_ulto2b(strtol(value, NULL, 0),
4047                                      rigid_disk_page->rotation_rate);
4048                         }
4049
4050                         memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
4051                                &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4052                                sizeof(rigid_disk_page_default));
4053                         memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
4054                                &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4055                                sizeof(rigid_disk_page_default));
4056
4057                         page_index->page_data =
4058                                 (uint8_t *)lun->mode_pages.rigid_disk_page;
4059                         break;
4060                 }
4061                 case SMS_CACHING_PAGE: {
4062                         struct scsi_caching_page *caching_page;
4063
4064                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4065                             ("subpage %#x for page %#x is incorrect!",
4066                             page_index->subpage, page_code));
4067                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4068                                &caching_page_default,
4069                                sizeof(caching_page_default));
4070                         memcpy(&lun->mode_pages.caching_page[
4071                                CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4072                                sizeof(caching_page_changeable));
4073                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4074                                &caching_page_default,
4075                                sizeof(caching_page_default));
4076                         caching_page = &lun->mode_pages.caching_page[
4077                             CTL_PAGE_SAVED];
4078                         value = ctl_get_opt(&lun->be_lun->options, "writecache");
4079                         if (value != NULL && strcmp(value, "off") == 0)
4080                                 caching_page->flags1 &= ~SCP_WCE;
4081                         value = ctl_get_opt(&lun->be_lun->options, "readcache");
4082                         if (value != NULL && strcmp(value, "off") == 0)
4083                                 caching_page->flags1 |= SCP_RCD;
4084                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4085                                &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4086                                sizeof(caching_page_default));
4087                         page_index->page_data =
4088                                 (uint8_t *)lun->mode_pages.caching_page;
4089                         break;
4090                 }
4091                 case SMS_CONTROL_MODE_PAGE: {
4092                         switch (page_index->subpage) {
4093                         case SMS_SUBPAGE_PAGE_0: {
4094                                 struct scsi_control_page *control_page;
4095
4096                                 memcpy(&lun->mode_pages.control_page[
4097                                     CTL_PAGE_DEFAULT],
4098                                        &control_page_default,
4099                                        sizeof(control_page_default));
4100                                 memcpy(&lun->mode_pages.control_page[
4101                                     CTL_PAGE_CHANGEABLE],
4102                                        &control_page_changeable,
4103                                        sizeof(control_page_changeable));
4104                                 memcpy(&lun->mode_pages.control_page[
4105                                     CTL_PAGE_SAVED],
4106                                        &control_page_default,
4107                                        sizeof(control_page_default));
4108                                 control_page = &lun->mode_pages.control_page[
4109                                     CTL_PAGE_SAVED];
4110                                 value = ctl_get_opt(&lun->be_lun->options,
4111                                     "reordering");
4112                                 if (value != NULL &&
4113                                     strcmp(value, "unrestricted") == 0) {
4114                                         control_page->queue_flags &=
4115                                             ~SCP_QUEUE_ALG_MASK;
4116                                         control_page->queue_flags |=
4117                                             SCP_QUEUE_ALG_UNRESTRICTED;
4118                                 }
4119                                 memcpy(&lun->mode_pages.control_page[
4120                                     CTL_PAGE_CURRENT],
4121                                        &lun->mode_pages.control_page[
4122                                     CTL_PAGE_SAVED],
4123                                        sizeof(control_page_default));
4124                                 page_index->page_data =
4125                                     (uint8_t *)lun->mode_pages.control_page;
4126                                 break;
4127                         }
4128                         case 0x01:
4129                                 memcpy(&lun->mode_pages.control_ext_page[
4130                                     CTL_PAGE_DEFAULT],
4131                                        &control_ext_page_default,
4132                                        sizeof(control_ext_page_default));
4133                                 memcpy(&lun->mode_pages.control_ext_page[
4134                                     CTL_PAGE_CHANGEABLE],
4135                                        &control_ext_page_changeable,
4136                                        sizeof(control_ext_page_changeable));
4137                                 memcpy(&lun->mode_pages.control_ext_page[
4138                                     CTL_PAGE_SAVED],
4139                                        &control_ext_page_default,
4140                                        sizeof(control_ext_page_default));
4141                                 memcpy(&lun->mode_pages.control_ext_page[
4142                                     CTL_PAGE_CURRENT],
4143                                        &lun->mode_pages.control_ext_page[
4144                                     CTL_PAGE_SAVED],
4145                                        sizeof(control_ext_page_default));
4146                                 page_index->page_data =
4147                                     (uint8_t *)lun->mode_pages.control_ext_page;
4148                                 break;
4149                         default:
4150                                 panic("subpage %#x for page %#x is incorrect!",
4151                                       page_index->subpage, page_code);
4152                         }
4153                         break;
4154                 }
4155                 case SMS_INFO_EXCEPTIONS_PAGE: {
4156                         switch (page_index->subpage) {
4157                         case SMS_SUBPAGE_PAGE_0:
4158                                 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
4159                                        &ie_page_default,
4160                                        sizeof(ie_page_default));
4161                                 memcpy(&lun->mode_pages.ie_page[
4162                                        CTL_PAGE_CHANGEABLE], &ie_page_changeable,
4163                                        sizeof(ie_page_changeable));
4164                                 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
4165                                        &ie_page_default,
4166                                        sizeof(ie_page_default));
4167                                 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
4168                                        &ie_page_default,
4169                                        sizeof(ie_page_default));
4170                                 page_index->page_data =
4171                                         (uint8_t *)lun->mode_pages.ie_page;
4172                                 break;
4173                         case 0x02: {
4174                                 struct ctl_logical_block_provisioning_page *page;
4175
4176                                 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
4177                                        &lbp_page_default,
4178                                        sizeof(lbp_page_default));
4179                                 memcpy(&lun->mode_pages.lbp_page[
4180                                        CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
4181                                        sizeof(lbp_page_changeable));
4182                                 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4183                                        &lbp_page_default,
4184                                        sizeof(lbp_page_default));
4185                                 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
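                                /*
                                 * Each threshold option below is a byte count
                                 * (ctl_expand_number() parses the value,
                                 * accepting the usual size suffixes, e.g.
                                 * "1G").  Convert it to logical blocks,
                                 * falling back to 512-byte units if the
                                 * backend has not reported a block size, then
                                 * shift by CTL_LBP_EXPONENT, since descriptor
                                 * counts are expressed in units of
                                 * 2^CTL_LBP_EXPONENT blocks.
                                 */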
4186                                 value = ctl_get_opt(&lun->be_lun->options,
4187                                     "avail-threshold");
4188                                 if (value != NULL &&
4189                                     ctl_expand_number(value, &ival) == 0) {
4190                                         page->descr[0].flags |= SLBPPD_ENABLED |
4191                                             SLBPPD_ARMING_DEC;
4192                                         if (lun->be_lun->blocksize)
4193                                                 ival /= lun->be_lun->blocksize;
4194                                         else
4195                                                 ival /= 512;
4196                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4197                                             page->descr[0].count);
4198                                 }
4199                                 value = ctl_get_opt(&lun->be_lun->options,
4200                                     "used-threshold");
4201                                 if (value != NULL &&
4202                                     ctl_expand_number(value, &ival) == 0) {
4203                                         page->descr[1].flags |= SLBPPD_ENABLED |
4204                                             SLBPPD_ARMING_INC;
4205                                         if (lun->be_lun->blocksize)
4206                                                 ival /= lun->be_lun->blocksize;
4207                                         else
4208                                                 ival /= 512;
4209                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4210                                             page->descr[1].count);
4211                                 }
4212                                 value = ctl_get_opt(&lun->be_lun->options,
4213                                     "pool-avail-threshold");
4214                                 if (value != NULL &&
4215                                     ctl_expand_number(value, &ival) == 0) {
4216                                         page->descr[2].flags |= SLBPPD_ENABLED |
4217                                             SLBPPD_ARMING_DEC;
4218                                         if (lun->be_lun->blocksize)
4219                                                 ival /= lun->be_lun->blocksize;
4220                                         else
4221                                                 ival /= 512;
4222                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4223                                             page->descr[2].count);
4224                                 }
4225                                 value = ctl_get_opt(&lun->be_lun->options,
4226                                     "pool-used-threshold");
4227                                 if (value != NULL &&
4228                                     ctl_expand_number(value, &ival) == 0) {
4229                                         page->descr[3].flags |= SLBPPD_ENABLED |
4230                                             SLBPPD_ARMING_INC;
4231                                         if (lun->be_lun->blocksize)
4232                                                 ival /= lun->be_lun->blocksize;
4233                                         else
4234                                                 ival /= 512;
4235                                         scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4236                                             page->descr[3].count);
4237                                 }
4238                                 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
4239                                        &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4240                                        sizeof(lbp_page_default));
4241                                 page_index->page_data =
4242                                         (uint8_t *)lun->mode_pages.lbp_page;
4243                                 break;
4244                         }
4245                         default:
4246                                 panic("subpage %#x for page %#x is incorrect!",
4247                                       page_index->subpage, page_code);
4248                         }
4249                         break;
4250                 }
4251                 case SMS_CDDVD_CAPS_PAGE: {
4252                         KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
4253                             ("subpage %#x for page %#x is incorrect!",
4254                             page_index->subpage, page_code));
4255                         memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT],
4256                                &cddvd_page_default,
4257                                sizeof(cddvd_page_default));
4258                         memcpy(&lun->mode_pages.cddvd_page[
4259                                CTL_PAGE_CHANGEABLE], &cddvd_page_changeable,
4260                                sizeof(cddvd_page_changeable));
4261                         memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
4262                                &cddvd_page_default,
4263                                sizeof(cddvd_page_default));
4264                         memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT],
4265                                &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
4266                                sizeof(cddvd_page_default));
4267                         page_index->page_data =
4268                                 (uint8_t *)lun->mode_pages.cddvd_page;
4269                         break;
4270                 }
4271                 case SMS_VENDOR_SPECIFIC_PAGE: {
4272                         switch (page_index->subpage) {
4273                         case DBGCNF_SUBPAGE_CODE: {
4274                                 memcpy(&lun->mode_pages.debugconf_subpage[
4275                                        CTL_PAGE_CURRENT],
4276                                        &debugconf_page_default,
4277                                        sizeof(debugconf_page_default));
4278                                 memcpy(&lun->mode_pages.debugconf_subpage[
4279                                        CTL_PAGE_CHANGEABLE],
4280                                        &debugconf_page_changeable,
4281                                        sizeof(debugconf_page_changeable));
4282                                 memcpy(&lun->mode_pages.debugconf_subpage[
4283                                        CTL_PAGE_DEFAULT],
4284                                        &debugconf_page_default,
4285                                        sizeof(debugconf_page_default));
4286                                 memcpy(&lun->mode_pages.debugconf_subpage[
4287                                        CTL_PAGE_SAVED],
4288                                        &debugconf_page_default,
4289                                        sizeof(debugconf_page_default));
4290                                 page_index->page_data =
4291                                     (uint8_t *)lun->mode_pages.debugconf_subpage;
4292                                 break;
4293                         }
4294                         default:
4295                                 panic("subpage %#x for page %#x is incorrect!",
4296                                       page_index->subpage, page_code);
4297                         }
4298                         break;
4299                 }
4300                 default:
4301                         panic("invalid page code value %#x", page_code);
4302                 }
4303         }
4304
4305         return (CTL_RETVAL_COMPLETE);
4306 }
4307
4308 static int
4309 ctl_init_log_page_index(struct ctl_lun *lun)
4310 {
4311         struct ctl_page_index *page_index;
4312         int i, j, k, prev;
4313
4314         memcpy(&lun->log_pages.index, log_page_index_template,
4315                sizeof(log_page_index_template));
4316
4317         prev = -1;
4318         for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
4320                 page_index = &lun->log_pages.index[i];
4321                 if (lun->be_lun->lun_type == T_DIRECT &&
4322                     (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
4323                         continue;
4324                 if (lun->be_lun->lun_type == T_PROCESSOR &&
4325                     (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
4326                         continue;
4327                 if (lun->be_lun->lun_type == T_CDROM &&
4328                     (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
4329                         continue;
4330
4331                 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
4332                     lun->backend->lun_attr == NULL)
4333                         continue;
4334
4335                 if (page_index->page_code != prev) {
4336                         lun->log_pages.pages_page[j] = page_index->page_code;
4337                         prev = page_index->page_code;
4338                         j++;
4339                 }
4340                 lun->log_pages.subpages_page[k*2] = page_index->page_code;
4341                 lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
4342                 k++;
4343         }
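        /*
         * The first four index entries are assumed to be, in template
         * order: the supported-pages page, the supported-subpages page,
         * the logical block provisioning page and the statistics page;
         * point each at its backing storage.
         */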
4344         lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
4345         lun->log_pages.index[0].page_len = j;
4346         lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
4347         lun->log_pages.index[1].page_len = k * 2;
4348         lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
4349         lun->log_pages.index[2].page_len = 12 * CTL_NUM_LBP_PARAMS;
4350         lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
4351         lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
4352
4353         return (CTL_RETVAL_COMPLETE);
4354 }
4355
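/*
 * Parse an ASCII hex string (optional leading whitespace and "0x" prefix)
 * into at most buf_size binary bytes, most significant nibble first,
 * stopping at the first non-hex character.  Returns the number of bytes
 * filled in; e.g. hex2bin("0x600c0ff0", buf, 8) sets buf[0..3] to
 * 60 0c 0f f0 and returns 4.
 */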
4356 static int
4357 hex2bin(const char *str, uint8_t *buf, int buf_size)
4358 {
4359         int i;
4360         u_char c;
4361
4362         memset(buf, 0, buf_size);
4363         while (isspace(str[0]))
4364                 str++;
4365         if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
4366                 str += 2;
4367         buf_size *= 2;
4368         for (i = 0; str[i] != 0 && i < buf_size; i++) {
4369                 c = str[i];
4370                 if (isdigit(c))
4371                         c -= '0';
4372                 else if (isalpha(c))
4373                         c -= isupper(c) ? 'A' - 10 : 'a' - 10;
4374                 else
4375                         break;
4376                 if (c >= 16)
4377                         break;
4378                 if ((i & 1) == 0)
4379                         buf[i / 2] |= (c << 4);
4380                 else
4381                         buf[i / 2] |= c;
4382         }
4383         return ((i + 1) / 2);
4384 }
4385
4386 /*
4387  * LUN allocation.
4388  *
4389  * Requirements:
4390  * - the caller allocates and zeros LUN storage, or passes in a NULL LUN if
4391  *   it wants us to allocate the LUN; in that case it must be able to block.
4392  * - ctl_softc is always set
4393  * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4394  *
4395  * Returns 0 for success, non-zero (errno) for failure.
4396  */
4397 static int
4398 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4399               struct ctl_be_lun *const be_lun)
4400 {
4401         struct ctl_lun *nlun, *lun;
4402         struct scsi_vpd_id_descriptor *desc;
4403         struct scsi_vpd_id_t10 *t10id;
4404         const char *eui, *naa, *scsiname, *vendor, *value;
4405         int lun_number, i, lun_malloced;
4406         int devidlen, idlen1, idlen2 = 0, len;
4407
4408         if (be_lun == NULL)
4409                 return (EINVAL);
4410
4411         /*
4412  * We currently only support Direct Access, Processor and CD-ROM LUN types.
4413          */
4414         switch (be_lun->lun_type) {
4415         case T_DIRECT:
4416         case T_PROCESSOR:
4417         case T_CDROM:
4418                 break;
4419         case T_SEQUENTIAL:
4420         case T_CHANGER:
4421         default:
4422                 be_lun->lun_config_status(be_lun->be_lun,
4423                                           CTL_LUN_CONFIG_FAILURE);
4424                 return (EINVAL);
4425         }
4426         if (ctl_lun == NULL) {
4427                 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4428                 lun_malloced = 1;
4429         } else {
4430                 lun_malloced = 0;
4431                 lun = ctl_lun;
4432         }
4433
4434         memset(lun, 0, sizeof(*lun));
4435         if (lun_malloced)
4436                 lun->flags = CTL_LUN_MALLOCED;
4437
4438         /* Generate LUN ID. */
4439         devidlen = max(CTL_DEVID_MIN_LEN,
4440             strnlen(be_lun->device_id, CTL_DEVID_LEN));
4441         idlen1 = sizeof(*t10id) + devidlen;
4442         len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
4443         scsiname = ctl_get_opt(&be_lun->options, "scsiname");
4444         if (scsiname != NULL) {
4445                 idlen2 = roundup2(strlen(scsiname) + 1, 4);
4446                 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
4447         }
4448         eui = ctl_get_opt(&be_lun->options, "eui");
4449         if (eui != NULL) {
4450                 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4451         }
4452         naa = ctl_get_opt(&be_lun->options, "naa");
4453         if (naa != NULL) {
4454                 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4455         }
4456         lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
4457             M_CTL, M_WAITOK | M_ZERO);
4458         desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
4459         desc->proto_codeset = SVPD_ID_CODESET_ASCII;
4460         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
4461         desc->length = idlen1;
4462         t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
4463         memset(t10id->vendor, ' ', sizeof(t10id->vendor));
4464         if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
4465                 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
4466         } else {
4467                 strncpy((char *)t10id->vendor, vendor,
4468                     min(sizeof(t10id->vendor), strlen(vendor)));
4469         }
4470         strncpy((char *)t10id->vendor_spec_id,
4471             (char *)be_lun->device_id, devidlen);
4472         if (scsiname != NULL) {
4473                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4474                     desc->length);
4475                 desc->proto_codeset = SVPD_ID_CODESET_UTF8;
4476                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4477                     SVPD_ID_TYPE_SCSI_NAME;
4478                 desc->length = idlen2;
4479                 strlcpy(desc->identifier, scsiname, idlen2);
4480         }
4481         if (eui != NULL) {
4482                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4483                     desc->length);
4484                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4485                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4486                     SVPD_ID_TYPE_EUI64;
4487                 desc->length = hex2bin(eui, desc->identifier, 16);
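                /*
                 * SPC defines EUI-64 based designators of 8, 12 or 16
                 * bytes, so round the parsed length up to the next valid
                 * size and shrink the device ID length accordingly.
                 */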
4488                 desc->length = desc->length > 12 ? 16 :
4489                     (desc->length > 8 ? 12 : 8);
4490                 len -= 16 - desc->length;
4491         }
4492         if (naa != NULL) {
4493                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4494                     desc->length);
4495                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4496                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4497                     SVPD_ID_TYPE_NAA;
4498                 desc->length = hex2bin(naa, desc->identifier, 16);
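                /*
                 * NAA designators are 8 bytes, except the IEEE Registered
                 * Extended format, which is 16; pick whichever fits the
                 * parsed value and shrink the device ID length to match.
                 */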
4499                 desc->length = desc->length > 8 ? 16 : 8;
4500                 len -= 16 - desc->length;
4501         }
4502         lun->lun_devid->len = len;
4503
4504         mtx_lock(&ctl_softc->ctl_lock);
4505         /*
4506          * See if the caller requested a particular LUN number.  If so, see
4507          * if it is available.  Otherwise, allocate the first available LUN.
4508          */
4509         if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4510                 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4511                  || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4512                         mtx_unlock(&ctl_softc->ctl_lock);
4513                         if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4514                                 printf("ctl: requested LUN ID %d is higher "
4515                                        "than CTL_MAX_LUNS - 1 (%d)\n",
4516                                        be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4517                         } else {
4518                                 /*
4519                                  * XXX KDM return an error, or just assign
4520                                  * another LUN ID in this case??
4521                                  */
4522                                 printf("ctl: requested LUN ID %d is already "
4523                                        "in use\n", be_lun->req_lun_id);
4524                         }
4525                         if (lun->flags & CTL_LUN_MALLOCED)
4526                                 free(lun, M_CTL);
4527                         be_lun->lun_config_status(be_lun->be_lun,
4528                                                   CTL_LUN_CONFIG_FAILURE);
4529                         return (ENOSPC);
4530                 }
4531                 lun_number = be_lun->req_lun_id;
4532         } else {
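                /* No specific ID was requested: take the first free LUN number. */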
4533                 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
4534                 if (lun_number == -1) {
4535                         mtx_unlock(&ctl_softc->ctl_lock);
4536                         printf("ctl: can't allocate LUN, out of LUNs\n");
4537                         if (lun->flags & CTL_LUN_MALLOCED)
4538                                 free(lun, M_CTL);
4539                         be_lun->lun_config_status(be_lun->be_lun,
4540                                                   CTL_LUN_CONFIG_FAILURE);
4541                         return (ENOSPC);
4542                 }
4543         }
4544         ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4545
4546         mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
4547         lun->lun = lun_number;
4548         lun->be_lun = be_lun;
4549         /*
4550          * LUNs come on line disabled, and must be enabled by the
4551          * backend via ctl_enable_lun() before they can be used.
4552          */
4553         lun->flags |= CTL_LUN_DISABLED;
4554         lun->backend = be_lun->be;
4555         be_lun->ctl_lun = lun;
4556         be_lun->lun_id = lun_number;
4557         atomic_add_int(&be_lun->be->num_luns, 1);
4558         if (be_lun->flags & CTL_LUN_FLAG_EJECTED)
4559                 lun->flags |= CTL_LUN_EJECTED;
4560         if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA)
4561                 lun->flags |= CTL_LUN_NO_MEDIA;
4562         if (be_lun->flags & CTL_LUN_FLAG_STOPPED)
4563                 lun->flags |= CTL_LUN_STOPPED;
4564
4565         if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4566                 lun->flags |= CTL_LUN_PRIMARY_SC;
4567
4568         value = ctl_get_opt(&be_lun->options, "removable");
4569         if (value != NULL) {
4570                 if (strcmp(value, "on") == 0)
4571                         lun->flags |= CTL_LUN_REMOVABLE;
4572         } else if (be_lun->lun_type == T_CDROM)
4573                 lun->flags |= CTL_LUN_REMOVABLE;
4574
4575         lun->ctl_softc = ctl_softc;
4576 #ifdef CTL_TIME_IO
4577         lun->last_busy = getsbinuptime();
4578 #endif
4579         TAILQ_INIT(&lun->ooa_queue);
4580         TAILQ_INIT(&lun->blocked_queue);
4581         STAILQ_INIT(&lun->error_list);
4582         ctl_tpc_lun_init(lun);
4583
4584         /*
4585          * Initialize the mode and log page index.
4586          */
4587         ctl_init_page_index(lun);
4588         ctl_init_log_page_index(lun);
4589
4590         /*
4591          * Now, before we insert this lun on the lun list, set the lun
4592          * inventory changed UA for all other luns.
4593          */
4594         STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4595                 mtx_lock(&nlun->lun_lock);
4596                 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4597                 mtx_unlock(&nlun->lun_lock);
4598         }
4599
4600         STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4601
4602         ctl_softc->ctl_luns[lun_number] = lun;
4603
4604         ctl_softc->num_luns++;
4605
4606         /* Setup statistics gathering */
4607         lun->stats.device_type = be_lun->lun_type;
4608         lun->stats.lun_number = lun_number;
4609         lun->stats.blocksize = be_lun->blocksize;
4610         if (be_lun->blocksize == 0)
4611                 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4612         for (i = 0; i < CTL_MAX_PORTS; i++)
4613                 lun->stats.ports[i].targ_port = i;
4614
4615         mtx_unlock(&ctl_softc->ctl_lock);
4616
4617         lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4618         return (0);
4619 }
4620
4621 /*
4622  * Delete a LUN.
4623  * Assumptions:
4624  * - LUN has already been marked invalid and any pending I/O has been taken
4625  *   care of.
4626  */
4627 static int
4628 ctl_free_lun(struct ctl_lun *lun)
4629 {
4630         struct ctl_softc *softc;
4631         struct ctl_lun *nlun;
4632         int i;
4633
4634         softc = lun->ctl_softc;
4635
4636         mtx_assert(&softc->ctl_lock, MA_OWNED);
4637
4638         STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4639
4640         ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4641
4642         softc->ctl_luns[lun->lun] = NULL;
4643
4644         if (!TAILQ_EMPTY(&lun->ooa_queue))
4645                 panic("Freeing LUN %p with outstanding I/O!", lun);
4646
4647         softc->num_luns--;
4648
4649         /*
4650          * Tell the backend to free resources, if this LUN has a backend.
4651          */
4652         atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4653         lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4654
4655         ctl_tpc_lun_shutdown(lun);
4656         mtx_destroy(&lun->lun_lock);
4657         free(lun->lun_devid, M_CTL);
4658         for (i = 0; i < CTL_MAX_PORTS; i++)
4659                 free(lun->pending_ua[i], M_CTL);
4660         for (i = 0; i < CTL_MAX_PORTS; i++)
4661                 free(lun->pr_keys[i], M_CTL);
4662         free(lun->write_buffer, M_CTL);
4663         if (lun->flags & CTL_LUN_MALLOCED)
4664                 free(lun, M_CTL);
4665
4666         STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4667                 mtx_lock(&nlun->lun_lock);
4668                 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4669                 mtx_unlock(&nlun->lun_lock);
4670         }
4671
4672         return (0);
4673 }
4674
4675 static void
4676 ctl_create_lun(struct ctl_be_lun *be_lun)
4677 {
4678
4679         /*
4680          * ctl_alloc_lun() should handle all potential failure cases.
4681          */
4682         ctl_alloc_lun(control_softc, NULL, be_lun);
4683 }
4684
4685 int
4686 ctl_add_lun(struct ctl_be_lun *be_lun)
4687 {
4688         struct ctl_softc *softc = control_softc;
4689
4690         mtx_lock(&softc->ctl_lock);
4691         STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
4692         mtx_unlock(&softc->ctl_lock);
4693         wakeup(&softc->pending_lun_queue);
4694
4695         return (0);
4696 }
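
/*
 * Note that ctl_add_lun() only queues the request: the wakeup() above
 * hands it to the CTL thread that drains pending_lun_queue, which does
 * the actual ctl_create_lun()/ctl_alloc_lun() work and reports the
 * outcome through be_lun->lun_config_status().
 */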
4697
4698 int
4699 ctl_enable_lun(struct ctl_be_lun *be_lun)
4700 {
4701         struct ctl_softc *softc;
4702         struct ctl_port *port, *nport;
4703         struct ctl_lun *lun;
4704         int retval;
4705
4706         lun = (struct ctl_lun *)be_lun->ctl_lun;
4707         softc = lun->ctl_softc;
4708
4709         mtx_lock(&softc->ctl_lock);
4710         mtx_lock(&lun->lun_lock);
4711         if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4712                 /*
4713                  * eh?  Why did we get called if the LUN is already
4714                  * enabled?
4715                  */
4716                 mtx_unlock(&lun->lun_lock);
4717                 mtx_unlock(&softc->ctl_lock);
4718                 return (0);
4719         }
4720         lun->flags &= ~CTL_LUN_DISABLED;
4721         mtx_unlock(&lun->lun_lock);
4722
4723         STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) {
4724                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4725                     port->lun_map != NULL || port->lun_enable == NULL)
4726                         continue;
4727
4728                 /*
4729                  * Drop the lock while we call the FETD's enable routine.
4730                  * This can lead to a callback into CTL (at least in the
4731                  * case of the internal initiator frontend).
4732                  */
4733                 mtx_unlock(&softc->ctl_lock);
4734                 retval = port->lun_enable(port->targ_lun_arg, lun->lun);
4735                 mtx_lock(&softc->ctl_lock);
4736                 if (retval != 0) {
4737                         printf("%s: FETD %s port %d returned error "
4738                                "%d for lun_enable on lun %jd\n",
4739                                __func__, port->port_name, port->targ_port,
4740                                retval, (intmax_t)lun->lun);
4741                 }
4742         }
4743
4744         mtx_unlock(&softc->ctl_lock);
4745         ctl_isc_announce_lun(lun);
4746
4747         return (0);
4748 }
4749
4750 int
4751 ctl_disable_lun(struct ctl_be_lun *be_lun)
4752 {
4753         struct ctl_softc *softc;
4754         struct ctl_port *port;
4755         struct ctl_lun *lun;
4756         int retval;
4757
4758         lun = (struct ctl_lun *)be_lun->ctl_lun;
4759         softc = lun->ctl_softc;
4760
4761         mtx_lock(&softc->ctl_lock);
4762         mtx_lock(&lun->lun_lock);
4763         if (lun->flags & CTL_LUN_DISABLED) {
4764                 mtx_unlock(&lun->lun_lock);
4765                 mtx_unlock(&softc->ctl_lock);
4766                 return (0);
4767         }
4768         lun->flags |= CTL_LUN_DISABLED;
4769         mtx_unlock(&lun->lun_lock);
4770
4771         STAILQ_FOREACH(port, &softc->port_list, links) {
4772                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4773                     port->lun_map != NULL || port->lun_disable == NULL)
4774                         continue;
4775
4776                 /*
4777                  * Drop the lock before we call the frontend's disable
4778                  * routine, to avoid lock order reversals.
4779                  *
4780                  * XXX KDM what happens if the frontend list changes while
4781                  * we're traversing it?  It's unlikely, but should be handled.
4782                  */
4783                 mtx_unlock(&softc->ctl_lock);
4784                 retval = port->lun_disable(port->targ_lun_arg, lun->lun);
4785                 mtx_lock(&softc->ctl_lock);
4786                 if (retval != 0) {
4787                         printf("%s: FETD %s port %d returned error "
4788                                "%d for lun_disable on lun %jd\n",
4789                                __func__, port->port_name, port->targ_port,
4790                                retval, (intmax_t)lun->lun);
4791                 }
4792         }
4793
4794         mtx_unlock(&softc->ctl_lock);
4795         ctl_isc_announce_lun(lun);
4796
4797         return (0);
4798 }
4799
4800 int
4801 ctl_start_lun(struct ctl_be_lun *be_lun)
4802 {
4803         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4804
4805         mtx_lock(&lun->lun_lock);
4806         lun->flags &= ~CTL_LUN_STOPPED;
4807         mtx_unlock(&lun->lun_lock);
4808         return (0);
4809 }
4810
4811 int
4812 ctl_stop_lun(struct ctl_be_lun *be_lun)
4813 {
4814         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4815
4816         mtx_lock(&lun->lun_lock);
4817         lun->flags |= CTL_LUN_STOPPED;
4818         mtx_unlock(&lun->lun_lock);
4819         return (0);
4820 }
4821
4822 int
4823 ctl_lun_no_media(struct ctl_be_lun *be_lun)
4824 {
4825         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4826
4827         mtx_lock(&lun->lun_lock);
4828         lun->flags |= CTL_LUN_NO_MEDIA;
4829         mtx_unlock(&lun->lun_lock);
4830         return (0);
4831 }
4832
4833 int
4834 ctl_lun_has_media(struct ctl_be_lun *be_lun)
4835 {
4836         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4837         union ctl_ha_msg msg;
4838
4839         mtx_lock(&lun->lun_lock);
4840         lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED);
4841         if (lun->flags & CTL_LUN_REMOVABLE)
4842                 ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE);
4843         mtx_unlock(&lun->lun_lock);
4844         if ((lun->flags & CTL_LUN_REMOVABLE) &&
4845             lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
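                /*
                 * In HA XFER mode the peer keeps its own unit attention
                 * state, so explicitly broadcast a MEDIUM CHANGE UA for
                 * this LUN to the other side (initid/targ_port of -1 and
                 * ua_all appear to act as wildcards for all initiators).
                 */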
4846                 bzero(&msg.ua, sizeof(msg.ua));
4847                 msg.hdr.msg_type = CTL_MSG_UA;
4848                 msg.hdr.nexus.initid = -1;
4849                 msg.hdr.nexus.targ_port = -1;
4850                 msg.hdr.nexus.targ_lun = lun->lun;
4851                 msg.hdr.nexus.targ_mapped_lun = lun->lun;
4852                 msg.ua.ua_all = 1;
4853                 msg.ua.ua_set = 1;
4854                 msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE;
4855                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
4856                     M_WAITOK);
4857         }
4858         return (0);
4859 }
4860
4861 int
4862 ctl_lun_ejected(struct ctl_be_lun *be_lun)
4863 {
4864         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4865
4866         mtx_lock(&lun->lun_lock);
4867         lun->flags |= CTL_LUN_EJECTED;
4868         mtx_unlock(&lun->lun_lock);
4869         return (0);
4870 }
4871
4872 int
4873 ctl_lun_primary(struct ctl_be_lun *be_lun)
4874 {
4875         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4876
4877         mtx_lock(&lun->lun_lock);
4878         lun->flags |= CTL_LUN_PRIMARY_SC;
4879         ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4880         mtx_unlock(&lun->lun_lock);
4881         ctl_isc_announce_lun(lun);
4882         return (0);
4883 }
4884
4885 int
4886 ctl_lun_secondary(struct ctl_be_lun *be_lun)
4887 {
4888         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4889
4890         mtx_lock(&lun->lun_lock);
4891         lun->flags &= ~CTL_LUN_PRIMARY_SC;
4892         ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
4893         mtx_unlock(&lun->lun_lock);
4894         ctl_isc_announce_lun(lun);
4895         return (0);
4896 }
4897
4898 int
4899 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4900 {
4901         struct ctl_softc *softc;
4902         struct ctl_lun *lun;
4903
4904         lun = (struct ctl_lun *)be_lun->ctl_lun;
4905         softc = lun->ctl_softc;
4906
4907         mtx_lock(&lun->lun_lock);
4908
4909         /*
4910          * The LUN needs to be disabled before it can be marked invalid.
4911          */
4912         if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4913                 mtx_unlock(&lun->lun_lock);
4914                 return (-1);
4915         }
4916         /*
4917          * Mark the LUN invalid.
4918          */
4919         lun->flags |= CTL_LUN_INVALID;
4920
4921         /*
4922          * If there is nothing in the OOA queue, go ahead and free the LUN.
4923          * If we have something in the OOA queue, we'll free it when the
4924          * last I/O completes.
4925          */
4926         if (TAILQ_EMPTY(&lun->ooa_queue)) {
4927                 mtx_unlock(&lun->lun_lock);
4928                 mtx_lock(&softc->ctl_lock);
4929                 ctl_free_lun(lun);
4930                 mtx_unlock(&softc->ctl_lock);
4931         } else
4932                 mtx_unlock(&lun->lun_lock);
4933
4934         return (0);
4935 }
4936
4937 void
4938 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
4939 {
4940         struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4941         union ctl_ha_msg msg;
4942
4943         mtx_lock(&lun->lun_lock);
4944         ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
4945         mtx_unlock(&lun->lun_lock);
4946         if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
4947                 /* Send msg to other side. */
4948                 bzero(&msg.ua, sizeof(msg.ua));
4949                 msg.hdr.msg_type = CTL_MSG_UA;
4950                 msg.hdr.nexus.initid = -1;
4951                 msg.hdr.nexus.targ_port = -1;
4952                 msg.hdr.nexus.targ_lun = lun->lun;
4953                 msg.hdr.nexus.targ_mapped_lun = lun->lun;
4954                 msg.ua.ua_all = 1;
4955                 msg.ua.ua_set = 1;
4956                 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
4957                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
4958                     M_WAITOK);
4959         }
4960 }
4961
4962 /*
4963  * Backend "memory move is complete" callback for requests that never
4964  * make it down to, say, RAIDCore's configuration code.
4965  */
4966 int
4967 ctl_config_move_done(union ctl_io *io)
4968 {
4969         int retval;
4970
4971         CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
4972         KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
4973             ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));
4974
4975         if ((io->io_hdr.port_status != 0) &&
4976             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
4977              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
4978                 /*
4979                  * For hardware error sense keys, the sense key
4980                  * specific value is defined to be a retry count,
4981                  * but we use it to pass back an internal FETD
4982                  * error code.  XXX KDM  Hopefully the FETD is only
4983                  * using 16 bits for an error code, since that's
4984                  * all the space we have in the sks field.
4985                  */
4986                 ctl_set_internal_failure(&io->scsiio,
4987                                          /*sks_valid*/ 1,
4988                                          /*retry_count*/
4989                                          io->io_hdr.port_status);
4990         }
4991
4992         if (ctl_debug & CTL_DEBUG_CDB_DATA)
4993                 ctl_data_print(io);
4994         if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
4995             ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
4996              (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
4997             ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
4998                 /*
4999                  * XXX KDM just assuming a single pointer here, and not a
5000                  * S/G list.  If we start using S/G lists for config data,
5001                  * we'll need to know how to clean them up here as well.
5002                  */
5003                 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5004                         free(io->scsiio.kern_data_ptr, M_CTL);
5005                 ctl_done(io);
5006                 retval = CTL_RETVAL_COMPLETE;
5007         } else {
5008                 /*
5009                  * XXX KDM now we need to continue data movement.  Some
5010                  * options:
5011                  * - call ctl_scsiio() again?  We don't do this for data
5012                  *   writes, because for those at least we know ahead of
5013                  *   time where the write will go and how long it is.  For
5014                  *   config writes, though, that information is largely
5015                  *   contained within the write itself, thus we need to
5016                  *   parse out the data again.
5017                  *
5018                  * - Call some other function once the data is in?
5019                  */
5020
5021                 /*
5022                  * XXX KDM call ctl_scsiio() again for now, and check flag
5023                  * bits to see whether we're allocated or not.
5024                  */
5025                 retval = ctl_scsiio(&io->scsiio);
5026         }
5027         return (retval);
5028 }
5029
5030 /*
5031  * This gets called by a backend driver when it is done with a
5032  * data_submit method.
5033  */
5034 void
5035 ctl_data_submit_done(union ctl_io *io)
5036 {
5037         /*
5038          * If the IO_CONT flag is set, we need to call the supplied
5039          * function to continue processing the I/O, instead of completing
5040          * the I/O just yet.
5041          *
5042          * If there is an error, though, we don't want to keep processing.
5043          * Instead, just send status back to the initiator.
5044          */
5045         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5046             (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5047             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5048              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5049                 io->scsiio.io_cont(io);
5050                 return;
5051         }
5052         ctl_done(io);
5053 }
5054
5055 /*
5056  * This gets called by a backend driver when it is done with a
5057  * configuration write.
5058  */
5059 void
5060 ctl_config_write_done(union ctl_io *io)
5061 {
5062         uint8_t *buf;
5063
5064         /*
5065          * If the IO_CONT flag is set, we need to call the supplied
5066          * function to continue processing the I/O, instead of completing
5067          * the I/O just yet.
5068          *
5069          * If there is an error, though, we don't want to keep processing.
5070          * Instead, just send status back to the initiator.
5071          */
5072         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5073             (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5074             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5075              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5076                 io->scsiio.io_cont(io);
5077                 return;
5078         }
5079         /*
5080          * Since a configuration write can be done for commands that actually
5081          * have data allocated, like write buffer, and commands that have
5082          * no data, like start/stop unit, we need to check here.
5083          */
5084         if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5085                 buf = io->scsiio.kern_data_ptr;
5086         else
5087                 buf = NULL;
5088         ctl_done(io);
5089         if (buf)
5090                 free(buf, M_CTL);
5091 }
5092
5093 void
5094 ctl_config_read_done(union ctl_io *io)
5095 {
5096         uint8_t *buf;
5097
5098         /*
5099          * If there is some error -- we are done, skip data transfer.
5100          * If there was an error, we are done; skip the data transfer.
5101         if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
5102             ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5103              (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
5104                 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5105                         buf = io->scsiio.kern_data_ptr;
5106                 else
5107                         buf = NULL;
5108                 ctl_done(io);
5109                 if (buf)
5110                         free(buf, M_CTL);
5111                 return;
5112         }
5113
5114         /*
5115          * If the IO_CONT flag is set, we need to call the supplied
5116          * function to continue processing the I/O, instead of completing
5117          * the I/O just yet.
5118          */
5119         if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
5120                 io->scsiio.io_cont(io);
5121                 return;
5122         }
5123
5124         ctl_datamove(io);
5125 }
5126
5127 /*
5128  * SCSI release command.
5129  */
5130 int
5131 ctl_scsi_release(struct ctl_scsiio *ctsio)
5132 {
5133         struct ctl_lun *lun;
5134         uint32_t residx;
5135
5136         CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5137
5138         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5139         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5140
5141         /*
5142          * XXX KDM right now, we only support LUN reservation.  We don't
5143          * support 3rd party reservations, or extent reservations, which
5144          * might actually need the parameter list.  If we've gotten this
5145          * far, we've got a LUN reservation.  Anything else got kicked out
5146          * above.  So, according to SPC, ignore the length.
5147          */
5148
5149         mtx_lock(&lun->lun_lock);
5150
5151         /*
5152          * According to SPC, it is not an error for an initiator to attempt
5153          * to release a reservation on a LUN that isn't reserved, or that
5154          * is reserved by another initiator.  The reservation can only be
5155          * released, though, by the initiator who made it or by one of
5156          * several reset type events.
5157          */
5158         if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
5159                 lun->flags &= ~CTL_LUN_RESERVED;
5160
5161         mtx_unlock(&lun->lun_lock);
5162
5163         ctl_set_success(ctsio);
5164         ctl_done((union ctl_io *)ctsio);
5165         return (CTL_RETVAL_COMPLETE);
5166 }
5167
5168 int
5169 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5170 {
5171         struct ctl_lun *lun;
5172         uint32_t residx;
5173
5174         CTL_DEBUG_PRINT(("ctl_scsi_reserve\n"));
5175
5176         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5177         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5178
5179         /*
5180          * XXX KDM right now, we only support LUN reservation.  We don't
5181          * support 3rd party reservations, or extent reservations, which
5182          * might actually need the parameter list.  If we've gotten this
5183          * far, we've got a LUN reservation.  Anything else got kicked out
5184          * above.  So, according to SPC, ignore the length.
5185          */
5186
5187         mtx_lock(&lun->lun_lock);
5188         if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
5189                 ctl_set_reservation_conflict(ctsio);
5190                 goto bailout;
5191         }
5192
5193         /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */
5194         if (lun->flags & CTL_LUN_PR_RESERVED) {
5195                 ctl_set_success(ctsio);
5196                 goto bailout;
5197         }
5198
5199         lun->flags |= CTL_LUN_RESERVED;
5200         lun->res_idx = residx;
5201         ctl_set_success(ctsio);
5202
5203 bailout:
5204         mtx_unlock(&lun->lun_lock);
5205         ctl_done((union ctl_io *)ctsio);
5206         return (CTL_RETVAL_COMPLETE);
5207 }
5208
5209 int
5210 ctl_start_stop(struct ctl_scsiio *ctsio)
5211 {
5212         struct scsi_start_stop_unit *cdb;
5213         struct ctl_lun *lun;
5214         int retval;
5215
5216         CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5217
5218         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5219         cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5220
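        /*
         * A POWER CONDITION field of zero means this is a plain start/stop
         * request: the START bit selects started vs. stopped and LOEJ
         * requests load/eject, so check reservation, removability and
         * medium-removal-prevention constraints before passing it on.
         */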
5221         if ((cdb->how & SSS_PC_MASK) == 0) {
5222                 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
5223                     (cdb->how & SSS_START) == 0) {
5224                         uint32_t residx;
5225
5226                         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5227                         if (ctl_get_prkey(lun, residx) == 0 ||
5228                             (lun->pr_res_idx != residx && lun->pr_res_type < 4)) {
5229
5230                                 ctl_set_reservation_conflict(ctsio);
5231                                 ctl_done((union ctl_io *)ctsio);
5232                                 return (CTL_RETVAL_COMPLETE);
5233                         }
5234                 }
5235
5236                 if ((cdb->how & SSS_LOEJ) &&
5237                     (lun->flags & CTL_LUN_REMOVABLE) == 0) {
5238                         ctl_set_invalid_field(ctsio,
5239                                               /*sks_valid*/ 1,
5240                                               /*command*/ 1,
5241                                               /*field*/ 4,
5242                                               /*bit_valid*/ 1,
5243                                               /*bit*/ 1);
5244                         ctl_done((union ctl_io *)ctsio);
5245                         return (CTL_RETVAL_COMPLETE);
5246                 }
5247
5248                 if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) &&
5249                     lun->prevent_count > 0) {
5250                         /* "Medium removal prevented" */
5251                         ctl_set_sense(ctsio, /*current_error*/ 1,
5252                             /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ?
5253                              SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST,
5254                             /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE);
5255                         ctl_done((union ctl_io *)ctsio);
5256                         return (CTL_RETVAL_COMPLETE);
5257                 }
5258         }
5259
5260         retval = lun->backend->config_write((union ctl_io *)ctsio);
5261         return (retval);
5262 }
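
/*
 * Illustrative only: a hypothetical decoder for the START STOP UNIT
 * "how" byte handled above, assuming the SSS_* definitions from
 * scsi_da.h.  With a power condition set, the START/LOEJ bits are
 * ignored; otherwise LOEJ=1/START=0 is the eject case that the
 * prevent_count test above can refuse.
 */
#if 0
static const char *
sss_action(uint8_t how)
{
	if ((how & SSS_PC_MASK) != 0)
		return ("power condition transition");
	if (how & SSS_LOEJ)
		return ((how & SSS_START) ? "load and start" : "stop and eject");
	return ((how & SSS_START) ? "start" : "stop");
}
#endif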
5263
5264 int
5265 ctl_prevent_allow(struct ctl_scsiio *ctsio)
5266 {
5267         struct ctl_lun *lun;
5268         struct scsi_prevent *cdb;
5269         int retval;
5270         uint32_t initidx;
5271
5272         CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
5273
5274         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5275         cdb = (struct scsi_prevent *)ctsio->cdb;
5276
5277         if ((lun->flags & CTL_LUN_REMOVABLE) == 0) {
5278                 ctl_set_invalid_opcode(ctsio);
5279                 ctl_done((union ctl_io *)ctsio);
5280                 return (CTL_RETVAL_COMPLETE);
5281         }
5282
5283         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5284         mtx_lock(&lun->lun_lock);
5285         if ((cdb->how & PR_PREVENT) &&
5286             ctl_is_set(lun->prevent, initidx) == 0) {
5287                 ctl_set_mask(lun->prevent, initidx);
5288                 lun->prevent_count++;
5289         } else if ((cdb->how & PR_PREVENT) == 0 &&
5290             ctl_is_set(lun->prevent, initidx)) {
5291                 ctl_clear_mask(lun->prevent, initidx);
5292                 lun->prevent_count--;
5293         }
5294         mtx_unlock(&lun->lun_lock);
5295         retval = lun->backend->config_write((union ctl_io *)ctsio);
5296         return (retval);
5297 }
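
/*
 * Note (illustrative): the ctl_is_set() guards above make PREVENT and
 * ALLOW idempotent per initiator, so prevent_count is the number of
 * distinct initiators currently preventing removal, not the number of
 * PREVENT commands received.  A hypothetical predicate:
 */
#if 0
static int
ctl_removal_prevented(struct ctl_lun *l)
{
	return (l->prevent_count > 0);
}
#endif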
5298
5299 /*
5300  * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5301  * we don't really do anything with the LBA and length fields if the user
5302  * passes them in.  Instead we'll just flush out the cache for the entire
5303  * LUN.
5304  */
5305 int
5306 ctl_sync_cache(struct ctl_scsiio *ctsio)
5307 {
5308         struct ctl_lun *lun;
5309         struct ctl_softc *softc;
5310         struct ctl_lba_len_flags *lbalen;
5311         uint64_t starting_lba;
5312         uint32_t block_count;
5313         int retval;
5314         uint8_t byte2;
5315
5316         CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5317
5318         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5319         softc = lun->ctl_softc;
5320         retval = 0;
5321
5322         switch (ctsio->cdb[0]) {
5323         case SYNCHRONIZE_CACHE: {
5324                 struct scsi_sync_cache *cdb;
5325                 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5326
5327                 starting_lba = scsi_4btoul(cdb->begin_lba);
5328                 block_count = scsi_2btoul(cdb->lb_count);
5329                 byte2 = cdb->byte2;
5330                 break;
5331         }
5332         case SYNCHRONIZE_CACHE_16: {
5333                 struct scsi_sync_cache_16 *cdb;
5334                 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5335
5336                 starting_lba = scsi_8btou64(cdb->begin_lba);
5337                 block_count = scsi_4btoul(cdb->lb_count);
5338                 byte2 = cdb->byte2;
5339                 break;
5340         }
5341         default:
5342                 ctl_set_invalid_opcode(ctsio);
5343                 ctl_done((union ctl_io *)ctsio);
5344                 goto bailout;
5345                 break; /* NOTREACHED */
5346         }
5347
5348         /*
5349          * We check the LBA and length, but don't do anything with them.
5350          * A SYNCHRONIZE CACHE will cause the entire cache for this LUN to
5351          * get flushed.  This check just helps satisfy anyone who wants
5352          * to see an error for an out-of-range LBA.
5353          */
5354         if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5355                 ctl_set_lba_out_of_range(ctsio);
5356                 ctl_done((union ctl_io *)ctsio);
5357                 goto bailout;
5358         }
5359
5360         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5361         lbalen->lba = starting_lba;
5362         lbalen->len = block_count;
5363         lbalen->flags = byte2;
5364         retval = lun->backend->config_write((union ctl_io *)ctsio);
5365
5366 bailout:
5367         return (retval);
5368 }
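
/*
 * Worked example of the bounds test above (illustrative helper, not
 * part of the driver): with maxlba = 999, (lba 992, count 8) passes
 * since 992 + 8 <= 1000, while (lba 993, count 8) fails.
 */
#if 0
static int
ctl_lba_range_ok(uint64_t lba, uint32_t count, uint64_t maxlba)
{
	return (lba + count <= maxlba + 1);
}
#endif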
5369
5370 int
5371 ctl_format(struct ctl_scsiio *ctsio)
5372 {
5373         struct scsi_format *cdb;
5374         struct ctl_lun *lun;
5375         int length, defect_list_len;
5376
5377         CTL_DEBUG_PRINT(("ctl_format\n"));
5378
5379         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5380
5381         cdb = (struct scsi_format *)ctsio->cdb;
5382
5383         length = 0;
5384         if (cdb->byte2 & SF_FMTDATA) {
5385                 if (cdb->byte2 & SF_LONGLIST)
5386                         length = sizeof(struct scsi_format_header_long);
5387                 else
5388                         length = sizeof(struct scsi_format_header_short);
5389         }
5390
5391         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5392          && (length > 0)) {
5393                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5394                 ctsio->kern_data_len = length;
5395                 ctsio->kern_total_len = length;
5396                 ctsio->kern_data_resid = 0;
5397                 ctsio->kern_rel_offset = 0;
5398                 ctsio->kern_sg_entries = 0;
5399                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5400                 ctsio->be_move_done = ctl_config_move_done;
5401                 ctl_datamove((union ctl_io *)ctsio);
5402
5403                 return (CTL_RETVAL_COMPLETE);
5404         }
5405
5406         defect_list_len = 0;
5407
5408         if (cdb->byte2 & SF_FMTDATA) {
5409                 if (cdb->byte2 & SF_LONGLIST) {
5410                         struct scsi_format_header_long *header;
5411
5412                         header = (struct scsi_format_header_long *)
5413                                 ctsio->kern_data_ptr;
5414
5415                         defect_list_len = scsi_4btoul(header->defect_list_len);
5416                         if (defect_list_len != 0) {
5417                                 ctl_set_invalid_field(ctsio,
5418                                                       /*sks_valid*/ 1,
5419                                                       /*command*/ 0,
5420                                                       /*field*/ 2,
5421                                                       /*bit_valid*/ 0,
5422                                                       /*bit*/ 0);
5423                                 goto bailout;
5424                         }
5425                 } else {
5426                         struct scsi_format_header_short *header;
5427
5428                         header = (struct scsi_format_header_short *)
5429                                 ctsio->kern_data_ptr;
5430
5431                         defect_list_len = scsi_2btoul(header->defect_list_len);
5432                         if (defect_list_len != 0) {
5433                                 ctl_set_invalid_field(ctsio,
5434                                                       /*sks_valid*/ 1,
5435                                                       /*command*/ 0,
5436                                                       /*field*/ 2,
5437                                                       /*bit_valid*/ 0,
5438                                                       /*bit*/ 0);
5439                                 goto bailout;
5440                         }
5441                 }
5442         }
5443
5444         ctl_set_success(ctsio);
5445 bailout:
5446
5447         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5448                 free(ctsio->kern_data_ptr, M_CTL);
5449                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5450         }
5451
5452         ctl_done((union ctl_io *)ctsio);
5453         return (CTL_RETVAL_COMPLETE);
5454 }
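
/*
 * Note on the two-pass pattern above, which WRITE BUFFER, WRITE SAME,
 * UNMAP and MODE SELECT below all repeat: commands with a Data-Out
 * phase run through their handler twice.  A minimal sketch of the
 * skeleton (illustrative only):
 */
#if 0
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		/* First pass: allocate a buffer and start the transfer. */
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);
		/* Re-dispatched here once the HBA has moved the data. */
		return (CTL_RETVAL_COMPLETE);
	}
	/* Second pass: the parameter data is now in kern_data_ptr. */
#endif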
5455
5456 int
5457 ctl_read_buffer(struct ctl_scsiio *ctsio)
5458 {
5459         struct ctl_lun *lun;
5460         uint64_t buffer_offset;
5461         uint32_t len;
5462         uint8_t byte2;
5463         static uint8_t descr[4];
5464         static uint8_t echo_descr[4] = { 0 };
5465
5466         CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5467         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5468         switch (ctsio->cdb[0]) {
5469         case READ_BUFFER: {
5470                 struct scsi_read_buffer *cdb;
5471
5472                 cdb = (struct scsi_read_buffer *)ctsio->cdb;
5473                 buffer_offset = scsi_3btoul(cdb->offset);
5474                 len = scsi_3btoul(cdb->length);
5475                 byte2 = cdb->byte2;
5476                 break;
5477         }
5478         case READ_BUFFER_16: {
5479                 struct scsi_read_buffer_16 *cdb;
5480
5481                 cdb = (struct scsi_read_buffer_16 *)ctsio->cdb;
5482                 buffer_offset = scsi_8btou64(cdb->offset);
5483                 len = scsi_4btoul(cdb->length);
5484                 byte2 = cdb->byte2;
5485                 break;
5486         }
5487         default: /* This shouldn't happen. */
5488                 ctl_set_invalid_opcode(ctsio);
5489                 ctl_done((union ctl_io *)ctsio);
5490                 return (CTL_RETVAL_COMPLETE);
5491         }
5492
5493         if ((byte2 & RWB_MODE) != RWB_MODE_DATA &&
5494             (byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
5495             (byte2 & RWB_MODE) != RWB_MODE_DESCR) {
5496                 ctl_set_invalid_field(ctsio,
5497                                       /*sks_valid*/ 1,
5498                                       /*command*/ 1,
5499                                       /*field*/ 1,
5500                                       /*bit_valid*/ 1,
5501                                       /*bit*/ 4);
5502                 ctl_done((union ctl_io *)ctsio);
5503                 return (CTL_RETVAL_COMPLETE);
5504         }
5505
5506         if (buffer_offset > CTL_WRITE_BUFFER_SIZE ||
5507             buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5508                 ctl_set_invalid_field(ctsio,
5509                                       /*sks_valid*/ 1,
5510                                       /*command*/ 1,
5511                                       /*field*/ 6,
5512                                       /*bit_valid*/ 0,
5513                                       /*bit*/ 0);
5514                 ctl_done((union ctl_io *)ctsio);
5515                 return (CTL_RETVAL_COMPLETE);
5516         }
5517
5518         if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) {
5519                 descr[0] = 0;
5520                 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
5521                 ctsio->kern_data_ptr = descr;
5522                 len = min(len, sizeof(descr));
5523         } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
5524                 ctsio->kern_data_ptr = echo_descr;
5525                 len = min(len, sizeof(echo_descr));
5526         } else {
5527                 if (lun->write_buffer == NULL) {
5528                         lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5529                             M_CTL, M_WAITOK);
5530                 }
5531                 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5532         }
5533         ctsio->kern_data_len = len;
5534         ctsio->kern_total_len = len;
5535         ctsio->kern_data_resid = 0;
5536         ctsio->kern_rel_offset = 0;
5537         ctsio->kern_sg_entries = 0;
5538         ctl_set_success(ctsio);
5539         ctsio->be_move_done = ctl_config_move_done;
5540         ctl_datamove((union ctl_io *)ctsio);
5541         return (CTL_RETVAL_COMPLETE);
5542 }
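
/*
 * Illustrative decode of the 4-byte descriptor returned for
 * RWB_MODE_DESCR above: byte 0 is the offset boundary (0, i.e.
 * byte-aligned offsets) and bytes 1..3 hold the buffer capacity.
 * Hypothetical helper:
 */
#if 0
static uint32_t
ctl_read_buffer_capacity(uint8_t *descr)
{
	return (scsi_3btoul(&descr[1]));	/* == CTL_WRITE_BUFFER_SIZE */
}
#endif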
5543
5544 int
5545 ctl_write_buffer(struct ctl_scsiio *ctsio)
5546 {
5547         struct scsi_write_buffer *cdb;
5548         struct ctl_lun *lun;
5549         int buffer_offset, len;
5550
5551         CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5552
5553         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5554         cdb = (struct scsi_write_buffer *)ctsio->cdb;
5555
5556         if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
5557                 ctl_set_invalid_field(ctsio,
5558                                       /*sks_valid*/ 1,
5559                                       /*command*/ 1,
5560                                       /*field*/ 1,
5561                                       /*bit_valid*/ 1,
5562                                       /*bit*/ 4);
5563                 ctl_done((union ctl_io *)ctsio);
5564                 return (CTL_RETVAL_COMPLETE);
5565         }
5566
5567         len = scsi_3btoul(cdb->length);
5568         buffer_offset = scsi_3btoul(cdb->offset);
5569
5570         if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5571                 ctl_set_invalid_field(ctsio,
5572                                       /*sks_valid*/ 1,
5573                                       /*command*/ 1,
5574                                       /*field*/ 6,
5575                                       /*bit_valid*/ 0,
5576                                       /*bit*/ 0);
5577                 ctl_done((union ctl_io *)ctsio);
5578                 return (CTL_RETVAL_COMPLETE);
5579         }
5580
5581         /*
5582          * If we've got a kernel request that hasn't been malloced yet,
5583          * malloc it and tell the caller the data buffer is here.
5584          */
5585         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5586                 if (lun->write_buffer == NULL) {
5587                         lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5588                             M_CTL, M_WAITOK);
5589                 }
5590                 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5591                 ctsio->kern_data_len = len;
5592                 ctsio->kern_total_len = len;
5593                 ctsio->kern_data_resid = 0;
5594                 ctsio->kern_rel_offset = 0;
5595                 ctsio->kern_sg_entries = 0;
5596                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5597                 ctsio->be_move_done = ctl_config_move_done;
5598                 ctl_datamove((union ctl_io *)ctsio);
5599
5600                 return (CTL_RETVAL_COMPLETE);
5601         }
5602
5603         ctl_set_success(ctsio);
5604         ctl_done((union ctl_io *)ctsio);
5605         return (CTL_RETVAL_COMPLETE);
5606 }
5607
5608 int
5609 ctl_write_same(struct ctl_scsiio *ctsio)
5610 {
5611         struct ctl_lun *lun;
5612         struct ctl_lba_len_flags *lbalen;
5613         uint64_t lba;
5614         uint32_t num_blocks;
5615         int len, retval;
5616         uint8_t byte2;
5617
5618         CTL_DEBUG_PRINT(("ctl_write_same\n"));
5619
5620         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5621
5622         switch (ctsio->cdb[0]) {
5623         case WRITE_SAME_10: {
5624                 struct scsi_write_same_10 *cdb;
5625
5626                 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5627
5628                 lba = scsi_4btoul(cdb->addr);
5629                 num_blocks = scsi_2btoul(cdb->length);
5630                 byte2 = cdb->byte2;
5631                 break;
5632         }
5633         case WRITE_SAME_16: {
5634                 struct scsi_write_same_16 *cdb;
5635
5636                 cdb = (struct scsi_write_same_16 *)ctsio->cdb;
5637
5638                 lba = scsi_8btou64(cdb->addr);
5639                 num_blocks = scsi_4btoul(cdb->length);
5640                 byte2 = cdb->byte2;
5641                 break;
5642         }
5643         default:
5644                 /*
5645                  * We got a command we don't support.  This shouldn't
5646                  * happen; commands should be filtered out above us.
5647                  */
5648                 ctl_set_invalid_opcode(ctsio);
5649                 ctl_done((union ctl_io *)ctsio);
5650
5651                 return (CTL_RETVAL_COMPLETE);
5652                 break; /* NOTREACHED */
5653         }
5654
5655         /* ANCHOR flag can be used only together with UNMAP */
5656         if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) {
5657                 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
5658                     /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
5659                 ctl_done((union ctl_io *)ctsio);
5660                 return (CTL_RETVAL_COMPLETE);
5661         }
5662
5663         /*
5664          * The first check is to make sure we're in bounds, the second
5665          * check is to catch wrap-around problems.  If the lba + num blocks
5666          * is less than the lba, then we've wrapped around and the block
5667          * range is invalid anyway.
5668          */
5669         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5670          || ((lba + num_blocks) < lba)) {
5671                 ctl_set_lba_out_of_range(ctsio);
5672                 ctl_done((union ctl_io *)ctsio);
5673                 return (CTL_RETVAL_COMPLETE);
5674         }
5675
5676         /* Zero number of blocks means "to the last logical block" */
5677         if (num_blocks == 0) {
5678                 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
5679                         ctl_set_invalid_field(ctsio,
5680                                               /*sks_valid*/ 0,
5681                                               /*command*/ 1,
5682                                               /*field*/ 0,
5683                                               /*bit_valid*/ 0,
5684                                               /*bit*/ 0);
5685                         ctl_done((union ctl_io *)ctsio);
5686                         return (CTL_RETVAL_COMPLETE);
5687                 }
5688                 num_blocks = (lun->be_lun->maxlba + 1) - lba;
5689         }
5690
5691         len = lun->be_lun->blocksize;
5692
5693         /*
5694          * If we've got a kernel request that hasn't been malloced yet,
5695          * malloc it and tell the caller the data buffer is here.
5696          */
5697         if ((byte2 & SWS_NDOB) == 0 &&
5698             (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5699                 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5700                 ctsio->kern_data_len = len;
5701                 ctsio->kern_total_len = len;
5702                 ctsio->kern_data_resid = 0;
5703                 ctsio->kern_rel_offset = 0;
5704                 ctsio->kern_sg_entries = 0;
5705                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5706                 ctsio->be_move_done = ctl_config_move_done;
5707                 ctl_datamove((union ctl_io *)ctsio);
5708
5709                 return (CTL_RETVAL_COMPLETE);
5710         }
5711
5712         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5713         lbalen->lba = lba;
5714         lbalen->len = num_blocks;
5715         lbalen->flags = byte2;
5716         retval = lun->backend->config_write((union ctl_io *)ctsio);
5717
5718         return (retval);
5719 }
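
/*
 * Worked example of the num_blocks == 0 case above (illustrative):
 * with maxlba = 1999 and lba = 500, num_blocks becomes
 * (1999 + 1) - 500 = 1500, i.e. "write same through the last logical
 * block".  The UINT32_MAX guard rejects spans too large for the
 * 32-bit block count.
 */
#if 0
static uint32_t
ctl_ws_blocks_to_end(uint64_t maxlba, uint64_t lba)
{
	/* Caller has verified (maxlba + 1) - lba <= UINT32_MAX. */
	return ((uint32_t)((maxlba + 1) - lba));
}
#endif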
5720
5721 int
5722 ctl_unmap(struct ctl_scsiio *ctsio)
5723 {
5724         struct ctl_lun *lun;
5725         struct scsi_unmap *cdb;
5726         struct ctl_ptr_len_flags *ptrlen;
5727         struct scsi_unmap_header *hdr;
5728         struct scsi_unmap_desc *buf, *end, *endnz, *range;
5729         uint64_t lba;
5730         uint32_t num_blocks;
5731         int len, retval;
5732         uint8_t byte2;
5733
5734         CTL_DEBUG_PRINT(("ctl_unmap\n"));
5735
5736         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5737         cdb = (struct scsi_unmap *)ctsio->cdb;
5738
5739         len = scsi_2btoul(cdb->length);
5740         byte2 = cdb->byte2;
5741
5742         /*
5743          * If we've got a kernel request that hasn't been malloced yet,
5744          * malloc it and tell the caller the data buffer is here.
5745          */
5746         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5747                 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5748                 ctsio->kern_data_len = len;
5749                 ctsio->kern_total_len = len;
5750                 ctsio->kern_data_resid = 0;
5751                 ctsio->kern_rel_offset = 0;
5752                 ctsio->kern_sg_entries = 0;
5753                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5754                 ctsio->be_move_done = ctl_config_move_done;
5755                 ctl_datamove((union ctl_io *)ctsio);
5756
5757                 return (CTL_RETVAL_COMPLETE);
5758         }
5759
5760         len = ctsio->kern_total_len - ctsio->kern_data_resid;
5761         hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
5762         if (len < sizeof (*hdr) ||
5763             len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
5764             len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
5765             scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
5766                 ctl_set_invalid_field(ctsio,
5767                                       /*sks_valid*/ 0,
5768                                       /*command*/ 0,
5769                                       /*field*/ 0,
5770                                       /*bit_valid*/ 0,
5771                                       /*bit*/ 0);
5772                 goto done;
5773         }
5774         len = scsi_2btoul(hdr->desc_length);
5775         buf = (struct scsi_unmap_desc *)(hdr + 1);
5776         end = buf + len / sizeof(*buf);
5777
5778         endnz = buf;
5779         for (range = buf; range < end; range++) {
5780                 lba = scsi_8btou64(range->lba);
5781                 num_blocks = scsi_4btoul(range->length);
5782                 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5783                  || ((lba + num_blocks) < lba)) {
5784                         ctl_set_lba_out_of_range(ctsio);
5785                         ctl_done((union ctl_io *)ctsio);
5786                         return (CTL_RETVAL_COMPLETE);
5787                 }
5788                 if (num_blocks != 0)
5789                         endnz = range + 1;
5790         }
5791
5792         /*
5793          * The block backend cannot handle a zero-length last range.
5794          * Filter it out and return if there is nothing left.
5795          */
5796         len = (uint8_t *)endnz - (uint8_t *)buf;
5797         if (len == 0) {
5798                 ctl_set_success(ctsio);
5799                 goto done;
5800         }
5801
5802         mtx_lock(&lun->lun_lock);
5803         ptrlen = (struct ctl_ptr_len_flags *)
5804             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5805         ptrlen->ptr = (void *)buf;
5806         ptrlen->len = len;
5807         ptrlen->flags = byte2;
5808         ctl_check_blocked(lun);
5809         mtx_unlock(&lun->lun_lock);
5810
5811         retval = lun->backend->config_write((union ctl_io *)ctsio);
5812         return (retval);
5813
5814 done:
5815         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5816                 free(ctsio->kern_data_ptr, M_CTL);
5817                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5818         }
5819         ctl_done((union ctl_io *)ctsio);
5820         return (CTL_RETVAL_COMPLETE);
5821 }
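
/*
 * Illustrative encoding of the single-range UNMAP parameter list that
 * the parser above expects: an 8-byte header followed by 16-byte
 * descriptors.  Hypothetical helper, shown only to make the wire
 * layout and the two length fields concrete.
 */
#if 0
static void
ctl_unmap_encode_one(struct scsi_unmap_header *hdr,
    struct scsi_unmap_desc *d, uint64_t lba, uint32_t nblocks)
{
	memset(hdr, 0, sizeof(*hdr));
	/* Bytes following the "length" field itself: 6 + 16 = 22. */
	scsi_ulto2b(sizeof(*hdr) - sizeof(hdr->length) + sizeof(*d),
	    hdr->length);
	/* Bytes of descriptor data: 16. */
	scsi_ulto2b(sizeof(*d), hdr->desc_length);
	memset(d, 0, sizeof(*d));
	scsi_u64to8b(lba, d->lba);
	scsi_ulto4b(nblocks, d->length);
}
#endif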
5822
5823 /*
5824  * Note that this function currently doesn't actually do anything inside
5825  * CTL to enforce things if the DQue bit is turned on.
5826  *
5827  * Also note that this function can't be used in the default case, because
5828  * the DQue bit isn't set in the changeable mask for the control mode page
5829  * anyway.  This is just here as an example of how to implement a page
5830  * handler, and a placeholder in case we want to allow the user to turn
5831  * tagged queueing on and off.
5832  *
5833  * The D_SENSE bit handling is functional, however, and will turn
5834  * descriptor sense on and off for a given LUN.
5835  */
5836 int
5837 ctl_control_page_handler(struct ctl_scsiio *ctsio,
5838                          struct ctl_page_index *page_index, uint8_t *page_ptr)
5839 {
5840         struct scsi_control_page *current_cp, *saved_cp, *user_cp;
5841         struct ctl_lun *lun;
5842         int set_ua;
5843         uint32_t initidx;
5844
5845         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5846         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5847         set_ua = 0;
5848
5849         user_cp = (struct scsi_control_page *)page_ptr;
5850         current_cp = (struct scsi_control_page *)
5851                 (page_index->page_data + (page_index->page_len *
5852                 CTL_PAGE_CURRENT));
5853         saved_cp = (struct scsi_control_page *)
5854                 (page_index->page_data + (page_index->page_len *
5855                 CTL_PAGE_SAVED));
5856
5857         mtx_lock(&lun->lun_lock);
5858         if (((current_cp->rlec & SCP_DSENSE) == 0)
5859          && ((user_cp->rlec & SCP_DSENSE) != 0)) {
5860                 /*
5861                  * Descriptor sense is currently turned off and the user
5862                  * wants to turn it on.
5863                  */
5864                 current_cp->rlec |= SCP_DSENSE;
5865                 saved_cp->rlec |= SCP_DSENSE;
5866                 lun->flags |= CTL_LUN_SENSE_DESC;
5867                 set_ua = 1;
5868         } else if (((current_cp->rlec & SCP_DSENSE) != 0)
5869                 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
5870                 /*
5871                  * Descriptor sense is currently turned on, and the user
5872                  * wants to turn it off.
5873                  */
5874                 current_cp->rlec &= ~SCP_DSENSE;
5875                 saved_cp->rlec &= ~SCP_DSENSE;
5876                 lun->flags &= ~CTL_LUN_SENSE_DESC;
5877                 set_ua = 1;
5878         }
5879         if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
5880             (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
5881                 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
5882                 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
5883                 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
5884                 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
5885                 set_ua = 1;
5886         }
5887         if ((current_cp->eca_and_aen & SCP_SWP) !=
5888             (user_cp->eca_and_aen & SCP_SWP)) {
5889                 current_cp->eca_and_aen &= ~SCP_SWP;
5890                 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
5891                 saved_cp->eca_and_aen &= ~SCP_SWP;
5892                 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
5893                 set_ua = 1;
5894         }
5895         if (set_ua != 0)
5896                 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5897         mtx_unlock(&lun->lun_lock);
5898         if (set_ua) {
5899                 ctl_isc_announce_mode(lun,
5900                     ctl_get_initindex(&ctsio->io_hdr.nexus),
5901                     page_index->page_code, page_index->subpage);
5902         }
5903         return (0);
5904 }
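
/*
 * Layout note (illustrative): page_data for each mode page holds four
 * consecutive copies of the page, indexed by the CTL_PAGE_* constants
 * (CURRENT, CHANGEABLE, DEFAULT, SAVED), which is why the handlers
 * above address copies as page_data + page_len * CTL_PAGE_CURRENT and
 * so on.  A hypothetical accessor:
 */
#if 0
static uint8_t *
ctl_page_copy(struct ctl_page_index *pi, int which)
{
	return (pi->page_data + pi->page_len * which);
}
#endif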
5905
5906 int
5907 ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
5908                      struct ctl_page_index *page_index, uint8_t *page_ptr)
5909 {
5910         struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
5911         struct ctl_lun *lun;
5912         int set_ua;
5913         uint32_t initidx;
5914
5915         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5916         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5917         set_ua = 0;
5918
5919         user_cp = (struct scsi_caching_page *)page_ptr;
5920         current_cp = (struct scsi_caching_page *)
5921                 (page_index->page_data + (page_index->page_len *
5922                 CTL_PAGE_CURRENT));
5923         saved_cp = (struct scsi_caching_page *)
5924                 (page_index->page_data + (page_index->page_len *
5925                 CTL_PAGE_SAVED));
5926
5927         mtx_lock(&lun->lun_lock);
5928         if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
5929             (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
5930                 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
5931                 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
5932                 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
5933                 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
5934                 set_ua = 1;
5935         }
5936         if (set_ua != 0)
5937                 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
5938         mtx_unlock(&lun->lun_lock);
5939         if (set_ua) {
5940                 ctl_isc_announce_mode(lun,
5941                     ctl_get_initindex(&ctsio->io_hdr.nexus),
5942                     page_index->page_code, page_index->subpage);
5943         }
5944         return (0);
5945 }
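
/*
 * Illustrative only: the two cache knobs accepted above.  SCP_WCE
 * enables the write cache (write-back when set) and SCP_RCD disables
 * the read cache.  Hypothetical predicate:
 */
#if 0
static int
ctl_cache_is_write_back(struct scsi_caching_page *cp)
{
	return ((cp->flags1 & SCP_WCE) != 0);
}
#endif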
5946
5947 int
5948 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
5949                                 struct ctl_page_index *page_index,
5950                                 uint8_t *page_ptr)
5951 {
5952         uint8_t *c;
5953         int i;
5954
5955         c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
5956         ctl_time_io_secs = (c[0] << 8) | c[1];
5960         CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
5961         printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
5962         printf("page data:");
5963         for (i = 0; i < 8; i++)
5964                 printf(" %.2x", page_ptr[i]);
5965         printf("\n");
5966         return (0);
5967 }
5968
5969 int
5970 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
5971                                struct ctl_page_index *page_index,
5972                                int pc)
5973 {
5974         struct copan_debugconf_subpage *page;
5975
5976         page = (struct copan_debugconf_subpage *)(page_index->page_data +
5977                 (page_index->page_len * pc));
5978
5979         switch (pc) {
5980         case SMS_PAGE_CTRL_CHANGEABLE >> 6:
5981         case SMS_PAGE_CTRL_DEFAULT >> 6:
5982         case SMS_PAGE_CTRL_SAVED >> 6:
5983                 /*
5984                  * We don't update the changeable or default bits for this page.
5985                  */
5986                 break;
5987         case SMS_PAGE_CTRL_CURRENT >> 6:
5988                 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
5989                 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
5990                 break;
5991         default:
5992                 break;
5993         }
5994         return (0);
5995 }
5996
5997
5998 static int
5999 ctl_do_mode_select(union ctl_io *io)
6000 {
6001         struct scsi_mode_page_header *page_header;
6002         struct ctl_page_index *page_index;
6003         struct ctl_scsiio *ctsio;
6004         int page_len, page_len_offset, page_len_size;
6005         union ctl_modepage_info *modepage_info;
6006         struct ctl_lun *lun;
6007         int *len_left, *len_used;
6008         int retval, i;
6009
6010         ctsio = &io->scsiio;
6011         page_index = NULL;
6012         page_len = 0;
6013         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6014
6015         modepage_info = (union ctl_modepage_info *)
6016                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6017         len_left = &modepage_info->header.len_left;
6018         len_used = &modepage_info->header.len_used;
6019
6020 do_next_page:
6021
6022         page_header = (struct scsi_mode_page_header *)
6023                 (ctsio->kern_data_ptr + *len_used);
6024
6025         if (*len_left == 0) {
6026                 free(ctsio->kern_data_ptr, M_CTL);
6027                 ctl_set_success(ctsio);
6028                 ctl_done((union ctl_io *)ctsio);
6029                 return (CTL_RETVAL_COMPLETE);
6030         } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6031
6032                 free(ctsio->kern_data_ptr, M_CTL);
6033                 ctl_set_param_len_error(ctsio);
6034                 ctl_done((union ctl_io *)ctsio);
6035                 return (CTL_RETVAL_COMPLETE);
6036
6037         } else if ((page_header->page_code & SMPH_SPF)
6038                 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6039
6040                 free(ctsio->kern_data_ptr, M_CTL);
6041                 ctl_set_param_len_error(ctsio);
6042                 ctl_done((union ctl_io *)ctsio);
6043                 return (CTL_RETVAL_COMPLETE);
6044         }
6045
6046
6047         /*
6048          * XXX KDM should we do something with the block descriptor?
6049          */
6050         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6051                 page_index = &lun->mode_pages.index[i];
6052                 if (lun->be_lun->lun_type == T_DIRECT &&
6053                     (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6054                         continue;
6055                 if (lun->be_lun->lun_type == T_PROCESSOR &&
6056                     (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6057                         continue;
6058                 if (lun->be_lun->lun_type == T_CDROM &&
6059                     (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6060                         continue;
6061
6062                 if ((page_index->page_code & SMPH_PC_MASK) !=
6063                     (page_header->page_code & SMPH_PC_MASK))
6064                         continue;
6065
6066                 /*
6067                  * If neither page has a subpage code, then we've got a
6068                  * match.
6069                  */
6070                 if (((page_index->page_code & SMPH_SPF) == 0)
6071                  && ((page_header->page_code & SMPH_SPF) == 0)) {
6072                         page_len = page_header->page_length;
6073                         break;
6074                 }
6075
6076                 /*
6077                  * If both pages have subpages, then the subpage numbers
6078                  * have to match.
6079                  */
6080                 if ((page_index->page_code & SMPH_SPF)
6081                   && (page_header->page_code & SMPH_SPF)) {
6082                         struct scsi_mode_page_header_sp *sph;
6083
6084                         sph = (struct scsi_mode_page_header_sp *)page_header;
6085                         if (page_index->subpage == sph->subpage) {
6086                                 page_len = scsi_2btoul(sph->page_length);
6087                                 break;
6088                         }
6089                 }
6090         }
6091
6092         /*
6093          * If we couldn't find the page, or if we don't have a mode select
6094          * handler for it, send back an error to the user.
6095          */
6096         if ((i >= CTL_NUM_MODE_PAGES)
6097          || (page_index->select_handler == NULL)) {
6098                 ctl_set_invalid_field(ctsio,
6099                                       /*sks_valid*/ 1,
6100                                       /*command*/ 0,
6101                                       /*field*/ *len_used,
6102                                       /*bit_valid*/ 0,
6103                                       /*bit*/ 0);
6104                 free(ctsio->kern_data_ptr, M_CTL);
6105                 ctl_done((union ctl_io *)ctsio);
6106                 return (CTL_RETVAL_COMPLETE);
6107         }
6108
6109         if (page_index->page_code & SMPH_SPF) {
6110                 page_len_offset = 2;
6111                 page_len_size = 2;
6112         } else {
6113                 page_len_size = 1;
6114                 page_len_offset = 1;
6115         }
6116
6117         /*
6118          * If the length the initiator gives us isn't the one we specify in
6119          * the mode page header, or if they didn't specify enough data in
6120          * the CDB to avoid truncating this page, kick out the request.
6121          */
6122         if ((page_len != (page_index->page_len - page_len_offset -
6123                           page_len_size))
6124          || (*len_left < page_index->page_len)) {
6125
6126
6127                 ctl_set_invalid_field(ctsio,
6128                                       /*sks_valid*/ 1,
6129                                       /*command*/ 0,
6130                                       /*field*/ *len_used + page_len_offset,
6131                                       /*bit_valid*/ 0,
6132                                       /*bit*/ 0);
6133                 free(ctsio->kern_data_ptr, M_CTL);
6134                 ctl_done((union ctl_io *)ctsio);
6135                 return (CTL_RETVAL_COMPLETE);
6136         }
6137
6138         /*
6139          * Run through the mode page, checking to make sure that the bits
6140          * the user changed are actually legal to change.
6141          */
6142         for (i = 0; i < page_index->page_len; i++) {
6143                 uint8_t *user_byte, *change_mask, *current_byte;
6144                 int bad_bit;
6145                 int j;
6146
6147                 user_byte = (uint8_t *)page_header + i;
6148                 change_mask = page_index->page_data +
6149                               (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6150                 current_byte = page_index->page_data +
6151                                (page_index->page_len * CTL_PAGE_CURRENT) + i;
6152
6153                 /*
6154                  * Check to see whether the user set any bits in this byte
6155                  * that are not allowed to be set.
6156                  */
6157                 if ((*user_byte & ~(*change_mask)) ==
6158                     (*current_byte & ~(*change_mask)))
6159                         continue;
6160
6161                 /*
6162                  * Go through bit by bit to determine which one is illegal.
6163                  */
6164                 bad_bit = 0;
6165                 for (j = 7; j >= 0; j--) {
6166                         if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6167                             (((1 << j) & ~(*change_mask)) & *current_byte)) {
6168                                 bad_bit = j;
6169                                 break;
6170                         }
6171                 }
6172                 ctl_set_invalid_field(ctsio,
6173                                       /*sks_valid*/ 1,
6174                                       /*command*/ 0,
6175                                       /*field*/ *len_used + i,
6176                                       /*bit_valid*/ 1,
6177                                       /*bit*/ bad_bit);
6178                 free(ctsio->kern_data_ptr, M_CTL);
6179                 ctl_done((union ctl_io *)ctsio);
6180                 return (CTL_RETVAL_COMPLETE);
6181         }
6182
6183         /*
6184          * Decrement these before we call the page handler, since we may
6185          * end up getting called back one way or another before the handler
6186          * returns to this context.
6187          */
6188         *len_left -= page_index->page_len;
6189         *len_used += page_index->page_len;
6190
6191         retval = page_index->select_handler(ctsio, page_index,
6192                                             (uint8_t *)page_header);
6193
6194         /*
6195          * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6196          * wait until this queued command completes to finish processing
6197          * the mode page.  If it returns anything other than
6198          * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6199          * already set the sense information, freed the data pointer, and
6200          * completed the io for us.
6201          */
6202         if (retval != CTL_RETVAL_COMPLETE)
6203                 goto bailout_no_done;
6204
6205         /*
6206          * If the initiator sent us more than one page, parse the next one.
6207          */
6208         if (*len_left > 0)
6209                 goto do_next_page;
6210
6211         ctl_set_success(ctsio);
6212         free(ctsio->kern_data_ptr, M_CTL);
6213         ctl_done((union ctl_io *)ctsio);
6214
6215 bailout_no_done:
6216
6217         return (CTL_RETVAL_COMPLETE);
6218
6219 }
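
/*
 * Worked example of the length bookkeeping above (illustrative): for a
 * non-subpage page both page_len_offset and page_len_size are 1, so a
 * page stored with page_len 20 must arrive with a page_length byte of
 * 18; for a subpage (SMPH_SPF set) the 4-byte header leaves
 * page_length = page_len - 4.  A hypothetical check:
 */
#if 0
static int
ctl_mode_page_len_ok(struct ctl_page_index *pi, int hdr_page_len, int spf)
{
	int off = spf ? 2 : 1, size = spf ? 2 : 1;

	return (hdr_page_len == pi->page_len - off - size);
}
#endif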
6220
6221 int
6222 ctl_mode_select(struct ctl_scsiio *ctsio)
6223 {
6224         int param_len, pf, sp;
6225         int header_size, bd_len;
6226         union ctl_modepage_info *modepage_info;
6227
6228         switch (ctsio->cdb[0]) {
6229         case MODE_SELECT_6: {
6230                 struct scsi_mode_select_6 *cdb;
6231
6232                 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6233
6234                 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6235                 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6236                 param_len = cdb->length;
6237                 header_size = sizeof(struct scsi_mode_header_6);
6238                 break;
6239         }
6240         case MODE_SELECT_10: {
6241                 struct scsi_mode_select_10 *cdb;
6242
6243                 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6244
6245                 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6246                 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6247                 param_len = scsi_2btoul(cdb->length);
6248                 header_size = sizeof(struct scsi_mode_header_10);
6249                 break;
6250         }
6251         default:
6252                 ctl_set_invalid_opcode(ctsio);
6253                 ctl_done((union ctl_io *)ctsio);
6254                 return (CTL_RETVAL_COMPLETE);
6255         }
6256
6257         /*
6258          * From SPC-3:
6259          * "A parameter list length of zero indicates that the Data-Out Buffer
6260          * shall be empty. This condition shall not be considered as an error."
6261          */
6262         if (param_len == 0) {
6263                 ctl_set_success(ctsio);
6264                 ctl_done((union ctl_io *)ctsio);
6265                 return (CTL_RETVAL_COMPLETE);
6266         }
6267
6268         /*
6269          * Since we'll hit this the first time through, prior to
6270          * allocation, we don't need to free a data buffer here.
6271          */
6272         if (param_len < header_size) {
6273                 ctl_set_param_len_error(ctsio);
6274                 ctl_done((union ctl_io *)ctsio);
6275                 return (CTL_RETVAL_COMPLETE);
6276         }
6277
6278         /*
6279          * Allocate the data buffer and grab the user's data.  In theory,
6280          * we shouldn't have to sanity check the parameter list length here
6281          * because the maximum size is 64K.  We should be able to malloc
6282          * that much without too many problems.
6283          */
6284         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6285                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6286                 ctsio->kern_data_len = param_len;
6287                 ctsio->kern_total_len = param_len;
6288                 ctsio->kern_data_resid = 0;
6289                 ctsio->kern_rel_offset = 0;
6290                 ctsio->kern_sg_entries = 0;
6291                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6292                 ctsio->be_move_done = ctl_config_move_done;
6293                 ctl_datamove((union ctl_io *)ctsio);
6294
6295                 return (CTL_RETVAL_COMPLETE);
6296         }
6297
6298         switch (ctsio->cdb[0]) {
6299         case MODE_SELECT_6: {
6300                 struct scsi_mode_header_6 *mh6;
6301
6302                 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6303                 bd_len = mh6->blk_desc_len;
6304                 break;
6305         }
6306         case MODE_SELECT_10: {
6307                 struct scsi_mode_header_10 *mh10;
6308
6309                 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6310                 bd_len = scsi_2btoul(mh10->blk_desc_len);
6311                 break;
6312         }
6313         default:
6314                 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
6315         }
6316
6317         if (param_len < (header_size + bd_len)) {
6318                 free(ctsio->kern_data_ptr, M_CTL);
6319                 ctl_set_param_len_error(ctsio);
6320                 ctl_done((union ctl_io *)ctsio);
6321                 return (CTL_RETVAL_COMPLETE);
6322         }
6323
6324         /*
6325          * Set the IO_CONT flag, so that if this I/O gets passed to
6326          * ctl_config_write_done(), it'll get passed back to
6327          * ctl_do_mode_select() for further processing, or completion if
6328          * we're all done.
6329          */
6330         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6331         ctsio->io_cont = ctl_do_mode_select;
6332
6333         modepage_info = (union ctl_modepage_info *)
6334                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6335         memset(modepage_info, 0, sizeof(*modepage_info));
6336         modepage_info->header.len_left = param_len - header_size - bd_len;
6337         modepage_info->header.len_used = header_size + bd_len;
6338
6339         return (ctl_do_mode_select((union ctl_io *)ctsio));
6340 }
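
/*
 * Worked example (illustrative): for MODE SELECT(6) with param_len 32,
 * header_size is 4 and a bd_len of 8 leaves len_used starting at 12
 * and len_left at 20, so ctl_do_mode_select() begins parsing mode
 * pages at byte 12 of the parameter list.
 */
#if 0
static void
ctl_mode_select_cursor(int param_len, int header_size, int bd_len,
    int *len_used, int *len_left)
{
	*len_used = header_size + bd_len;
	*len_left = param_len - *len_used;
}
#endif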
6341
6342 int
6343 ctl_mode_sense(struct ctl_scsiio *ctsio)
6344 {
6345         struct ctl_lun *lun;
6346         int pc, page_code, dbd, llba, subpage;
6347         int alloc_len, page_len, header_len, total_len;
6348         struct scsi_mode_block_descr *block_desc;
6349         struct ctl_page_index *page_index;
6350
6351         dbd = 0;
6352         llba = 0;
6353         block_desc = NULL;
6354
6355         CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6356
6357         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6358         switch (ctsio->cdb[0]) {
6359         case MODE_SENSE_6: {
6360                 struct scsi_mode_sense_6 *cdb;
6361
6362                 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6363
6364                 header_len = sizeof(struct scsi_mode_hdr_6);
6365                 if (cdb->byte2 & SMS_DBD)
6366                         dbd = 1;
6367                 else
6368                         header_len += sizeof(struct scsi_mode_block_descr);
6369
6370                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6371                 page_code = cdb->page & SMS_PAGE_CODE;
6372                 subpage = cdb->subpage;
6373                 alloc_len = cdb->length;
6374                 break;
6375         }
6376         case MODE_SENSE_10: {
6377                 struct scsi_mode_sense_10 *cdb;
6378
6379                 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6380
6381                 header_len = sizeof(struct scsi_mode_hdr_10);
6382
6383                 if (cdb->byte2 & SMS_DBD)
6384                         dbd = 1;
6385                 else
6386                         header_len += sizeof(struct scsi_mode_block_descr);
6387                 if (cdb->byte2 & SMS10_LLBAA)
6388                         llba = 1;
6389                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6390                 page_code = cdb->page & SMS_PAGE_CODE;
6391                 subpage = cdb->subpage;
6392                 alloc_len = scsi_2btoul(cdb->length);
6393                 break;
6394         }
6395         default:
6396                 ctl_set_invalid_opcode(ctsio);
6397                 ctl_done((union ctl_io *)ctsio);
6398                 return (CTL_RETVAL_COMPLETE);
6399                 break; /* NOTREACHED */
6400         }
6401
6402         /*
6403          * We have to make a first pass through to calculate the size of
6404          * the pages that match the user's query.  Then we allocate enough
6405          * memory to hold it, and actually copy the data into the buffer.
6406          */
6407         switch (page_code) {
6408         case SMS_ALL_PAGES_PAGE: {
6409                 int i;
6410
6411                 page_len = 0;
6412
6413                 /*
6414                  * At the moment, values other than 0 and 0xff here are
6415                  * reserved according to SPC-3.
6416                  */
6417                 if ((subpage != SMS_SUBPAGE_PAGE_0)
6418                  && (subpage != SMS_SUBPAGE_ALL)) {
6419                         ctl_set_invalid_field(ctsio,
6420                                               /*sks_valid*/ 1,
6421                                               /*command*/ 1,
6422                                               /*field*/ 3,
6423                                               /*bit_valid*/ 0,
6424                                               /*bit*/ 0);
6425                         ctl_done((union ctl_io *)ctsio);
6426                         return (CTL_RETVAL_COMPLETE);
6427                 }
6428
6429                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6430                         page_index = &lun->mode_pages.index[i];
6431
6432                         /* Make sure the page is supported for this dev type */
6433                         if (lun->be_lun->lun_type == T_DIRECT &&
6434                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6435                                 continue;
6436                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6437                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6438                                 continue;
6439                         if (lun->be_lun->lun_type == T_CDROM &&
6440                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6441                                 continue;
6442
6443                         /*
6444                          * We don't use this subpage if the user didn't
6445                          * request all subpages.
6446                          */
6447                         if ((page_index->subpage != 0)
6448                          && (subpage == SMS_SUBPAGE_PAGE_0))
6449                                 continue;
6450
6451 #if 0
6452                         printf("found page %#x len %d\n",
6453                                page_index->page_code & SMPH_PC_MASK,
6454                                page_index->page_len);
6455 #endif
6456                         page_len += page_index->page_len;
6457                 }
6458                 break;
6459         }
6460         default: {
6461                 int i;
6462
6463                 page_len = 0;
6464
6465                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6466                         page_index = &lun->mode_pages.index[i];
6467
6468                         /* Make sure the page is supported for this dev type */
6469                         if (lun->be_lun->lun_type == T_DIRECT &&
6470                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6471                                 continue;
6472                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6473                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6474                                 continue;
6475                         if (lun->be_lun->lun_type == T_CDROM &&
6476                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6477                                 continue;
6478
6479                         /* Look for the right page code */
6480                         if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6481                                 continue;
6482
6483                         /* Look for the right subpage or the subpage wildcard */
6484                         if ((page_index->subpage != subpage)
6485                          && (subpage != SMS_SUBPAGE_ALL))
6486                                 continue;
6487
6488 #if 0
6489                         printf("found page %#x len %d\n",
6490                                page_index->page_code & SMPH_PC_MASK,
6491                                page_index->page_len);
6492 #endif
6493
6494                         page_len += page_index->page_len;
6495                 }
6496
6497                 if (page_len == 0) {
6498                         ctl_set_invalid_field(ctsio,
6499                                               /*sks_valid*/ 1,
6500                                               /*command*/ 1,
6501                                               /*field*/ 2,
6502                                               /*bit_valid*/ 1,
6503                                               /*bit*/ 5);
6504                         ctl_done((union ctl_io *)ctsio);
6505                         return (CTL_RETVAL_COMPLETE);
6506                 }
6507                 break;
6508         }
6509         }
6510
6511         total_len = header_len + page_len;
6512 #if 0
6513         printf("header_len = %d, page_len = %d, total_len = %d\n",
6514                header_len, page_len, total_len);
6515 #endif
6516
6517         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6518         ctsio->kern_sg_entries = 0;
6519         ctsio->kern_data_resid = 0;
6520         ctsio->kern_rel_offset = 0;
6521         if (total_len < alloc_len) {
6522                 ctsio->residual = alloc_len - total_len;
6523                 ctsio->kern_data_len = total_len;
6524                 ctsio->kern_total_len = total_len;
6525         } else {
6526                 ctsio->residual = 0;
6527                 ctsio->kern_data_len = alloc_len;
6528                 ctsio->kern_total_len = alloc_len;
6529         }
6530
6531         switch (ctsio->cdb[0]) {
6532         case MODE_SENSE_6: {
6533                 struct scsi_mode_hdr_6 *header;
6534
6535                 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6536
6537                 header->datalen = MIN(total_len - 1, 254);
6538                 if (lun->be_lun->lun_type == T_DIRECT) {
6539                         header->dev_specific = 0x10; /* DPOFUA */
6540                         if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6541                             (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
6542                             .eca_and_aen & SCP_SWP) != 0)
6543                                     header->dev_specific |= 0x80; /* WP */
6544                 }
6545                 if (dbd)
6546                         header->block_descr_len = 0;
6547                 else
6548                         header->block_descr_len =
6549                                 sizeof(struct scsi_mode_block_descr);
6550                 block_desc = (struct scsi_mode_block_descr *)&header[1];
6551                 break;
6552         }
6553         case MODE_SENSE_10: {
6554                 struct scsi_mode_hdr_10 *header;
6555                 int datalen;
6556
6557                 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6558
6559                 datalen = MIN(total_len - 2, 65533);
6560                 scsi_ulto2b(datalen, header->datalen);
6561                 if (lun->be_lun->lun_type == T_DIRECT) {
6562                         header->dev_specific = 0x10; /* DPOFUA */
6563                         if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) ||
6564                             (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
6565                             .eca_and_aen & SCP_SWP) != 0)
6566                                 header->dev_specific |= 0x80; /* WP */
6567                 }
6568                 if (dbd)
6569                         scsi_ulto2b(0, header->block_descr_len);
6570                 else
6571                         scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
6572                                     header->block_descr_len);
6573                 block_desc = (struct scsi_mode_block_descr *)&header[1];
6574                 break;
6575         }
6576         default:
6577                 panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
6578         }
6579
6580         /*
6581          * If we've got a disk, use its blocksize in the block
6582          * descriptor.  Otherwise, just set it to 0.
6583          */
6584         if (dbd == 0) {
6585                 if (lun->be_lun->lun_type == T_DIRECT)
6586                         scsi_ulto3b(lun->be_lun->blocksize,
6587                                     block_desc->block_len);
6588                 else
6589                         scsi_ulto3b(0, block_desc->block_len);
6590         }
6591
6592         switch (page_code) {
6593         case SMS_ALL_PAGES_PAGE: {
6594                 int i, data_used;
6595
6596                 data_used = header_len;
6597                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6598                         struct ctl_page_index *page_index;
6599
6600                         page_index = &lun->mode_pages.index[i];
6601                         if (lun->be_lun->lun_type == T_DIRECT &&
6602                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6603                                 continue;
6604                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6605                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6606                                 continue;
6607                         if (lun->be_lun->lun_type == T_CDROM &&
6608                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6609                                 continue;
6610
6611                         /*
6612                          * We don't use this subpage if the user didn't
6613                          * request all subpages.  We already checked (above)
6614                          * to make sure the user only specified a subpage
6615                          * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6616                          */
6617                         if ((page_index->subpage != 0)
6618                          && (subpage == SMS_SUBPAGE_PAGE_0))
6619                                 continue;
6620
6621                         /*
6622                          * Call the handler, if it exists, to update the
6623                          * page to the latest values.
6624                          */
6625                         if (page_index->sense_handler != NULL)
6626                         page_index->sense_handler(ctsio, page_index, pc);
6627
6628                         memcpy(ctsio->kern_data_ptr + data_used,
6629                                page_index->page_data +
6630                                (page_index->page_len * pc),
6631                                page_index->page_len);
6632                         data_used += page_index->page_len;
6633                 }
6634                 break;
6635         }
6636         default: {
6637                 int i, data_used;
6638
6639                 data_used = header_len;
6640
6641                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6642                         struct ctl_page_index *page_index;
6643
6644                         page_index = &lun->mode_pages.index[i];
6645
6646                         /* Look for the right page code */
6647                         if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6648                                 continue;
6649
6650                         /* Look for the right subpage or the subpage wildcard */
6651                         if ((page_index->subpage != subpage)
6652                          && (subpage != SMS_SUBPAGE_ALL))
6653                                 continue;
6654
6655                         /* Make sure the page is supported for this dev type */
6656                         if (lun->be_lun->lun_type == T_DIRECT &&
6657                             (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
6658                                 continue;
6659                         if (lun->be_lun->lun_type == T_PROCESSOR &&
6660                             (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
6661                                 continue;
6662                         if (lun->be_lun->lun_type == T_CDROM &&
6663                             (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
6664                                 continue;
6665
6666                         /*
6667                          * Call the handler, if it exists, to update the
6668                          * page to the latest values.
6669                          */
6670                         if (page_index->sense_handler != NULL)
6671                         page_index->sense_handler(ctsio, page_index, pc);
6672
6673                         memcpy(ctsio->kern_data_ptr + data_used,
6674                                page_index->page_data +
6675                                (page_index->page_len * pc),
6676                                page_index->page_len);
6677                         data_used += page_index->page_len;
6678                 }
6679                 break;
6680         }
6681         }
6682
6683         ctl_set_success(ctsio);
6684         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6685         ctsio->be_move_done = ctl_config_move_done;
6686         ctl_datamove((union ctl_io *)ctsio);
6687         return (CTL_RETVAL_COMPLETE);
6688 }
6689
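/*
 * Populate the Logical Block Provisioning log page from backend LUN
 * attributes.  Each attribute the backend can report becomes one log
 * parameter: 0x0001 (available blocks) and 0x0002 (used blocks) for the
 * LUN, plus 0x00f1/0x00f2 for the containing pool.  Values are reported
 * in units of 2^CTL_LBP_EXPONENT logical blocks.  Parameters whose
 * attribute the backend cannot supply (UINT64_MAX) are simply omitted,
 * which is why page_len is recomputed from the write pointer at the end.
 */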
6690 int
6691 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
6692                                struct ctl_page_index *page_index,
6693                                int pc)
6694 {
6695         struct ctl_lun *lun;
6696         struct scsi_log_param_header *phdr;
6697         uint8_t *data;
6698         uint64_t val;
6699
6700         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6701         data = page_index->page_data;
6702
6703         if (lun->backend->lun_attr != NULL &&
6704             (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
6705              != UINT64_MAX) {
6706                 phdr = (struct scsi_log_param_header *)data;
6707                 scsi_ulto2b(0x0001, phdr->param_code);
6708                 phdr->param_control = SLP_LBIN | SLP_LP;
6709                 phdr->param_len = 8;
6710                 data = (uint8_t *)(phdr + 1);
6711                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6712                 data[4] = 0x02; /* per-pool */
6713                 data += phdr->param_len;
6714         }
6715
6716         if (lun->backend->lun_attr != NULL &&
6717             (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
6718              != UINT64_MAX) {
6719                 phdr = (struct scsi_log_param_header *)data;
6720                 scsi_ulto2b(0x0002, phdr->param_code);
6721                 phdr->param_control = SLP_LBIN | SLP_LP;
6722                 phdr->param_len = 8;
6723                 data = (uint8_t *)(phdr + 1);
6724                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6725                 data[4] = 0x01; /* per-LUN */
6726                 data += phdr->param_len;
6727         }
6728
6729         if (lun->backend->lun_attr != NULL &&
6730             (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
6731              != UINT64_MAX) {
6732                 phdr = (struct scsi_log_param_header *)data;
6733                 scsi_ulto2b(0x00f1, phdr->param_code);
6734                 phdr->param_control = SLP_LBIN | SLP_LP;
6735                 phdr->param_len = 8;
6736                 data = (uint8_t *)(phdr + 1);
6737                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6738                 data[4] = 0x02; /* per-pool */
6739                 data += phdr->param_len;
6740         }
6741
6742         if (lun->backend->lun_attr != NULL &&
6743             (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
6744              != UINT64_MAX) {
6745                 phdr = (struct scsi_log_param_header *)data;
6746                 scsi_ulto2b(0x00f2, phdr->param_code);
6747                 phdr->param_control = SLP_LBIN | SLP_LP;
6748                 phdr->param_len = 8;
6749                 data = (uint8_t *)(phdr + 1);
6750                 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6751                 data[4] = 0x02; /* per-pool */
6752                 data += phdr->param_len;
6753         }
6754
6755         page_index->page_len = data - page_index->page_data;
6756         return (0);
6757 }
6758
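/*
 * Populate the Statistics and Performance log page by summing the
 * per-port counters kept in lun->stats: read/write command counts,
 * bytes moved (converted to logical blocks below), and cumulative
 * command times (bintimes converted to milliseconds).  The
 * "recvieved_lba" spelling below follows the structure definition in
 * the SCSI headers.
 */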
6759 int
6760 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6761                                struct ctl_page_index *page_index,
6762                                int pc)
6763 {
6764         struct ctl_lun *lun;
6765         struct stat_page *data;
6766         uint64_t rn, wn, rb, wb;
6767         struct bintime rt, wt;
6768         int i;
6769
6770         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6771         data = (struct stat_page *)page_index->page_data;
6772
6773         scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
6774         data->sap.hdr.param_control = SLP_LBIN;
6775         data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
6776             sizeof(struct scsi_log_param_header);
6777         rn = wn = rb = wb = 0;
6778         bintime_clear(&rt);
6779         bintime_clear(&wt);
6780         for (i = 0; i < CTL_MAX_PORTS; i++) {
6781                 rn += lun->stats.ports[i].operations[CTL_STATS_READ];
6782                 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
6783                 rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
6784                 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
6785                 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
6786                 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
6787         }
6788         scsi_u64to8b(rn, data->sap.read_num);
6789         scsi_u64to8b(wn, data->sap.write_num);
6790         if (lun->stats.blocksize > 0) {
6791                 scsi_u64to8b(wb / lun->stats.blocksize,
6792                     data->sap.recvieved_lba);
6793                 scsi_u64to8b(rb / lun->stats.blocksize,
6794                     data->sap.transmitted_lba);
6795         }
6796         scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
6797             data->sap.read_int);
6798         scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
6799             data->sap.write_int);
6800         scsi_u64to8b(0, data->sap.weighted_num);
6801         scsi_u64to8b(0, data->sap.weighted_int);
6802         scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
6803         data->it.hdr.param_control = SLP_LBIN;
6804         data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
6805             sizeof(struct scsi_log_param_header);
6806 #ifdef CTL_TIME_IO
6807         scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
6808 #endif
6809         scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
6810         data->ti.hdr.param_control = SLP_LBIN;
6811         data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
6812             sizeof(struct scsi_log_param_header);
6813         scsi_ulto4b(3, data->ti.exponent);
6814         scsi_ulto4b(1, data->ti.integer);
6815
6816         page_index->page_len = sizeof(*data);
6817         return (0);
6818 }
6819
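/*
 * LOG SENSE: look the requested page/subpage up in the LUN's log page
 * index, refresh it through its sense handler if one is registered, and
 * return it behind a scsi_log_header.  Requests for pages we do not
 * keep fail with INVALID FIELD IN CDB.
 */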
6820 int
6821 ctl_log_sense(struct ctl_scsiio *ctsio)
6822 {
6823         struct ctl_lun *lun;
6824         int i, pc, page_code, subpage;
6825         int alloc_len, total_len;
6826         struct ctl_page_index *page_index;
6827         struct scsi_log_sense *cdb;
6828         struct scsi_log_header *header;
6829
6830         CTL_DEBUG_PRINT(("ctl_log_sense\n"));
6831
6832         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6833         cdb = (struct scsi_log_sense *)ctsio->cdb;
6834         pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
6835         page_code = cdb->page & SLS_PAGE_CODE;
6836         subpage = cdb->subpage;
6837         alloc_len = scsi_2btoul(cdb->length);
6838
6839         page_index = NULL;
6840         for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
6841                 page_index = &lun->log_pages.index[i];
6842
6843                 /* Look for the right page code */
6844                 if ((page_index->page_code & SL_PAGE_CODE) != page_code)
6845                         continue;
6846
6847                 /* Look for the right subpage or the subpage wildcard */
6848                 if (page_index->subpage != subpage)
6849                         continue;
6850
6851                 break;
6852         }
6853         if (i >= CTL_NUM_LOG_PAGES) {
6854                 ctl_set_invalid_field(ctsio,
6855                                       /*sks_valid*/ 1,
6856                                       /*command*/ 1,
6857                                       /*field*/ 2,
6858                                       /*bit_valid*/ 0,
6859                                       /*bit*/ 0);
6860                 ctl_done((union ctl_io *)ctsio);
6861                 return (CTL_RETVAL_COMPLETE);
6862         }
6863
6864         total_len = sizeof(struct scsi_log_header) + page_index->page_len;
6865
6866         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6867         ctsio->kern_sg_entries = 0;
6868         ctsio->kern_data_resid = 0;
6869         ctsio->kern_rel_offset = 0;
6870         if (total_len < alloc_len) {
6871                 ctsio->residual = alloc_len - total_len;
6872                 ctsio->kern_data_len = total_len;
6873                 ctsio->kern_total_len = total_len;
6874         } else {
6875                 ctsio->residual = 0;
6876                 ctsio->kern_data_len = alloc_len;
6877                 ctsio->kern_total_len = alloc_len;
6878         }
6879
6880         header = (struct scsi_log_header *)ctsio->kern_data_ptr;
6881         header->page = page_index->page_code;
6882         if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING)
6883                 header->page |= SL_DS;
6884         if (page_index->subpage) {
6885                 header->page |= SL_SPF;
6886                 header->subpage = page_index->subpage;
6887         }
6888         scsi_ulto2b(page_index->page_len, header->datalen);
6889
6890         /*
6891          * Call the handler, if it exists, to update the
6892          * page to the latest values.
6893          */
6894         if (page_index->sense_handler != NULL)
6895                 page_index->sense_handler(ctsio, page_index, pc);
6896
6897         memcpy(header + 1, page_index->page_data, page_index->page_len);
6898
6899         ctl_set_success(ctsio);
6900         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6901         ctsio->be_move_done = ctl_config_move_done;
6902         ctl_datamove((union ctl_io *)ctsio);
6903         return (CTL_RETVAL_COMPLETE);
6904 }
6905
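/*
 * READ CAPACITY(10): return the last LBA and the logical block length.
 * LUNs whose last LBA does not fit in 32 bits report 0xffffffff to
 * direct the initiator to READ CAPACITY(16).
 */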
6906 int
6907 ctl_read_capacity(struct ctl_scsiio *ctsio)
6908 {
6909         struct scsi_read_capacity *cdb;
6910         struct scsi_read_capacity_data *data;
6911         struct ctl_lun *lun;
6912         uint32_t lba;
6913
6914         CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6915
6916         cdb = (struct scsi_read_capacity *)ctsio->cdb;
6917
6918         lba = scsi_4btoul(cdb->addr);
6919         if (((cdb->pmi & SRC_PMI) == 0)
6920          && (lba != 0)) {
6921                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6922                                       /*sks_valid*/ 1,
6923                                       /*command*/ 1,
6924                                       /*field*/ 2,
6925                                       /*bit_valid*/ 0,
6926                                       /*bit*/ 0);
6927                 ctl_done((union ctl_io *)ctsio);
6928                 return (CTL_RETVAL_COMPLETE);
6929         }
6930
6931         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6932
6933         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6934         data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6935         ctsio->residual = 0;
6936         ctsio->kern_data_len = sizeof(*data);
6937         ctsio->kern_total_len = sizeof(*data);
6938         ctsio->kern_data_resid = 0;
6939         ctsio->kern_rel_offset = 0;
6940         ctsio->kern_sg_entries = 0;
6941
6942         /*
6943          * If the maximum LBA is greater than 0xfffffffe, the user must
6944          * issue a SERVICE ACTION IN (16) command, with the read capacity
6945          * service action set.
6946          */
6947         if (lun->be_lun->maxlba > 0xfffffffe)
6948                 scsi_ulto4b(0xffffffff, data->addr);
6949         else
6950                 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
6951
6952         /*
6953          * XXX KDM this may not be 512 bytes...
6954          */
6955         scsi_ulto4b(lun->be_lun->blocksize, data->length);
6956
6957         ctl_set_success(ctsio);
6958         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6959         ctsio->be_move_done = ctl_config_move_done;
6960         ctl_datamove((union ctl_io *)ctsio);
6961         return (CTL_RETVAL_COMPLETE);
6962 }
6963
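/*
 * READ CAPACITY(16), a SERVICE ACTION IN(16) service action: the 64-bit
 * variant, which also carries the physical block exponent/offset and,
 * for LUNs with UNMAP enabled, the LBPME/LBPRZ provisioning bits.
 */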
6964 int
6965 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6966 {
6967         struct scsi_read_capacity_16 *cdb;
6968         struct scsi_read_capacity_data_long *data;
6969         struct ctl_lun *lun;
6970         uint64_t lba;
6971         uint32_t alloc_len;
6972
6973         CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6974
6975         cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6976
6977         alloc_len = scsi_4btoul(cdb->alloc_len);
6978         lba = scsi_8btou64(cdb->addr);
6979
6980         if ((cdb->reladr & SRC16_PMI)
6981          && (lba != 0)) {
6982                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6983                                       /*sks_valid*/ 1,
6984                                       /*command*/ 1,
6985                                       /*field*/ 2,
6986                                       /*bit_valid*/ 0,
6987                                       /*bit*/ 0);
6988                 ctl_done((union ctl_io *)ctsio);
6989                 return (CTL_RETVAL_COMPLETE);
6990         }
6991
6992         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6993
6994         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6995         data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
6996
6997         if (sizeof(*data) < alloc_len) {
6998                 ctsio->residual = alloc_len - sizeof(*data);
6999                 ctsio->kern_data_len = sizeof(*data);
7000                 ctsio->kern_total_len = sizeof(*data);
7001         } else {
7002                 ctsio->residual = 0;
7003                 ctsio->kern_data_len = alloc_len;
7004                 ctsio->kern_total_len = alloc_len;
7005         }
7006         ctsio->kern_data_resid = 0;
7007         ctsio->kern_rel_offset = 0;
7008         ctsio->kern_sg_entries = 0;
7009
7010         scsi_u64to8b(lun->be_lun->maxlba, data->addr);
7011         /* XXX KDM this may not be 512 bytes... */
7012         scsi_ulto4b(lun->be_lun->blocksize, data->length);
7013         data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
7014         scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
7015         if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
7016                 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
7017
7018         ctl_set_success(ctsio);
7019         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7020         ctsio->be_move_done = ctl_config_move_done;
7021         ctl_datamove((union ctl_io *)ctsio);
7022         return (CTL_RETVAL_COMPLETE);
7023 }
7024
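/*
 * GET LBA STATUS: validate the starting LBA, pre-fill a single
 * descriptor declaring the rest of the LUN "mapped or unknown", then
 * pass the request to the backend's config_read method so it can
 * substitute real provisioning information if it has any.
 */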
7025 int
7026 ctl_get_lba_status(struct ctl_scsiio *ctsio)
7027 {
7028         struct scsi_get_lba_status *cdb;
7029         struct scsi_get_lba_status_data *data;
7030         struct ctl_lun *lun;
7031         struct ctl_lba_len_flags *lbalen;
7032         uint64_t lba;
7033         uint32_t alloc_len, total_len;
7034         int retval;
7035
7036         CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
7037
7038         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7039         cdb = (struct scsi_get_lba_status *)ctsio->cdb;
7040         lba = scsi_8btou64(cdb->addr);
7041         alloc_len = scsi_4btoul(cdb->alloc_len);
7042
7043         if (lba > lun->be_lun->maxlba) {
7044                 ctl_set_lba_out_of_range(ctsio);
7045                 ctl_done((union ctl_io *)ctsio);
7046                 return (CTL_RETVAL_COMPLETE);
7047         }
7048
7049         total_len = sizeof(*data) + sizeof(data->descr[0]);
7050         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7051         data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
7052
7053         if (total_len < alloc_len) {
7054                 ctsio->residual = alloc_len - total_len;
7055                 ctsio->kern_data_len = total_len;
7056                 ctsio->kern_total_len = total_len;
7057         } else {
7058                 ctsio->residual = 0;
7059                 ctsio->kern_data_len = alloc_len;
7060                 ctsio->kern_total_len = alloc_len;
7061         }
7062         ctsio->kern_data_resid = 0;
7063         ctsio->kern_rel_offset = 0;
7064         ctsio->kern_sg_entries = 0;
7065
7066         /* Fill dummy data in case backend can't tell anything. */
7067         scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
7068         scsi_u64to8b(lba, data->descr[0].addr);
7069         scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
7070             data->descr[0].length);
7071         data->descr[0].status = 0; /* Mapped or unknown. */
7072
7073         ctl_set_success(ctsio);
7074         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7075         ctsio->be_move_done = ctl_config_move_done;
7076
7077         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
7078         lbalen->lba = lba;
7079         lbalen->len = total_len;
7080         lbalen->flags = 0;
7081         retval = lun->backend->config_read((union ctl_io *)ctsio);
7082         return (CTL_RETVAL_COMPLETE);
7083 }
7084
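/*
 * READ DEFECT DATA(10/12): CTL keeps no defect list, so return an empty
 * header in whichever format the CDB requested.  A zero allocation
 * length completes immediately without a data phase.
 */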
7085 int
7086 ctl_read_defect(struct ctl_scsiio *ctsio)
7087 {
7088         struct scsi_read_defect_data_10 *ccb10;
7089         struct scsi_read_defect_data_12 *ccb12;
7090         struct scsi_read_defect_data_hdr_10 *data10;
7091         struct scsi_read_defect_data_hdr_12 *data12;
7092         uint32_t alloc_len, data_len;
7093         uint8_t format;
7094
7095         CTL_DEBUG_PRINT(("ctl_read_defect\n"));
7096
7097         if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7098                 ccb10 = (struct scsi_read_defect_data_10 *)ctsio->cdb;
7099                 format = ccb10->format;
7100                 alloc_len = scsi_2btoul(ccb10->alloc_length);
7101                 data_len = sizeof(*data10);
7102         } else {
7103                 ccb12 = (struct scsi_read_defect_data_12 *)ctsio->cdb;
7104                 format = ccb12->format;
7105                 alloc_len = scsi_4btoul(ccb12->alloc_length);
7106                 data_len = sizeof(*data12);
7107         }
7108         if (alloc_len == 0) {
7109                 ctl_set_success(ctsio);
7110                 ctl_done((union ctl_io *)ctsio);
7111                 return (CTL_RETVAL_COMPLETE);
7112         }
7113
7114         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
7115         if (data_len < alloc_len) {
7116                 ctsio->residual = alloc_len - data_len;
7117                 ctsio->kern_data_len = data_len;
7118                 ctsio->kern_total_len = data_len;
7119         } else {
7120                 ctsio->residual = 0;
7121                 ctsio->kern_data_len = alloc_len;
7122                 ctsio->kern_total_len = alloc_len;
7123         }
7124         ctsio->kern_data_resid = 0;
7125         ctsio->kern_rel_offset = 0;
7126         ctsio->kern_sg_entries = 0;
7127
7128         if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7129                 data10 = (struct scsi_read_defect_data_hdr_10 *)
7130                     ctsio->kern_data_ptr;
7131                 data10->format = format;
7132                 scsi_ulto2b(0, data10->length);
7133         } else {
7134                 data12 = (struct scsi_read_defect_data_hdr_12 *)
7135                     ctsio->kern_data_ptr;
7136                 data12->format = format;
7137                 scsi_ulto2b(0, data12->generation);
7138                 scsi_ulto4b(0, data12->length);
7139         }
7140
7141         ctl_set_success(ctsio);
7142         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7143         ctsio->be_move_done = ctl_config_move_done;
7144         ctl_datamove((union ctl_io *)ctsio);
7145         return (CTL_RETVAL_COMPLETE);
7146 }
7147
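/*
 * REPORT TARGET PORT GROUPS.  ("tagret" is a long-standing misspelling;
 * the symbol name is kept as-is since it is referenced elsewhere.)  One
 * shared group (TPG 1) covers HA-shared ports, followed by one group
 * per HA shelf (TPG 2 + g); asymmetric access states are derived from
 * the HA link state and whether this shelf is primary for the LUN.
 */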
7148 int
7149 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
7150 {
7151         struct scsi_maintenance_in *cdb;
7152         int retval;
7153         int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
7154         int num_ha_groups, num_target_ports, shared_group;
7155         struct ctl_lun *lun;
7156         struct ctl_softc *softc;
7157         struct ctl_port *port;
7158         struct scsi_target_group_data *rtg_ptr;
7159         struct scsi_target_group_data_extended *rtg_ext_ptr;
7160         struct scsi_target_port_group_descriptor *tpg_desc;
7161
7162         CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7163
7164         cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7165         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7166         softc = lun->ctl_softc;
7167
7168         retval = CTL_RETVAL_COMPLETE;
7169
7170         switch (cdb->byte2 & STG_PDF_MASK) {
7171         case STG_PDF_LENGTH:
7172                 ext = 0;
7173                 break;
7174         case STG_PDF_EXTENDED:
7175                 ext = 1;
7176                 break;
7177         default:
7178                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7179                                       /*sks_valid*/ 1,
7180                                       /*command*/ 1,
7181                                       /*field*/ 2,
7182                                       /*bit_valid*/ 1,
7183                                       /*bit*/ 5);
7184                 ctl_done((union ctl_io *)ctsio);
7185                 return (retval);
7186         }
7187
7188         num_target_ports = 0;
7189         shared_group = (softc->is_single != 0);
7190         mtx_lock(&softc->ctl_lock);
7191         STAILQ_FOREACH(port, &softc->port_list, links) {
7192                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7193                         continue;
7194                 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7195                         continue;
7196                 num_target_ports++;
7197                 if (port->status & CTL_PORT_STATUS_HA_SHARED)
7198                         shared_group = 1;
7199         }
7200         mtx_unlock(&softc->ctl_lock);
7201         num_ha_groups = (softc->is_single) ? 0 : NUM_HA_SHELVES;
7202
7203         if (ext)
7204                 total_len = sizeof(struct scsi_target_group_data_extended);
7205         else
7206                 total_len = sizeof(struct scsi_target_group_data);
7207         total_len += sizeof(struct scsi_target_port_group_descriptor) *
7208             (shared_group + num_ha_groups) +
7209             sizeof(struct scsi_target_port_descriptor) * num_target_ports;
7210
7211         alloc_len = scsi_4btoul(cdb->length);
7212
7213         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7214
7215         ctsio->kern_sg_entries = 0;
7216
7217         if (total_len < alloc_len) {
7218                 ctsio->residual = alloc_len - total_len;
7219                 ctsio->kern_data_len = total_len;
7220                 ctsio->kern_total_len = total_len;
7221         } else {
7222                 ctsio->residual = 0;
7223                 ctsio->kern_data_len = alloc_len;
7224                 ctsio->kern_total_len = alloc_len;
7225         }
7226         ctsio->kern_data_resid = 0;
7227         ctsio->kern_rel_offset = 0;
7228
7229         if (ext) {
7230                 rtg_ext_ptr = (struct scsi_target_group_data_extended *)
7231                     ctsio->kern_data_ptr;
7232                 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
7233                 rtg_ext_ptr->format_type = 0x10;
7234                 rtg_ext_ptr->implicit_transition_time = 0;
7235                 tpg_desc = &rtg_ext_ptr->groups[0];
7236         } else {
7237                 rtg_ptr = (struct scsi_target_group_data *)
7238                     ctsio->kern_data_ptr;
7239                 scsi_ulto4b(total_len - 4, rtg_ptr->length);
7240                 tpg_desc = &rtg_ptr->groups[0];
7241         }
7242
7243         mtx_lock(&softc->ctl_lock);
7244         pg = softc->port_min / softc->port_cnt;
7245         if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) {
7246                 /* Some shelf is known to be primary. */
7247                 if (softc->ha_link == CTL_HA_LINK_OFFLINE)
7248                         os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7249                 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
7250                         os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7251                 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
7252                         os = TPG_ASYMMETRIC_ACCESS_STANDBY;
7253                 else
7254                         os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7255                 if (lun->flags & CTL_LUN_PRIMARY_SC) {
7256                         ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7257                 } else {
7258                         ts = os;
7259                         os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7260                 }
7261         } else {
7262                 /* No known primary shelf. */
7263                 if (softc->ha_link == CTL_HA_LINK_OFFLINE) {
7264                         ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
7265                         os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7266                 } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) {
7267                         ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7268                         os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7269                 } else {
7270                         ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
7271                 }
7272         }
7273         if (shared_group) {
7274                 tpg_desc->pref_state = ts;
7275                 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7276                     TPG_U_SUP | TPG_T_SUP;
7277                 scsi_ulto2b(1, tpg_desc->target_port_group);
7278                 tpg_desc->status = TPG_IMPLICIT;
7279                 pc = 0;
7280                 STAILQ_FOREACH(port, &softc->port_list, links) {
7281                         if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7282                                 continue;
7283                         if (!softc->is_single &&
7284                             (port->status & CTL_PORT_STATUS_HA_SHARED) == 0)
7285                                 continue;
7286                         if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7287                                 continue;
7288                         scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7289                             relative_target_port_identifier);
7290                         pc++;
7291                 }
7292                 tpg_desc->target_port_count = pc;
7293                 tpg_desc = (struct scsi_target_port_group_descriptor *)
7294                     &tpg_desc->descriptors[pc];
7295         }
7296         for (g = 0; g < num_ha_groups; g++) {
7297                 tpg_desc->pref_state = (g == pg) ? ts : os;
7298                 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
7299                     TPG_U_SUP | TPG_T_SUP;
7300                 scsi_ulto2b(2 + g, tpg_desc->target_port_group);
7301                 tpg_desc->status = TPG_IMPLICIT;
7302                 pc = 0;
7303                 STAILQ_FOREACH(port, &softc->port_list, links) {
7304                         if (port->targ_port < g * softc->port_cnt ||
7305                             port->targ_port >= (g + 1) * softc->port_cnt)
7306                                 continue;
7307                         if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7308                                 continue;
7309                         if (port->status & CTL_PORT_STATUS_HA_SHARED)
7310                                 continue;
7311                         if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7312                                 continue;
7313                         scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
7314                             relative_target_port_identifier);
7315                         pc++;
7316                 }
7317                 tpg_desc->target_port_count = pc;
7318                 tpg_desc = (struct scsi_target_port_group_descriptor *)
7319                     &tpg_desc->descriptors[pc];
7320         }
7321         mtx_unlock(&softc->ctl_lock);
7322
7323         ctl_set_success(ctsio);
7324         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7325         ctsio->be_move_done = ctl_config_move_done;
7326         ctl_datamove((union ctl_io *)ctsio);
7327         return (retval);
7328 }
7329
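/*
 * REPORT SUPPORTED OPERATION CODES: walk the 256-entry ctl_cmd_table;
 * entries flagged CTL_CMD_FLAG_SA5 fan out into 32 service-action
 * sub-entries.  RSO_OPTIONS_ALL returns one descriptor per command
 * applicable to this LUN type; OC and OC_SA return a single entry with
 * the CDB usage map for the given opcode (and service action).
 */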
7330 int
7331 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7332 {
7333         struct ctl_lun *lun;
7334         struct scsi_report_supported_opcodes *cdb;
7335         const struct ctl_cmd_entry *entry, *sentry;
7336         struct scsi_report_supported_opcodes_all *all;
7337         struct scsi_report_supported_opcodes_descr *descr;
7338         struct scsi_report_supported_opcodes_one *one;
7339         int retval;
7340         int alloc_len, total_len;
7341         int opcode, service_action, i, j, num;
7342
7343         CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7344
7345         cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7346         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7347
7348         retval = CTL_RETVAL_COMPLETE;
7349
7350         opcode = cdb->requested_opcode;
7351         service_action = scsi_2btoul(cdb->requested_service_action);
7352         switch (cdb->options & RSO_OPTIONS_MASK) {
7353         case RSO_OPTIONS_ALL:
7354                 num = 0;
7355                 for (i = 0; i < 256; i++) {
7356                         entry = &ctl_cmd_table[i];
7357                         if (entry->flags & CTL_CMD_FLAG_SA5) {
7358                                 for (j = 0; j < 32; j++) {
7359                                         sentry = &((const struct ctl_cmd_entry *)
7360                                             entry->execute)[j];
7361                                         if (ctl_cmd_applicable(
7362                                             lun->be_lun->lun_type, sentry))
7363                                                 num++;
7364                                 }
7365                         } else {
7366                                 if (ctl_cmd_applicable(lun->be_lun->lun_type,
7367                                     entry))
7368                                         num++;
7369                         }
7370                 }
7371                 total_len = sizeof(struct scsi_report_supported_opcodes_all) +
7372                     num * sizeof(struct scsi_report_supported_opcodes_descr);
7373                 break;
7374         case RSO_OPTIONS_OC:
7375                 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
7376                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7377                                               /*sks_valid*/ 1,
7378                                               /*command*/ 1,
7379                                               /*field*/ 2,
7380                                               /*bit_valid*/ 1,
7381                                               /*bit*/ 2);
7382                         ctl_done((union ctl_io *)ctsio);
7383                         return (CTL_RETVAL_COMPLETE);
7384                 }
7385                 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7386                 break;
7387         case RSO_OPTIONS_OC_SA:
7388                 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
7389                     service_action >= 32) {
7390                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7391                                               /*sks_valid*/ 1,
7392                                               /*command*/ 1,
7393                                               /*field*/ 2,
7394                                               /*bit_valid*/ 1,
7395                                               /*bit*/ 2);
7396                         ctl_done((union ctl_io *)ctsio);
7397                         return (CTL_RETVAL_COMPLETE);
7398                 }
7399                 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7400                 break;
7401         default:
7402                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7403                                       /*sks_valid*/ 1,
7404                                       /*command*/ 1,
7405                                       /*field*/ 2,
7406                                       /*bit_valid*/ 1,
7407                                       /*bit*/ 2);
7408                 ctl_done((union ctl_io *)ctsio);
7409                 return (CTL_RETVAL_COMPLETE);
7410         }
7411
7412         alloc_len = scsi_4btoul(cdb->length);
7413
7414         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7415
7416         ctsio->kern_sg_entries = 0;
7417
7418         if (total_len < alloc_len) {
7419                 ctsio->residual = alloc_len - total_len;
7420                 ctsio->kern_data_len = total_len;
7421                 ctsio->kern_total_len = total_len;
7422         } else {
7423                 ctsio->residual = 0;
7424                 ctsio->kern_data_len = alloc_len;
7425                 ctsio->kern_total_len = alloc_len;
7426         }
7427         ctsio->kern_data_resid = 0;
7428         ctsio->kern_rel_offset = 0;
7429
7430         switch (cdb->options & RSO_OPTIONS_MASK) {
7431         case RSO_OPTIONS_ALL:
7432                 all = (struct scsi_report_supported_opcodes_all *)
7433                     ctsio->kern_data_ptr;
7434                 num = 0;
7435                 for (i = 0; i < 256; i++) {
7436                         entry = &ctl_cmd_table[i];
7437                         if (entry->flags & CTL_CMD_FLAG_SA5) {
7438                                 for (j = 0; j < 32; j++) {
7439                                         sentry = &((const struct ctl_cmd_entry *)
7440                                             entry->execute)[j];
7441                                         if (!ctl_cmd_applicable(
7442                                             lun->be_lun->lun_type, sentry))
7443                                                 continue;
7444                                         descr = &all->descr[num++];
7445                                         descr->opcode = i;
7446                                         scsi_ulto2b(j, descr->service_action);
7447                                         descr->flags = RSO_SERVACTV;
7448                                         scsi_ulto2b(sentry->length,
7449                                             descr->cdb_length);
7450                                 }
7451                         } else {
7452                                 if (!ctl_cmd_applicable(lun->be_lun->lun_type,
7453                                     entry))
7454                                         continue;
7455                                 descr = &all->descr[num++];
7456                                 descr->opcode = i;
7457                                 scsi_ulto2b(0, descr->service_action);
7458                                 descr->flags = 0;
7459                                 scsi_ulto2b(entry->length, descr->cdb_length);
7460                         }
7461                 }
7462                 scsi_ulto4b(
7463                     num * sizeof(struct scsi_report_supported_opcodes_descr),
7464                     all->length);
7465                 break;
7466         case RSO_OPTIONS_OC:
7467                 one = (struct scsi_report_supported_opcodes_one *)
7468                     ctsio->kern_data_ptr;
7469                 entry = &ctl_cmd_table[opcode];
7470                 goto fill_one;
7471         case RSO_OPTIONS_OC_SA:
7472                 one = (struct scsi_report_supported_opcodes_one *)
7473                     ctsio->kern_data_ptr;
7474                 entry = &ctl_cmd_table[opcode];
7475                 entry = &((const struct ctl_cmd_entry *)
7476                     entry->execute)[service_action];
7477 fill_one:
7478                 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
7479                         one->support = 3;
7480                         scsi_ulto2b(entry->length, one->cdb_length);
7481                         one->cdb_usage[0] = opcode;
7482                         memcpy(&one->cdb_usage[1], entry->usage,
7483                             entry->length - 1);
7484                 } else
7485                         one->support = 1;
7486                 break;
7487         }
7488
7489         ctl_set_success(ctsio);
7490         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7491         ctsio->be_move_done = ctl_config_move_done;
7492         ctl_datamove((union ctl_io *)ctsio);
7493         return (retval);
7494 }
7495
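/*
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS: the response is a fixed
 * bitmap advertising the TMFs CTL implements (abort task and task set,
 * clear task set, LUN reset, query task, target reset, and friends).
 */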
7496 int
7497 ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
7498 {
7499         struct scsi_report_supported_tmf *cdb;
7500         struct scsi_report_supported_tmf_data *data;
7501         int retval;
7502         int alloc_len, total_len;
7503
7504         CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
7505
7506         cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
7507
7508         retval = CTL_RETVAL_COMPLETE;
7509
7510         total_len = sizeof(struct scsi_report_supported_tmf_data);
7511         alloc_len = scsi_4btoul(cdb->length);
7512
7513         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7514
7515         ctsio->kern_sg_entries = 0;
7516
7517         if (total_len < alloc_len) {
7518                 ctsio->residual = alloc_len - total_len;
7519                 ctsio->kern_data_len = total_len;
7520                 ctsio->kern_total_len = total_len;
7521         } else {
7522                 ctsio->residual = 0;
7523                 ctsio->kern_data_len = alloc_len;
7524                 ctsio->kern_total_len = alloc_len;
7525         }
7526         ctsio->kern_data_resid = 0;
7527         ctsio->kern_rel_offset = 0;
7528
7529         data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
7530         data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
7531             RST_TRS;
7532         data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
7533
7534         ctl_set_success(ctsio);
7535         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7536         ctsio->be_move_done = ctl_config_move_done;
7537         ctl_datamove((union ctl_io *)ctsio);
7538         return (retval);
7539 }
7540
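/*
 * REPORT TIMESTAMP: return host time as milliseconds since the Unix
 * epoch in the 6-byte big-endian TIMESTAMP field, split below into a
 * 4-byte high part and a 2-byte low part.  For example, a timestamp of
 * 0x0123456789ab ms would be laid out as 01 23 45 67 89 ab.
 */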
7541 int
7542 ctl_report_timestamp(struct ctl_scsiio *ctsio)
7543 {
7544         struct scsi_report_timestamp *cdb;
7545         struct scsi_report_timestamp_data *data;
7546         struct timeval tv;
7547         int64_t timestamp;
7548         int retval;
7549         int alloc_len, total_len;
7550
7551         CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
7552
7553         cdb = (struct scsi_report_timestamp *)ctsio->cdb;
7554
7555         retval = CTL_RETVAL_COMPLETE;
7556
7557         total_len = sizeof(struct scsi_report_timestamp_data);
7558         alloc_len = scsi_4btoul(cdb->length);
7559
7560         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7561
7562         ctsio->kern_sg_entries = 0;
7563
7564         if (total_len < alloc_len) {
7565                 ctsio->residual = alloc_len - total_len;
7566                 ctsio->kern_data_len = total_len;
7567                 ctsio->kern_total_len = total_len;
7568         } else {
7569                 ctsio->residual = 0;
7570                 ctsio->kern_data_len = alloc_len;
7571                 ctsio->kern_total_len = alloc_len;
7572         }
7573         ctsio->kern_data_resid = 0;
7574         ctsio->kern_rel_offset = 0;
7575
7576         data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
7577         scsi_ulto2b(sizeof(*data) - 2, data->length);
7578         data->origin = RTS_ORIG_OUTSIDE;
7579         getmicrotime(&tv);
7580         timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
7581         scsi_ulto4b(timestamp >> 16, data->timestamp);
7582         scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
7583
7584         ctl_set_success(ctsio);
7585         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7586         ctsio->be_move_done = ctl_config_move_done;
7587         ctl_datamove((union ctl_io *)ctsio);
7588         return (retval);
7589 }
7590
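/*
 * PERSISTENT RESERVE IN: size the response under the LUN lock, drop it
 * to allocate the buffer, and retake it to fill the data in.  If the
 * registration or reservation state changed while the lock was dropped,
 * free the buffer and retry from the top.
 */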
7591 int
7592 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7593 {
7594         struct scsi_per_res_in *cdb;
7595         int alloc_len, total_len = 0;
7596         /* struct scsi_per_res_in_rsrv in_data; */
7597         struct ctl_lun *lun;
7598         struct ctl_softc *softc;
7599         uint64_t key;
7600
7601         CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7602
7603         cdb = (struct scsi_per_res_in *)ctsio->cdb;
7604
7605         alloc_len = scsi_2btoul(cdb->length);
7606
7607         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7608         softc = lun->ctl_softc;
7609
7610 retry:
7611         mtx_lock(&lun->lun_lock);
7612         switch (cdb->action) {
7613         case SPRI_RK: /* read keys */
7614                 total_len = sizeof(struct scsi_per_res_in_keys) +
7615                         lun->pr_key_count *
7616                         sizeof(struct scsi_per_res_key);
7617                 break;
7618         case SPRI_RR: /* read reservation */
7619                 if (lun->flags & CTL_LUN_PR_RESERVED)
7620                         total_len = sizeof(struct scsi_per_res_in_rsrv);
7621                 else
7622                         total_len = sizeof(struct scsi_per_res_in_header);
7623                 break;
7624         case SPRI_RC: /* report capabilities */
7625                 total_len = sizeof(struct scsi_per_res_cap);
7626                 break;
7627         case SPRI_RS: /* read full status */
7628                 total_len = sizeof(struct scsi_per_res_in_header) +
7629                     (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7630                     lun->pr_key_count;
7631                 break;
7632         default:
7633                 panic("%s: Invalid PR action %#x", __func__, cdb->action);
7634         }
7635         mtx_unlock(&lun->lun_lock);
7636
7637         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7638
7639         if (total_len < alloc_len) {
7640                 ctsio->residual = alloc_len - total_len;
7641                 ctsio->kern_data_len = total_len;
7642                 ctsio->kern_total_len = total_len;
7643         } else {
7644                 ctsio->residual = 0;
7645                 ctsio->kern_data_len = alloc_len;
7646                 ctsio->kern_total_len = alloc_len;
7647         }
7648
7649         ctsio->kern_data_resid = 0;
7650         ctsio->kern_rel_offset = 0;
7651         ctsio->kern_sg_entries = 0;
7652
7653         mtx_lock(&lun->lun_lock);
7654         switch (cdb->action) {
7655         case SPRI_RK: { // read keys
7656                 struct scsi_per_res_in_keys *res_keys;
7657                 int i, key_count;
7658
7659                 res_keys = (struct scsi_per_res_in_keys *)ctsio->kern_data_ptr;
7660
7661                 /*
7662                  * We had to drop the lock to allocate our buffer, which
7663                  * leaves time for someone to come in with another
7664                  * persistent reservation.  (That is unlikely, though,
7665                  * since this should be the only persistent reservation
7666                  * command active right now.)
7667                  */
7668                 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7669                     (lun->pr_key_count *
7670                      sizeof(struct scsi_per_res_key)))){
7671                         mtx_unlock(&lun->lun_lock);
7672                         free(ctsio->kern_data_ptr, M_CTL);
7673                         printf("%s: reservation length changed, retrying\n",
7674                                __func__);
7675                         goto retry;
7676                 }
7677
7678                 scsi_ulto4b(lun->pr_generation, res_keys->header.generation);
7679
7680                 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7681                              lun->pr_key_count, res_keys->header.length);
7682
7683                 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
7684                         if ((key = ctl_get_prkey(lun, i)) == 0)
7685                                 continue;
7686
7687                         /*
7688                          * We used lun->pr_key_count to calculate the
7689                          * size to allocate.  If it turns out the number of
7690                          * initiators with the registered flag set is
7691                          * larger than that (i.e. they haven't been kept in
7692                          * sync), we've got a problem.
7693                          */
7694                         if (key_count >= lun->pr_key_count) {
7695                                 key_count++;
7696                                 continue;
7697                         }
7698                         scsi_u64to8b(key, res_keys->keys[key_count].key);
7699                         key_count++;
7700                 }
7701                 break;
7702         }
7703         case SPRI_RR: { // read reservation
7704                 struct scsi_per_res_in_rsrv *res;
7705                 int tmp_len, header_only;
7706
7707                 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7708
7709                 scsi_ulto4b(lun->pr_generation, res->header.generation);
7710
7711                 if (lun->flags & CTL_LUN_PR_RESERVED)
7712                 {
7713                         tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7714                         scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7715                                     res->header.length);
7716                         header_only = 0;
7717                 } else {
7718                         tmp_len = sizeof(struct scsi_per_res_in_header);
7719                         scsi_ulto4b(0, res->header.length);
7720                         header_only = 1;
7721                 }
7722
7723                 /*
7724                  * We had to drop the lock to allocate our buffer, which
7725                  * leaves time for someone to come in with another
7726                  * persistent reservation.  (That is unlikely, though,
7727                  * since this should be the only persistent reservation
7728                  * command active right now.)
7729                  */
7730                 if (tmp_len != total_len) {
7731                         mtx_unlock(&lun->lun_lock);
7732                         free(ctsio->kern_data_ptr, M_CTL);
7733                         printf("%s: reservation status changed, retrying\n",
7734                                __func__);
7735                         goto retry;
7736                 }
7737
7738                 /*
7739                  * No reservation held, so we're done.
7740                  */
7741                 if (header_only != 0)
7742                         break;
7743
7744                 /*
7745                  * If the registration is an All Registrants type, the key
7746                  * is 0, since it doesn't really matter.
7747                  */
7748                 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7749                         scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
7750                             res->data.reservation);
7751                 }
7752                 res->data.scopetype = lun->pr_res_type;
7753                 break;
7754         }
7755         case SPRI_RC: // report capabilities
7756         {
7757                 struct scsi_per_res_cap *res_cap;
7758                 uint16_t type_mask;
7759
7760                 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7761                 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7762                 res_cap->flags1 = SPRI_CRH;
7763                 res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5;
7764                 type_mask = SPRI_TM_WR_EX_AR |
7765                             SPRI_TM_EX_AC_RO |
7766                             SPRI_TM_WR_EX_RO |
7767                             SPRI_TM_EX_AC |
7768                             SPRI_TM_WR_EX |
7769                             SPRI_TM_EX_AC_AR;
7770                 scsi_ulto2b(type_mask, res_cap->type_mask);
7771                 break;
7772         }
7773         case SPRI_RS: { // read full status
7774                 struct scsi_per_res_in_full *res_status;
7775                 struct scsi_per_res_in_full_desc *res_desc;
7776                 struct ctl_port *port;
7777                 int i, len;
7778
7779                 res_status = (struct scsi_per_res_in_full *)ctsio->kern_data_ptr;
7780
7781                 /*
7782                  * We had to drop the lock to allocate our buffer, which
7783                  * leaves time for someone to come in with another
7784                  * persistent reservation.  (That is unlikely, though,
7785                  * since this should be the only persistent reservation
7786                  * command active right now.)
7787                  */
7788                 if (total_len < (sizeof(struct scsi_per_res_in_header) +
7789                     (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7790                      lun->pr_key_count)){
7791                         mtx_unlock(&lun->lun_lock);
7792                         free(ctsio->kern_data_ptr, M_CTL);
7793                         printf("%s: reservation length changed, retrying\n",
7794                                __func__);
7795                         goto retry;
7796                 }
7797
7798                 scsi_ulto4b(lun->pr_generation, res_status->header.generation);
7799
7800                 res_desc = &res_status->desc[0];
7801                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7802                         if ((key = ctl_get_prkey(lun, i)) == 0)
7803                                 continue;
7804
7805                         scsi_u64to8b(key, res_desc->res_key.key);
7806                         if ((lun->flags & CTL_LUN_PR_RESERVED) &&
7807                             (lun->pr_res_idx == i ||
7808                              lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
7809                                 res_desc->flags = SPRI_FULL_R_HOLDER;
7810                                 res_desc->scopetype = lun->pr_res_type;
7811                         }
7812                         scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
7813                             res_desc->rel_trgt_port_id);
7814                         len = 0;
7815                         port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
7816                         if (port != NULL)
7817                                 len = ctl_create_iid(port,
7818                                     i % CTL_MAX_INIT_PER_PORT,
7819                                     res_desc->transport_id);
7820                         scsi_ulto4b(len, res_desc->additional_length);
7821                         res_desc = (struct scsi_per_res_in_full_desc *)
7822                             &res_desc->transport_id[len];
7823                 }
7824                 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
7825                     res_status->header.length);
7826                 break;
7827         }
7828         default:
7829                 panic("%s: Invalid PR action %#x", __func__, cdb->action);
7830         }
7831         mtx_unlock(&lun->lun_lock);
7832
7833         ctl_set_success(ctsio);
7834         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7835         ctsio->be_move_done = ctl_config_move_done;
7836         ctl_datamove((union ctl_io *)ctsio);
7837         return (CTL_RETVAL_COMPLETE);
7838 }
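/*
 * Illustrative sketch only: how an initiator might walk the variable-length
 * descriptors produced by the SPRI_RS (read full status) case above.  Each
 * descriptor is a struct scsi_per_res_in_full_desc followed by the transport
 * ID whose length is stored in additional_length; the function name and the
 * printf are hypothetical.
 */
#if 0
static void
example_walk_full_status(struct scsi_per_res_in_full *res_status)
{
        struct scsi_per_res_in_full_desc *desc;
        uint8_t *p, *end;

        p = (uint8_t *)&res_status->desc[0];
        end = p + scsi_4btoul(res_status->header.length);
        while (p < end) {
                desc = (struct scsi_per_res_in_full_desc *)p;
                if (desc->flags & SPRI_FULL_R_HOLDER)
                        printf("holder key %jx\n",
                            (uintmax_t)scsi_8btou64(desc->res_key.key));
                /* Step over the fixed part plus the transport ID. */
                p = desc->transport_id + scsi_4btoul(desc->additional_length);
        }
}
#endif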
7839
7840 /*
7841  * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7842  * it should return.
7843  */
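/*
 * (For example, the SPRO_PREEMPT/SPRO_PRE_ABO case in
 * ctl_persistent_reserve_out() below checks this return value and returns
 * immediately when it is non-zero, since by then this function has already
 * completed the I/O itself.)
 */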
7844 static int
7845 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7846                 uint64_t sa_res_key, uint8_t type, uint32_t residx,
7847                 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7848                 struct scsi_per_res_out_parms* param)
7849 {
7850         union ctl_ha_msg persis_io;
7851         int i;
7852
7853         mtx_lock(&lun->lun_lock);
7854         if (sa_res_key == 0) {
7855                 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
7856                         /* validate scope and type */
7857                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7858                              SPR_LU_SCOPE) {
7859                                 mtx_unlock(&lun->lun_lock);
7860                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7861                                                       /*sks_valid*/ 1,
7862                                                       /*command*/ 1,
7863                                                       /*field*/ 2,
7864                                                       /*bit_valid*/ 1,
7865                                                       /*bit*/ 4);
7866                                 ctl_done((union ctl_io *)ctsio);
7867                                 return (1);
7868                         }
7869
7870                         if (type > 8 || type == 2 || type == 4 || type == 0) {
7871                                 mtx_unlock(&lun->lun_lock);
7872                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7873                                                       /*sks_valid*/ 1,
7874                                                       /*command*/ 1,
7875                                                       /*field*/ 2,
7876                                                       /*bit_valid*/ 1,
7877                                                       /*bit*/ 0);
7878                                 ctl_done((union ctl_io *)ctsio);
7879                                 return (1);
7880                         }
7881
7882                         /*
7883                          * Unregister everybody else and build UA for
7884                          * them
7885                          */
7886                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7887                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
7888                                         continue;
7889
7890                                 ctl_clr_prkey(lun, i);
7891                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7892                         }
7893                         lun->pr_key_count = 1;
7894                         lun->pr_res_type = type;
7895                         if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
7896                             lun->pr_res_type != SPR_TYPE_EX_AC_AR)
7897                                 lun->pr_res_idx = residx;
7898                         lun->pr_generation++;
7899                         mtx_unlock(&lun->lun_lock);
7900
7901                         /* send msg to other side */
7902                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7903                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7904                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7905                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
7906                         persis_io.pr.pr_info.res_type = type;
7907                         memcpy(persis_io.pr.pr_info.sa_res_key,
7908                                param->serv_act_res_key,
7909                                sizeof(param->serv_act_res_key));
7910                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7911                             sizeof(persis_io.pr), M_WAITOK);
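                        /*
                         * The same pattern recurs for every service action
                         * below: update the local state under the LUN lock,
                         * then send a CTL_MSG_PERS_ACTION message so that the
                         * peer SC can mirror the change in
                         * ctl_hndl_per_res_out_on_other_sc().
                         */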
7912                 } else {
7913                         /* not all registrants */
7914                         mtx_unlock(&lun->lun_lock);
7915                         free(ctsio->kern_data_ptr, M_CTL);
7916                         ctl_set_invalid_field(ctsio,
7917                                               /*sks_valid*/ 1,
7918                                               /*command*/ 0,
7919                                               /*field*/ 8,
7920                                               /*bit_valid*/ 0,
7921                                               /*bit*/ 0);
7922                         ctl_done((union ctl_io *)ctsio);
7923                         return (1);
7924                 }
7925         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7926                 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
7927                 int found = 0;
7928
7929                 if (res_key == sa_res_key) {
7930                         /* special case */
7931                         /*
7932                          * The spec implies this is not good, but doesn't
7933                          * say what to do.  There are two choices: generate
7934                          * a reservation conflict, or a check condition
7935                          * with an illegal field in the parameter data.
7936                          * Since the latter is what is done when the
7937                          * sa_res_key is zero, take the same approach
7938                          * here, since this also involves the sa_res_key.
7939                          */
7940                         mtx_unlock(&lun->lun_lock);
7941                         free(ctsio->kern_data_ptr, M_CTL);
7942                         ctl_set_invalid_field(ctsio,
7943                                               /*sks_valid*/ 1,
7944                                               /*command*/ 0,
7945                                               /*field*/ 8,
7946                                               /*bit_valid*/ 0,
7947                                               /*bit*/ 0);
7948                         ctl_done((union ctl_io *)ctsio);
7949                         return (1);
7950                 }
7951
7952                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
7953                         if (ctl_get_prkey(lun, i) != sa_res_key)
7954                                 continue;
7955
7956                         found = 1;
7957                         ctl_clr_prkey(lun, i);
7958                         lun->pr_key_count--;
7959                         ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
7960                 }
7961                 if (!found) {
7962                         mtx_unlock(&lun->lun_lock);
7963                         free(ctsio->kern_data_ptr, M_CTL);
7964                         ctl_set_reservation_conflict(ctsio);
7965                         ctl_done((union ctl_io *)ctsio);
7966                         return (1);
7967                 }
7968                 lun->pr_generation++;
7969                 mtx_unlock(&lun->lun_lock);
7970
7971                 /* send msg to other side */
7972                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7973                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7974                 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7975                 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7976                 persis_io.pr.pr_info.res_type = type;
7977                 memcpy(persis_io.pr.pr_info.sa_res_key,
7978                        param->serv_act_res_key,
7979                        sizeof(param->serv_act_res_key));
7980                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
7981                     sizeof(persis_io.pr), M_WAITOK);
7982         } else {
7983                 /* Reserved but not all registrants */
7984                 /* sa_res_key is res holder */
7985                 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
7986                         /* validate scope and type */
7987                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7988                              SPR_LU_SCOPE) {
7989                                 mtx_unlock(&lun->lun_lock);
7990                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7991                                                       /*sks_valid*/ 1,
7992                                                       /*command*/ 1,
7993                                                       /*field*/ 2,
7994                                                       /*bit_valid*/ 1,
7995                                                       /*bit*/ 4);
7996                                 ctl_done((union ctl_io *)ctsio);
7997                                 return (1);
7998                         }
7999
8000                         if (type > 8 || type == 2 || type == 4 || type == 0) {
8001                                 mtx_unlock(&lun->lun_lock);
8002                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
8003                                                       /*sks_valid*/ 1,
8004                                                       /*command*/ 1,
8005                                                       /*field*/ 2,
8006                                                       /*bit_valid*/ 1,
8007                                                       /*bit*/ 0);
8008                                 ctl_done((union ctl_io *)ctsio);
8009                                 return (1);
8010                         }
8011
8012                         /*
8013                          * Do the following:
8014                          * If sa_res_key != res_key, remove all
8015                          * registrants with sa_res_key and generate a
8016                          * UA (Registrations Preempted) for those
8017                          * registrants.  If it wasn't an exclusive
8018                          * reservation and the type has changed,
8019                          * generate a UA (Reservations Preempted) for
8020                          * all other registered nexuses.  Then establish
8021                          * the new reservation and holder.  If res_key
8022                          * and sa_res_key are the same, do the above
8023                          * except don't unregister the res holder.
8024                          */
8025
8026                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8027                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
8028                                         continue;
8029
8030                                 if (sa_res_key == ctl_get_prkey(lun, i)) {
8031                                         ctl_clr_prkey(lun, i);
8032                                         lun->pr_key_count--;
8033                                         ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8034                                 } else if (type != lun->pr_res_type &&
8035                                     (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8036                                      lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
8037                                         ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8038                                 }
8039                         }
8040                         lun->pr_res_type = type;
8041                         if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8042                             lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8043                                 lun->pr_res_idx = residx;
8044                         else
8045                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8046                         lun->pr_generation++;
8047                         mtx_unlock(&lun->lun_lock);
8048
8049                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8050                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8051                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8052                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8053                         persis_io.pr.pr_info.res_type = type;
8054                         memcpy(persis_io.pr.pr_info.sa_res_key,
8055                                param->serv_act_res_key,
8056                                sizeof(param->serv_act_res_key));
8057                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8058                             sizeof(persis_io.pr), M_WAITOK);
8059                 } else {
8060                         /*
8061                          * sa_res_key is not the res holder just
8062                          * remove registrants
8063                          */
8064                         int found = 0;
8065
8066                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8067                                 if (sa_res_key != ctl_get_prkey(lun, i))
8068                                         continue;
8069
8070                                 found = 1;
8071                                 ctl_clr_prkey(lun, i);
8072                                 lun->pr_key_count--;
8073                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8074                         }
8075
8076                         if (!found) {
8077                                 mtx_unlock(&lun->lun_lock);
8078                                 free(ctsio->kern_data_ptr, M_CTL);
8079                                 ctl_set_reservation_conflict(ctsio);
8080                                 ctl_done((union ctl_io *)ctsio);
8081                                 return (1);
8082                         }
8083                         lun->pr_generation++;
8084                         mtx_unlock(&lun->lun_lock);
8085
8086                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8087                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8088                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8089                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8090                         persis_io.pr.pr_info.res_type = type;
8091                         memcpy(persis_io.pr.pr_info.sa_res_key,
8092                                param->serv_act_res_key,
8093                                sizeof(param->serv_act_res_key));
8094                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8095                             sizeof(persis_io.pr), M_WAITOK);
8096                 }
8097         }
8098         return (0);
8099 }
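/*
 * Worked example of the preempt logic above (illustrative only): initiators
 * A and B are registered with keys 0xA and 0xB, and A holds a Write Exclusive
 * reservation.  If B issues PREEMPT with res_key 0xB and sa_res_key 0xA, the
 * final branch runs: A's key matches sa_res_key, so A is unregistered and
 * given a "registrations preempted" UA, and B becomes the reservation holder.
 * If B instead names a sa_res_key held by no registrant, nothing matches and
 * the command completes with a reservation conflict.
 */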
8100
8101 static void
8102 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
8103 {
8104         uint64_t sa_res_key;
8105         int i;
8106
8107         sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
8108
8109         if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8110          || lun->pr_res_idx == CTL_PR_NO_RESERVATION
8111          || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) {
8112                 if (sa_res_key == 0) {
8113                         /*
8114                          * Unregister everybody else and build UA for
8115                          * them
8116                          */
8117                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8118                                 if (i == msg->pr.pr_info.residx ||
8119                                     ctl_get_prkey(lun, i) == 0)
8120                                         continue;
8121
8122                                 ctl_clr_prkey(lun, i);
8123                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8124                         }
8125
8126                         lun->pr_key_count = 1;
8127                         lun->pr_res_type = msg->pr.pr_info.res_type;
8128                         if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8129                             lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8130                                 lun->pr_res_idx = msg->pr.pr_info.residx;
8131                 } else {
8132                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8133                                 if (sa_res_key != ctl_get_prkey(lun, i))
8134                                         continue;
8135
8136                                 ctl_clr_prkey(lun, i);
8137                                 lun->pr_key_count--;
8138                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8139                         }
8140                 }
8141         } else {
8142                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8143                         if (i == msg->pr.pr_info.residx ||
8144                             ctl_get_prkey(lun, i) == 0)
8145                                 continue;
8146
8147                         if (sa_res_key == ctl_get_prkey(lun, i)) {
8148                                 ctl_clr_prkey(lun, i);
8149                                 lun->pr_key_count--;
8150                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8151                         } else if (msg->pr.pr_info.res_type != lun->pr_res_type
8152                             && (lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8153                              lun->pr_res_type == SPR_TYPE_EX_AC_RO)) {
8154                                 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8155                         }
8156                 }
8157                 lun->pr_res_type = msg->pr.pr_info.res_type;
8158                 if (lun->pr_res_type != SPR_TYPE_WR_EX_AR &&
8159                     lun->pr_res_type != SPR_TYPE_EX_AC_AR)
8160                         lun->pr_res_idx = msg->pr.pr_info.residx;
8161                 else
8162                         lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8163         }
8164         lun->pr_generation++;
8165
8166 }
8167
8168
8169 int
8170 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
8171 {
8172         int retval;
8173         u_int32_t param_len;
8174         struct scsi_per_res_out *cdb;
8175         struct ctl_lun *lun;
8176         struct scsi_per_res_out_parms* param;
8177         struct ctl_softc *softc;
8178         uint32_t residx;
8179         uint64_t res_key, sa_res_key, key;
8180         uint8_t type;
8181         union ctl_ha_msg persis_io;
8182         int    i;
8183
8184         CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8185
8186         retval = CTL_RETVAL_COMPLETE;
8187
8188         cdb = (struct scsi_per_res_out *)ctsio->cdb;
8189         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8190         softc = lun->ctl_softc;
8191
8192         /*
8193          * We only support whole-LUN scope.  The scope & type are ignored for
8194          * "register", "register and ignore existing key" and "clear".
8195          * Scope and type are sometimes ignored on preempts as well.
8196          * The reservation type is also verified here.
8197          */
8198         type = cdb->scope_type & SPR_TYPE_MASK;
8199         if ((cdb->action == SPRO_RESERVE)
8200          || (cdb->action == SPRO_RELEASE)) {
8201                 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
8202                         ctl_set_invalid_field(/*ctsio*/ ctsio,
8203                                               /*sks_valid*/ 1,
8204                                               /*command*/ 1,
8205                                               /*field*/ 2,
8206                                               /*bit_valid*/ 1,
8207                                               /*bit*/ 4);
8208                         ctl_done((union ctl_io *)ctsio);
8209                         return (CTL_RETVAL_COMPLETE);
8210                 }
8211
8212                 if (type > 8 || type == 2 || type == 4 || type == 0) {
8213                         ctl_set_invalid_field(/*ctsio*/ ctsio,
8214                                               /*sks_valid*/ 1,
8215                                               /*command*/ 1,
8216                                               /*field*/ 2,
8217                                               /*bit_valid*/ 1,
8218                                               /*bit*/ 0);
8219                         ctl_done((union ctl_io *)ctsio);
8220                         return (CTL_RETVAL_COMPLETE);
8221                 }
8222         }
8223
8224         param_len = scsi_4btoul(cdb->length);
8225
8226         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
8227                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
8228                 ctsio->kern_data_len = param_len;
8229                 ctsio->kern_total_len = param_len;
8230                 ctsio->kern_data_resid = 0;
8231                 ctsio->kern_rel_offset = 0;
8232                 ctsio->kern_sg_entries = 0;
8233                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8234                 ctsio->be_move_done = ctl_config_move_done;
8235                 ctl_datamove((union ctl_io *)ctsio);
8236
8237                 return (CTL_RETVAL_COMPLETE);
8238         }
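        /*
         * Second pass: once the front end has moved the parameter data into
         * kern_data_ptr, ctl_config_move_done() re-dispatches the I/O, so
         * this function runs again with CTL_FLAG_ALLOCATED set and falls
         * through to the parsing below.
         */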
8239
8240         param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
8241
8242         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8243         res_key = scsi_8btou64(param->res_key.key);
8244         sa_res_key = scsi_8btou64(param->serv_act_res_key);
8245
8246         /*
8247          * Validate the reservation key here, except for SPRO_REG_IGNO.
8248          * This must be done for all other service actions.
8249          */
8250         if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
8251                 mtx_lock(&lun->lun_lock);
8252                 if ((key = ctl_get_prkey(lun, residx)) != 0) {
8253                         if (res_key != key) {
8254                                 /*
8255                                  * The current key passed in doesn't match
8256                                  * the one the initiator previously
8257                                  * registered.
8258                                  */
8259                                 mtx_unlock(&lun->lun_lock);
8260                                 free(ctsio->kern_data_ptr, M_CTL);
8261                                 ctl_set_reservation_conflict(ctsio);
8262                                 ctl_done((union ctl_io *)ctsio);
8263                                 return (CTL_RETVAL_COMPLETE);
8264                         }
8265                 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
8266                         /*
8267                          * We are not registered
8268                          */
8269                         mtx_unlock(&lun->lun_lock);
8270                         free(ctsio->kern_data_ptr, M_CTL);
8271                         ctl_set_reservation_conflict(ctsio);
8272                         ctl_done((union ctl_io *)ctsio);
8273                         return (CTL_RETVAL_COMPLETE);
8274                 } else if (res_key != 0) {
8275                         /*
8276                          * We are not registered and trying to register but
8277                          * the register key isn't zero.
8278                          */
8279                         mtx_unlock(&lun->lun_lock);
8280                         free(ctsio->kern_data_ptr, M_CTL);
8281                         ctl_set_reservation_conflict(ctsio);
8282                         ctl_done((union ctl_io *)ctsio);
8283                         return (CTL_RETVAL_COMPLETE);
8284                 }
8285                 mtx_unlock(&lun->lun_lock);
8286         }
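        /*
         * For example (illustrative): an initiator that registered key 0x1
         * and then issues RESERVE with a res_key of 0x2 fails the comparison
         * above and gets a reservation conflict.  An unregistered initiator
         * gets the same for any service action other than REGISTER (and
         * REGISTER AND IGNORE EXISTING KEY, which skips this validation).
         */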
8287
8288         switch (cdb->action & SPRO_ACTION_MASK) {
8289         case SPRO_REGISTER:
8290         case SPRO_REG_IGNO: {
8291
8292 #if 0
8293                 printf("Registration received\n");
8294 #endif
8295
8296                 /*
8297                  * We don't support any of these options, as we report in
8298                  * the read capabilities request (see
8299                  * ctl_persistent_reserve_in(), above).
8300                  */
8301                 if ((param->flags & SPR_SPEC_I_PT)
8302                  || (param->flags & SPR_ALL_TG_PT)
8303                  || (param->flags & SPR_APTPL)) {
8304                         int bit_ptr;
8305
8306                         if (param->flags & SPR_APTPL)
8307                                 bit_ptr = 0;
8308                         else if (param->flags & SPR_ALL_TG_PT)
8309                                 bit_ptr = 2;
8310                         else /* SPR_SPEC_I_PT */
8311                                 bit_ptr = 3;
8312
8313                         free(ctsio->kern_data_ptr, M_CTL);
8314                         ctl_set_invalid_field(ctsio,
8315                                               /*sks_valid*/ 1,
8316                                               /*command*/ 0,
8317                                               /*field*/ 20,
8318                                               /*bit_valid*/ 1,
8319                                               /*bit*/ bit_ptr);
8320                         ctl_done((union ctl_io *)ctsio);
8321                         return (CTL_RETVAL_COMPLETE);
8322                 }
8323
8324                 mtx_lock(&lun->lun_lock);
8325
8326                 /*
8327                  * The initiator wants to clear the
8328                  * key/unregister.
8329                  */
8330                 if (sa_res_key == 0) {
8331                         if ((res_key == 0
8332                           && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
8333                          || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
8334                           && ctl_get_prkey(lun, residx) == 0)) {
8335                                 mtx_unlock(&lun->lun_lock);
8336                                 goto done;
8337                         }
8338
8339                         ctl_clr_prkey(lun, residx);
8340                         lun->pr_key_count--;
8341
8342                         if (residx == lun->pr_res_idx) {
8343                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8344                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8345
8346                                 if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8347                                      lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
8348                                     lun->pr_key_count) {
8349                                         /*
8350                                          * If the reservation is a registrants
8351                                          * only type we need to generate a UA
8352                                          * for other registered inits.  The
8353                                          * sense code should be RESERVATIONS
8354                                          * RELEASED
8355                                          */
8356
8357                                         for (i = softc->init_min; i < softc->init_max; i++){
8358                                                 if (ctl_get_prkey(lun, i) == 0)
8359                                                         continue;
8360                                                 ctl_est_ua(lun, i,
8361                                                     CTL_UA_RES_RELEASE);
8362                                         }
8363                                 }
8364                                 lun->pr_res_type = 0;
8365                         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8366                                 if (lun->pr_key_count == 0) {
8367                                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8368                                         lun->pr_res_type = 0;
8369                                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8370                                 }
8371                         }
8372                         lun->pr_generation++;
8373                         mtx_unlock(&lun->lun_lock);
8374
8375                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8376                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8377                         persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8378                         persis_io.pr.pr_info.residx = residx;
8379                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8380                             sizeof(persis_io.pr), M_WAITOK);
8381                 } else /* sa_res_key != 0 */ {
8382
8383                         /*
8384                          * If we aren't registered currently then increment
8385                          * the key count and set the registered flag.
8386                          */
8387                         ctl_alloc_prkey(lun, residx);
8388                         if (ctl_get_prkey(lun, residx) == 0)
8389                                 lun->pr_key_count++;
8390                         ctl_set_prkey(lun, residx, sa_res_key);
8391                         lun->pr_generation++;
8392                         mtx_unlock(&lun->lun_lock);
8393
8394                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8395                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8396                         persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8397                         persis_io.pr.pr_info.residx = residx;
8398                         memcpy(persis_io.pr.pr_info.sa_res_key,
8399                                param->serv_act_res_key,
8400                                sizeof(param->serv_act_res_key));
8401                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8402                             sizeof(persis_io.pr), M_WAITOK);
8403                 }
8404
8405                 break;
8406         }
8407         case SPRO_RESERVE:
8408 #if 0
8409                 printf("Reserve executed type %d\n", type);
8410 #endif
8411                 mtx_lock(&lun->lun_lock);
8412                 if (lun->flags & CTL_LUN_PR_RESERVED) {
8413                         /*
8414                          * If this isn't the reservation holder and it's
8415                          * not an "all registrants" type, or if the type
8416                          * is different, then we have a conflict.
8417                          */
8418                         if ((lun->pr_res_idx != residx
8419                           && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8420                          || lun->pr_res_type != type) {
8421                                 mtx_unlock(&lun->lun_lock);
8422                                 free(ctsio->kern_data_ptr, M_CTL);
8423                                 ctl_set_reservation_conflict(ctsio);
8424                                 ctl_done((union ctl_io *)ctsio);
8425                                 return (CTL_RETVAL_COMPLETE);
8426                         }
8427                         mtx_unlock(&lun->lun_lock);
8428                 } else /* create a reservation */ {
8429                         /*
8430                          * If it's not an "all registrants" type, record
8431                          * the reservation holder.
8432                          */
8433                         if (type != SPR_TYPE_WR_EX_AR
8434                          && type != SPR_TYPE_EX_AC_AR)
8435                                 lun->pr_res_idx = residx; /* Res holder */
8436                         else
8437                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8438
8439                         lun->flags |= CTL_LUN_PR_RESERVED;
8440                         lun->pr_res_type = type;
8441
8442                         mtx_unlock(&lun->lun_lock);
8443
8444                         /* send msg to other side */
8445                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8446                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8447                         persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8448                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8449                         persis_io.pr.pr_info.res_type = type;
8450                         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8451                             sizeof(persis_io.pr), M_WAITOK);
8452                 }
8453                 break;
8454
8455         case SPRO_RELEASE:
8456                 mtx_lock(&lun->lun_lock);
8457                 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8458                         /* No reservation exists; return good status. */
8459                         mtx_unlock(&lun->lun_lock);
8460                         goto done;
8461                 }
8462                 /*
8463                  * Is this nexus a reservation holder?
8464                  */
8465                 if (lun->pr_res_idx != residx
8466                  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8467                         /*
8468                          * Not a reservation holder; return good status
8469                          * but do nothing.
8470                          */
8471                         mtx_unlock(&lun->lun_lock);
8472                         goto done;
8473                 }
8474
8475                 if (lun->pr_res_type != type) {
8476                         mtx_unlock(&lun->lun_lock);
8477                         free(ctsio->kern_data_ptr, M_CTL);
8478                         ctl_set_illegal_pr_release(ctsio);
8479                         ctl_done((union ctl_io *)ctsio);
8480                         return (CTL_RETVAL_COMPLETE);
8481                 }
8482
8483                 /* okay to release */
8484                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8485                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8486                 lun->pr_res_type = 0;
8487
8488                 /*
8489                  * If this isn't an exclusive access
8490                  * reservation, generate a UA for all
8491                  * other registrants.
8492                  */
8493                 if (type != SPR_TYPE_EX_AC
8494                  && type != SPR_TYPE_WR_EX) {
8495                         for (i = softc->init_min; i < softc->init_max; i++) {
8496                                 if (i == residx || ctl_get_prkey(lun, i) == 0)
8497                                         continue;
8498                                 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8499                         }
8500                 }
8501                 mtx_unlock(&lun->lun_lock);
8502
8503                 /* Send msg to other side */
8504                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8505                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8506                 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8507                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8508                      sizeof(persis_io.pr), M_WAITOK);
8509                 break;
8510
8511         case SPRO_CLEAR:
8512                 /* send msg to other side */
8513
8514                 mtx_lock(&lun->lun_lock);
8515                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8516                 lun->pr_res_type = 0;
8517                 lun->pr_key_count = 0;
8518                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8519
8520                 ctl_clr_prkey(lun, residx);
8521                 for (i = 0; i < CTL_MAX_INITIATORS; i++)
8522                         if (ctl_get_prkey(lun, i) != 0) {
8523                                 ctl_clr_prkey(lun, i);
8524                                 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8525                         }
8526                 lun->pr_generation++;
8527                 mtx_unlock(&lun->lun_lock);
8528
8529                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8530                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8531                 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8532                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8533                      sizeof(persis_io.pr), M_WAITOK);
8534                 break;
8535
8536         case SPRO_PREEMPT:
8537         case SPRO_PRE_ABO: {
8538                 int nretval;
8539
8540                 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8541                                           residx, ctsio, cdb, param);
8542                 if (nretval != 0)
8543                         return (CTL_RETVAL_COMPLETE);
8544                 break;
8545         }
8546         default:
8547                 panic("%s: Invalid PR action %#x", __func__, cdb->action);
8548         }
8549
8550 done:
8551         free(ctsio->kern_data_ptr, M_CTL);
8552         ctl_set_success(ctsio);
8553         ctl_done((union ctl_io *)ctsio);
8554
8555         return (retval);
8556 }
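/*
 * Illustrative sketch only: filling in the PERSISTENT RESERVE OUT parameter
 * list that the handler above parses.  The layout comes from struct
 * scsi_per_res_out_parms; the function name and key values are hypothetical.
 */
#if 0
static void
example_fill_pr_out_parms(struct scsi_per_res_out_parms *parms,
    uint64_t res_key, uint64_t sa_res_key)
{
        memset(parms, 0, sizeof(*parms));
        scsi_u64to8b(res_key, parms->res_key.key);
        scsi_u64to8b(sa_res_key, parms->serv_act_res_key);
        /*
         * Leave SPR_SPEC_I_PT, SPR_ALL_TG_PT and SPR_APTPL clear; the
         * handler above rejects them as unsupported.
         */
        parms->flags = 0;
}
#endif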
8557
8558 /*
8559  * This routine handles a message from the other SC pertaining to
8560  * persistent reserve out.  All the error checking will have been done
8561  * already, so only performing the action is needed here to keep the
8562  * two sides in sync.
8563  */
8564 static void
8565 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8566 {
8567         struct ctl_softc *softc = control_softc;
8568         struct ctl_lun *lun;
8569         int i;
8570         uint32_t residx, targ_lun;
8571
8572         targ_lun = msg->hdr.nexus.targ_mapped_lun;
8573         mtx_lock(&softc->ctl_lock);
8574         if ((targ_lun >= CTL_MAX_LUNS) ||
8575             ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
8576                 mtx_unlock(&softc->ctl_lock);
8577                 return;
8578         }
8579         mtx_lock(&lun->lun_lock);
8580         mtx_unlock(&softc->ctl_lock);
8581         if (lun->flags & CTL_LUN_DISABLED) {
8582                 mtx_unlock(&lun->lun_lock);
8583                 return;
8584         }
8585         residx = ctl_get_initindex(&msg->hdr.nexus);
8586         switch(msg->pr.pr_info.action) {
8587         case CTL_PR_REG_KEY:
8588                 ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
8589                 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
8590                         lun->pr_key_count++;
8591                 ctl_set_prkey(lun, msg->pr.pr_info.residx,
8592                     scsi_8btou64(msg->pr.pr_info.sa_res_key));
8593                 lun->pr_generation++;
8594                 break;
8595
8596         case CTL_PR_UNREG_KEY:
8597                 ctl_clr_prkey(lun, msg->pr.pr_info.residx);
8598                 lun->pr_key_count--;
8599
8600                 /* XXX Need to see if the reservation has been released */
8601                 /* if so do we need to generate UA? */
8602                 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8603                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8604                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8605
8606                         if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
8607                              lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
8608                             lun->pr_key_count) {
8609                                 /*
8610                                  * If the reservation is a registrants
8611                                  * only type we need to generate a UA
8612                                  * for other registered inits.  The
8613                                  * sense code should be RESERVATIONS
8614                                  * RELEASED
8615                                  */
8616
8617                                 for (i = softc->init_min; i < softc->init_max; i++) {
8618                                         if (ctl_get_prkey(lun, i) == 0)
8619                                                 continue;
8620
8621                                         ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8622                                 }
8623                         }
8624                         lun->pr_res_type = 0;
8625                 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8626                         if (lun->pr_key_count == 0) {
8627                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8628                                 lun->pr_res_type = 0;
8629                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8630                         }
8631                 }
8632                 lun->pr_generation++;
8633                 break;
8634
8635         case CTL_PR_RESERVE:
8636                 lun->flags |= CTL_LUN_PR_RESERVED;
8637                 lun->pr_res_type = msg->pr.pr_info.res_type;
8638                 lun->pr_res_idx = msg->pr.pr_info.residx;
8639
8640                 break;
8641
8642         case CTL_PR_RELEASE:
8643                 /*
8644                  * If this isn't an exclusive access reservation, generate
8645                  * a UA for all other registrants.
8646                  */
8647                 if (lun->pr_res_type != SPR_TYPE_EX_AC &&
8648                     lun->pr_res_type != SPR_TYPE_WR_EX) {
8649                         for (i = softc->init_min; i < softc->init_max; i++) {
8650                                 if (i != residx && ctl_get_prkey(lun, i) != 0)
8651                                         ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8652                         }
8653                 }
8654
8655                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8656                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8657                 lun->pr_res_type = 0;
8658                 break;
8659
8660         case CTL_PR_PREEMPT:
8661                 ctl_pro_preempt_other(lun, msg);
8662                 break;
8663         case CTL_PR_CLEAR:
8664                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8665                 lun->pr_res_type = 0;
8666                 lun->pr_key_count = 0;
8667                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8668
8669                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8670                         if (ctl_get_prkey(lun, i) == 0)
8671                                 continue;
8672                         ctl_clr_prkey(lun, i);
8673                         ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
8674                 }
8675                 lun->pr_generation++;
8676                 break;
8677         }
8678
8679         mtx_unlock(&lun->lun_lock);
8680 }
8681
8682 int
8683 ctl_read_write(struct ctl_scsiio *ctsio)
8684 {
8685         struct ctl_lun *lun;
8686         struct ctl_lba_len_flags *lbalen;
8687         uint64_t lba;
8688         uint32_t num_blocks;
8689         int flags, retval;
8690         int isread;
8691
8692         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8693
8694         CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8695
8696         flags = 0;
8697         isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
8698               || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8699         switch (ctsio->cdb[0]) {
8700         case READ_6:
8701         case WRITE_6: {
8702                 struct scsi_rw_6 *cdb;
8703
8704                 cdb = (struct scsi_rw_6 *)ctsio->cdb;
8705
8706                 lba = scsi_3btoul(cdb->addr);
8707                 /* only 5 bits are valid in the most significant address byte */
8708                 lba &= 0x1fffff;
8709                 num_blocks = cdb->length;
8710                 /*
8711                  * A zero transfer length means 256 blocks, per SBC-2.
8712                  */
8713                 if (num_blocks == 0)
8714                         num_blocks = 256;
8715                 break;
8716         }
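        /*
         * Worked example (illustrative): a READ(6) CDB of
         * { 0x08, 0x1f, 0xff, 0xff, 0x00, 0x00 } decodes to lba 0x1fffff
         * (the top three bits of the 24-bit address field are masked off)
         * and a length byte of 0, which becomes num_blocks = 256.
         */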
8717         case READ_10:
8718         case WRITE_10: {
8719                 struct scsi_rw_10 *cdb;
8720
8721                 cdb = (struct scsi_rw_10 *)ctsio->cdb;
8722                 if (cdb->byte2 & SRW10_FUA)
8723                         flags |= CTL_LLF_FUA;
8724                 if (cdb->byte2 & SRW10_DPO)
8725                         flags |= CTL_LLF_DPO;
8726                 lba = scsi_4btoul(cdb->addr);
8727                 num_blocks = scsi_2btoul(cdb->length);
8728                 break;
8729         }
8730         case WRITE_VERIFY_10: {
8731                 struct scsi_write_verify_10 *cdb;
8732
8733                 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8734                 flags |= CTL_LLF_FUA;
8735                 if (cdb->byte2 & SWV_DPO)
8736                         flags |= CTL_LLF_DPO;
8737                 lba = scsi_4btoul(cdb->addr);
8738                 num_blocks = scsi_2btoul(cdb->length);
8739                 break;
8740         }
8741         case READ_12:
8742         case WRITE_12: {
8743                 struct scsi_rw_12 *cdb;
8744
8745                 cdb = (struct scsi_rw_12 *)ctsio->cdb;
8746                 if (cdb->byte2 & SRW12_FUA)
8747                         flags |= CTL_LLF_FUA;
8748                 if (cdb->byte2 & SRW12_DPO)
8749                         flags |= CTL_LLF_DPO;
8750                 lba = scsi_4btoul(cdb->addr);
8751                 num_blocks = scsi_4btoul(cdb->length);
8752                 break;
8753         }
8754         case WRITE_VERIFY_12: {
8755                 struct scsi_write_verify_12 *cdb;
8756
8757                 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
8758                 flags |= CTL_LLF_FUA;
8759                 if (cdb->byte2 & SWV_DPO)
8760                         flags |= CTL_LLF_DPO;
8761                 lba = scsi_4btoul(cdb->addr);
8762                 num_blocks = scsi_4btoul(cdb->length);
8763                 break;
8764         }
8765         case READ_16:
8766         case WRITE_16: {
8767                 struct scsi_rw_16 *cdb;
8768
8769                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8770                 if (cdb->byte2 & SRW12_FUA)
8771                         flags |= CTL_LLF_FUA;
8772                 if (cdb->byte2 & SRW12_DPO)
8773                         flags |= CTL_LLF_DPO;
8774                 lba = scsi_8btou64(cdb->addr);
8775                 num_blocks = scsi_4btoul(cdb->length);
8776                 break;
8777         }
8778         case WRITE_ATOMIC_16: {
8779                 struct scsi_write_atomic_16 *cdb;
8780
8781                 if (lun->be_lun->atomicblock == 0) {
8782                         ctl_set_invalid_opcode(ctsio);
8783                         ctl_done((union ctl_io *)ctsio);
8784                         return (CTL_RETVAL_COMPLETE);
8785                 }
8786
8787                 cdb = (struct scsi_write_atomic_16 *)ctsio->cdb;
8788                 if (cdb->byte2 & SRW12_FUA)
8789                         flags |= CTL_LLF_FUA;
8790                 if (cdb->byte2 & SRW12_DPO)
8791                         flags |= CTL_LLF_DPO;
8792                 lba = scsi_8btou64(cdb->addr);
8793                 num_blocks = scsi_2btoul(cdb->length);
8794                 if (num_blocks > lun->be_lun->atomicblock) {
8795                         ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
8796                             /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
8797                             /*bit*/ 0);
8798                         ctl_done((union ctl_io *)ctsio);
8799                         return (CTL_RETVAL_COMPLETE);
8800                 }
8801                 break;
8802         }
8803         case WRITE_VERIFY_16: {
8804                 struct scsi_write_verify_16 *cdb;
8805
8806                 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
8807                 flags |= CTL_LLF_FUA;
8808                 if (cdb->byte2 & SWV_DPO)
8809                         flags |= CTL_LLF_DPO;
8810                 lba = scsi_8btou64(cdb->addr);
8811                 num_blocks = scsi_4btoul(cdb->length);
8812                 break;
8813         }
8814         default:
8815                 /*
8816                  * We got a command we don't support.  This shouldn't
8817                  * happen; commands should be filtered out above us.
8818                  */
8819                 ctl_set_invalid_opcode(ctsio);
8820                 ctl_done((union ctl_io *)ctsio);
8821
8822                 return (CTL_RETVAL_COMPLETE);
8823                 break; /* NOTREACHED */
8824         }
8825
8826         /*
8827          * The first check is to make sure we're in bounds, the second
8828          * check is to catch wrap-around problems.  If the lba + num blocks
8829          * is less than the lba, then we've wrapped around and the block
8830          * range is invalid anyway.
8831          */
8832         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8833          || ((lba + num_blocks) < lba)) {
8834                 ctl_set_lba_out_of_range(ctsio);
8835                 ctl_done((union ctl_io *)ctsio);
8836                 return (CTL_RETVAL_COMPLETE);
8837         }
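        /*
         * Worked example of the wrap-around case (illustrative): with
         * lba = 0xfffffffffffffff0 and num_blocks = 0x20, lba + num_blocks
         * overflows to 0x10, which is less than lba, so the request is
         * rejected even where the maxlba comparison alone would pass.
         */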
8838
8839         /*
8840          * According to SBC-3, a transfer length of 0 is not an error.
8841          * Note that this cannot happen with WRITE(6) or READ(6), since 0
8842          * translates to 256 blocks for those commands.
8843          */
8844         if (num_blocks == 0) {
8845                 ctl_set_success(ctsio);
8846                 ctl_done((union ctl_io *)ctsio);
8847                 return (CTL_RETVAL_COMPLETE);
8848         }
8849
8850         /* Set FUA and/or DPO if caches are disabled. */
8851         if (isread) {
8852                 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
8853                     SCP_RCD) != 0)
8854                         flags |= CTL_LLF_FUA | CTL_LLF_DPO;
8855         } else {
8856                 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
8857                     SCP_WCE) == 0)
8858                         flags |= CTL_LLF_FUA;
8859         }
8860
8861         lbalen = (struct ctl_lba_len_flags *)
8862             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8863         lbalen->lba = lba;
8864         lbalen->len = num_blocks;
8865         lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
8866
8867         ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
8868         ctsio->kern_rel_offset = 0;
8869
8870         CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
8871
8872         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8873         return (retval);
8874 }
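/*
 * Illustrative sketch only: how a backend's data_submit routine might read
 * back the parameters that ctl_read_write() stashed in the I/O above.  The
 * function name and byte-offset math are hypothetical; the real backends
 * live in ctl_backend_block.c and ctl_backend_ramdisk.c.
 */
#if 0
static int
example_data_submit(union ctl_io *io, uint32_t blocksize)
{
        struct ctl_lba_len_flags *lbalen;
        uint64_t byte_offset, byte_count;
        int is_read;

        lbalen = (struct ctl_lba_len_flags *)
            &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
        byte_offset = lbalen->lba * blocksize;
        byte_count = (uint64_t)lbalen->len * blocksize;
        is_read = (lbalen->flags & CTL_LLF_READ) != 0;

        /* ...issue the actual media read or write here... */
        return (CTL_RETVAL_COMPLETE);
}
#endif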
8875
8876 static int
8877 ctl_cnw_cont(union ctl_io *io)
8878 {
8879         struct ctl_scsiio *ctsio;
8880         struct ctl_lun *lun;
8881         struct ctl_lba_len_flags *lbalen;
8882         int retval;
8883
8884         ctsio = &io->scsiio;
8885         ctsio->io_hdr.status = CTL_STATUS_NONE;
8886         ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
8887         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8888         lbalen = (struct ctl_lba_len_flags *)
8889             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8890         lbalen->flags &= ~CTL_LLF_COMPARE;
8891         lbalen->flags |= CTL_LLF_WRITE;
8892
8893         CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
8894         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8895         return (retval);
8896 }
8897
8898 int
8899 ctl_cnw(struct ctl_scsiio *ctsio)
8900 {
8901         struct ctl_lun *lun;
8902         struct ctl_lba_len_flags *lbalen;
8903         uint64_t lba;
8904         uint32_t num_blocks;
8905         int flags, retval;
8906
8907         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8908
8909         CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
8910
8911         flags = 0;
8912         switch (ctsio->cdb[0]) {
8913         case COMPARE_AND_WRITE: {
8914                 struct scsi_compare_and_write *cdb;
8915
8916                 cdb = (struct scsi_compare_and_write *)ctsio->cdb;
8917                 if (cdb->byte2 & SRW10_FUA)
8918                         flags |= CTL_LLF_FUA;
8919                 if (cdb->byte2 & SRW10_DPO)
8920                         flags |= CTL_LLF_DPO;
8921                 lba = scsi_8btou64(cdb->addr);
8922                 num_blocks = cdb->length;
8923                 break;
8924         }
8925         default:
8926                 /*
8927                  * We got a command we don't support.  This shouldn't
8928                  * happen; commands should be filtered out above us.
8929                  */
8930                 ctl_set_invalid_opcode(ctsio);
8931                 ctl_done((union ctl_io *)ctsio);
8932
8933                 return (CTL_RETVAL_COMPLETE);
8934                 break; /* NOTREACHED */
8935         }
8936
8937         /*
8938          * The first check is to make sure we're in bounds, the second
8939          * check is to catch wrap-around problems.  If the lba + num blocks
8940          * is less than the lba, then we've wrapped around and the block
8941          * range is invalid anyway.
8942          */
8943         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8944          || ((lba + num_blocks) < lba)) {
8945                 ctl_set_lba_out_of_range(ctsio);
8946                 ctl_done((union ctl_io *)ctsio);
8947                 return (CTL_RETVAL_COMPLETE);
8948         }
8949
8950         /*
8951          * According to SBC-3, a transfer length of 0 is not an error.
8952          */
8953         if (num_blocks == 0) {
8954                 ctl_set_success(ctsio);
8955                 ctl_done((union ctl_io *)ctsio);
8956                 return (CTL_RETVAL_COMPLETE);
8957         }
8958
8959         /* Set FUA if write cache is disabled. */
8960         if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
8961             SCP_WCE) == 0)
8962                 flags |= CTL_LLF_FUA;
8963
8964         ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
8965         ctsio->kern_rel_offset = 0;
8966
8967         /*
8968          * Set the IO_CONT flag, so that if this I/O gets passed to
8969          * ctl_data_submit_done(), it'll get passed back to
8970                  * ctl_cnw_cont() for further processing.
8971          */
8972         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
8973         ctsio->io_cont = ctl_cnw_cont;
8974
8975         lbalen = (struct ctl_lba_len_flags *)
8976             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8977         lbalen->lba = lba;
8978         lbalen->len = num_blocks;
8979         lbalen->flags = CTL_LLF_COMPARE | flags;
8980
8981         CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
8982         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8983         return (retval);
8984 }
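/*
 * Illustrative summary of the COMPARE AND WRITE flow set up above: per
 * SBC-3 the initiator sends the verify data followed by the write data,
 * which is why kern_total_len is 2 * num_blocks * blocksize.  The backend
 * first runs the CTL_LLF_COMPARE pass; only if the verify data matches the
 * media does ctl_cnw_cont() flip the flags to CTL_LLF_WRITE and resubmit,
 * so that the write half is performed.  A miscompare ends the command with
 * an error instead.
 */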
8985
8986 int
8987 ctl_verify(struct ctl_scsiio *ctsio)
8988 {
8989         struct ctl_lun *lun;
8990         struct ctl_lba_len_flags *lbalen;
8991         uint64_t lba;
8992         uint32_t num_blocks;
8993         int bytchk, flags;
8994         int retval;
8995
8996         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8997
8998         CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
8999
9000         bytchk = 0;
9001         flags = CTL_LLF_FUA;
9002         switch (ctsio->cdb[0]) {
9003         case VERIFY_10: {
9004                 struct scsi_verify_10 *cdb;
9005
9006                 cdb = (struct scsi_verify_10 *)ctsio->cdb;
9007                 if (cdb->byte2 & SVFY_BYTCHK)
9008                         bytchk = 1;
9009                 if (cdb->byte2 & SVFY_DPO)
9010                         flags |= CTL_LLF_DPO;
9011                 lba = scsi_4btoul(cdb->addr);
9012                 num_blocks = scsi_2btoul(cdb->length);
9013                 break;
9014         }
9015         case VERIFY_12: {
9016                 struct scsi_verify_12 *cdb;
9017
9018                 cdb = (struct scsi_verify_12 *)ctsio->cdb;
9019                 if (cdb->byte2 & SVFY_BYTCHK)
9020                         bytchk = 1;
9021                 if (cdb->byte2 & SVFY_DPO)
9022                         flags |= CTL_LLF_DPO;
9023                 lba = scsi_4btoul(cdb->addr);
9024                 num_blocks = scsi_4btoul(cdb->length);
9025                 break;
9026         }
9027         case VERIFY_16: {
9028                 struct scsi_rw_16 *cdb;
9029
9030                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
9031                 if (cdb->byte2 & SVFY_BYTCHK)
9032                         bytchk = 1;
9033                 if (cdb->byte2 & SVFY_DPO)
9034                         flags |= CTL_LLF_DPO;
9035                 lba = scsi_8btou64(cdb->addr);
9036                 num_blocks = scsi_4btoul(cdb->length);
9037                 break;
9038         }
9039         default:
9040                 /*
9041                  * We got a command we don't support.  This shouldn't
9042                  * happen; commands should be filtered out above us.
9043                  */
9044                 ctl_set_invalid_opcode(ctsio);
9045                 ctl_done((union ctl_io *)ctsio);
9046                 return (CTL_RETVAL_COMPLETE);
9047         }
9048
9049         /*
9050          * The first check is to make sure we're in bounds; the second
9051          * check is to catch wrap-around problems.  If the lba + num blocks
9052          * is less than the lba, then we've wrapped around and the block
9053          * range is invalid anyway.
9054          */
9055         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9056          || ((lba + num_blocks) < lba)) {
9057                 ctl_set_lba_out_of_range(ctsio);
9058                 ctl_done((union ctl_io *)ctsio);
9059                 return (CTL_RETVAL_COMPLETE);
9060         }
9061
9062         /*
9063          * According to SBC-3, a transfer length of 0 is not an error.
9064          */
9065         if (num_blocks == 0) {
9066                 ctl_set_success(ctsio);
9067                 ctl_done((union ctl_io *)ctsio);
9068                 return (CTL_RETVAL_COMPLETE);
9069         }
9070
9071         lbalen = (struct ctl_lba_len_flags *)
9072             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9073         lbalen->lba = lba;
9074         lbalen->len = num_blocks;
9075         if (bytchk) {
9076                 lbalen->flags = CTL_LLF_COMPARE | flags;
9077                 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9078         } else {
9079                 lbalen->flags = CTL_LLF_VERIFY | flags;
9080                 ctsio->kern_total_len = 0;
9081         }
9082         ctsio->kern_rel_offset = 0;
9083
9084         CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
9085         retval = lun->backend->data_submit((union ctl_io *)ctsio);
9086         return (retval);
9087 }
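/*
 * Editor's note: a minimal sketch of decoding a VERIFY(10) CDB the way the
 * switch above does, assuming raw big-endian CDB bytes; field offsets
 * follow SBC-3, and the struct and helper names are illustrative, not
 * CTL's.
 *
 *	#include <stdint.h>
 *
 *	struct verify10 {
 *		uint64_t lba;
 *		uint32_t num_blocks;
 *		int	 bytchk;	// nonzero: data supplied for comparison
 *	};
 *
 *	static void
 *	decode_verify10(const uint8_t cdb[10], struct verify10 *v)
 *	{
 *		v->bytchk = (cdb[1] & 0x02) != 0;	// BYTCHK bit
 *		v->lba = ((uint64_t)cdb[2] << 24) | (cdb[3] << 16) |
 *		    (cdb[4] << 8) | cdb[5];
 *		v->num_blocks = (cdb[7] << 8) | cdb[8];	// VERIFICATION LENGTH
 *	}
 */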
9088
9089 int
9090 ctl_report_luns(struct ctl_scsiio *ctsio)
9091 {
9092         struct ctl_softc *softc;
9093         struct scsi_report_luns *cdb;
9094         struct scsi_report_luns_data *lun_data;
9095         struct ctl_lun *lun, *request_lun;
9096         struct ctl_port *port;
9097         int num_luns, retval;
9098         uint32_t alloc_len, lun_datalen;
9099         int num_filled;
9100         uint32_t initidx, targ_lun_id, lun_id;
9101
9102         retval = CTL_RETVAL_COMPLETE;
9103         cdb = (struct scsi_report_luns *)ctsio->cdb;
9104         port = ctl_io_port(&ctsio->io_hdr);
9105         softc = port->ctl_softc;
9106
9107         CTL_DEBUG_PRINT(("ctl_report_luns\n"));
9108
9109         mtx_lock(&softc->ctl_lock);
9110         num_luns = 0;
9111         for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) {
9112                 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS)
9113                         num_luns++;
9114         }
9115         mtx_unlock(&softc->ctl_lock);
9116
9117         switch (cdb->select_report) {
9118         case RPL_REPORT_DEFAULT:
9119         case RPL_REPORT_ALL:
9120         case RPL_REPORT_NONSUBSID:
9121                 break;
9122         case RPL_REPORT_WELLKNOWN:
9123         case RPL_REPORT_ADMIN:
9124         case RPL_REPORT_CONGLOM:
9125                 num_luns = 0;
9126                 break;
9127         default:
9128                 ctl_set_invalid_field(ctsio,
9129                                       /*sks_valid*/ 1,
9130                                       /*command*/ 1,
9131                                       /*field*/ 2,
9132                                       /*bit_valid*/ 0,
9133                                       /*bit*/ 0);
9134                 ctl_done((union ctl_io *)ctsio);
9135                 return (retval);
9136                 break; /* NOTREACHED */
9137         }
9138
9139         alloc_len = scsi_4btoul(cdb->length);
9140         /*
9141          * The initiator has to allocate at least 16 bytes for this request,
9142          * enough for the header and the first LUN entry.  Otherwise
9143          * we reject the request (per SPC-3 rev 14, section 6.21).
9144          */
9145         if (alloc_len < (sizeof(struct scsi_report_luns_data) +
9146             sizeof(struct scsi_report_luns_lundata))) {
9147                 ctl_set_invalid_field(ctsio,
9148                                       /*sks_valid*/ 1,
9149                                       /*command*/ 1,
9150                                       /*field*/ 6,
9151                                       /*bit_valid*/ 0,
9152                                       /*bit*/ 0);
9153                 ctl_done((union ctl_io *)ctsio);
9154                 return (retval);
9155         }
9156
9157         request_lun = (struct ctl_lun *)
9158                 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9159
9160         lun_datalen = sizeof(*lun_data) +
9161                 (num_luns * sizeof(struct scsi_report_luns_lundata));
9162
9163         ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
9164         lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
9165         ctsio->kern_sg_entries = 0;
9166
9167         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9168
9169         mtx_lock(&softc->ctl_lock);
9170         for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
9171                 lun_id = ctl_lun_map_from_port(port, targ_lun_id);
9172                 if (lun_id >= CTL_MAX_LUNS)
9173                         continue;
9174                 lun = softc->ctl_luns[lun_id];
9175                 if (lun == NULL)
9176                         continue;
9177
9178                 be64enc(lun_data->luns[num_filled++].lundata,
9179                     ctl_encode_lun(targ_lun_id));
9180
9181                 /*
9182                  * According to SPC-3, rev 14 section 6.21:
9183                  *
9184                  * "The execution of a REPORT LUNS command to any valid and
9185                  * installed logical unit shall clear the REPORTED LUNS DATA
9186                  * HAS CHANGED unit attention condition for all logical
9187                  * units of that target with respect to the requesting
9188                  * initiator. A valid and installed logical unit is one
9189                  * having a PERIPHERAL QUALIFIER of 000b in the standard
9190                  * INQUIRY data (see 6.4.2)."
9191                  *
9192                  * If request_lun is NULL, the LUN this report luns command
9193                  * was issued to is either disabled or doesn't exist. In that
9194                  * case, we shouldn't clear any pending lun change unit
9195                  * attention.
9196                  */
9197                 if (request_lun != NULL) {
9198                         mtx_lock(&lun->lun_lock);
9199                         ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
9200                         mtx_unlock(&lun->lun_lock);
9201                 }
9202         }
9203         mtx_unlock(&softc->ctl_lock);
9204
9205         /*
9206          * It's quite possible that we've returned fewer LUNs than we allocated
9207          * space for.  Trim it.
9208          */
9209         lun_datalen = sizeof(*lun_data) +
9210                 (num_filled * sizeof(struct scsi_report_luns_lundata));
9211
9212         if (lun_datalen < alloc_len) {
9213                 ctsio->residual = alloc_len - lun_datalen;
9214                 ctsio->kern_data_len = lun_datalen;
9215                 ctsio->kern_total_len = lun_datalen;
9216         } else {
9217                 ctsio->residual = 0;
9218                 ctsio->kern_data_len = alloc_len;
9219                 ctsio->kern_total_len = alloc_len;
9220         }
9221         ctsio->kern_data_resid = 0;
9222         ctsio->kern_rel_offset = 0;
9223         ctsio->kern_sg_entries = 0;
9224
9225         /*
9226          * We set this to the actual data length, regardless of how much
9227          * space we actually have to return results.  If the user looks at
9228          * this value, he'll know whether or not he allocated enough space
9229          * and can reissue the command if necessary.  We don't support
9230          * well-known logical units; if the user asks for those, we return none.
9231          */
9232         scsi_ulto4b(lun_datalen - 8, lun_data->length);
9233
9234         /*
9235          * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
9236          * this request.
9237          */
9238         ctl_set_success(ctsio);
9239         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9240         ctsio->be_move_done = ctl_config_move_done;
9241         ctl_datamove((union ctl_io *)ctsio);
9242         return (retval);
9243 }
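/*
 * Editor's note: the LUN LIST LENGTH stored above (lun_datalen - 8) counts
 * list bytes, not entries; initiators divide by 8.  A hedged sketch of
 * parsing the returned parameter data (the helper name is illustrative):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static void
 *	parse_report_luns(const uint8_t *buf, uint32_t buflen)
 *	{
 *		uint32_t list_len, i;
 *		const uint8_t *e;
 *
 *		if (buflen < 8)
 *			return;
 *		list_len = ((uint32_t)buf[0] << 24) | (buf[1] << 16) |
 *		    (buf[2] << 8) | buf[3];
 *		// One 8-byte LUN field per entry, after the 8-byte header.
 *		for (i = 0; i + 8 <= list_len && 8 + i + 8 <= buflen; i += 8) {
 *			e = buf + 8 + i;
 *			// Peripheral device addressing: LUN in bytes 0-1.
 *			printf("LUN %u\n",
 *			    (unsigned)(((e[0] & 0x3f) << 8) | e[1]));
 *		}
 *	}
 */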
9244
9245 int
9246 ctl_request_sense(struct ctl_scsiio *ctsio)
9247 {
9248         struct scsi_request_sense *cdb;
9249         struct scsi_sense_data *sense_ptr;
9250         struct ctl_softc *ctl_softc;
9251         struct ctl_lun *lun;
9252         uint32_t initidx;
9253         int have_error;
9254         scsi_sense_data_type sense_format;
9255         ctl_ua_type ua_type;
9256
9257         cdb = (struct scsi_request_sense *)ctsio->cdb;
9258
9259         ctl_softc = control_softc;
9260         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9261
9262         CTL_DEBUG_PRINT(("ctl_request_sense\n"));
9263
9264         /*
9265          * Determine which sense format the user wants.
9266          */
9267         if (cdb->byte2 & SRS_DESC)
9268                 sense_format = SSD_TYPE_DESC;
9269         else
9270                 sense_format = SSD_TYPE_FIXED;
9271
9272         ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
9273         sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
9274         ctsio->kern_sg_entries = 0;
9275
9276         /*
9277          * struct scsi_sense_data, which is currently set to 256 bytes, is
9278          * larger than the largest allowed value for the length field in the
9279          * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
9280          */
9281         ctsio->residual = 0;
9282         ctsio->kern_data_len = cdb->length;
9283         ctsio->kern_total_len = cdb->length;
9284
9285         ctsio->kern_data_resid = 0;
9286         ctsio->kern_rel_offset = 0;
9287         ctsio->kern_sg_entries = 0;
9288
9289         /*
9290          * If we don't have a LUN, we don't have any pending sense.
9291          */
9292         if (lun == NULL)
9293                 goto no_sense;
9294
9295         have_error = 0;
9296         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9297         /*
9298          * Check for pending sense, and then for pending unit attentions.
9299          * Pending sense gets returned first, then pending unit attentions.
9300          */
9301         mtx_lock(&lun->lun_lock);
9302 #ifdef CTL_WITH_CA
9303         if (ctl_is_set(lun->have_ca, initidx)) {
9304                 scsi_sense_data_type stored_format;
9305
9306                 /*
9307                  * Check to see which sense format was used for the stored
9308                  * sense data.
9309                  */
9310                 stored_format = scsi_sense_type(&lun->pending_sense[initidx]);
9311
9312                 /*
9313                  * If the user requested a different sense format than the
9314                  * one we stored, then we need to convert it to the other
9315                  * format.  If we're going from descriptor to fixed format
9316                  * sense data, we may lose things in translation, depending
9317                  * on what options were used.
9318                  *
9319                  * If the stored format is SSD_TYPE_NONE (i.e. invalid)
9320                  * for some reason, we'll just copy it out as-is.
9321                  */
9322                 if ((stored_format == SSD_TYPE_FIXED)
9323                  && (sense_format == SSD_TYPE_DESC))
9324                         ctl_sense_to_desc((struct scsi_sense_data_fixed *)
9325                             &lun->pending_sense[initidx],
9326                             (struct scsi_sense_data_desc *)sense_ptr);
9327                 else if ((stored_format == SSD_TYPE_DESC)
9328                       && (sense_format == SSD_TYPE_FIXED))
9329                         ctl_sense_to_fixed((struct scsi_sense_data_desc *)
9330                             &lun->pending_sense[initidx],
9331                             (struct scsi_sense_data_fixed *)sense_ptr);
9332                 else
9333                         memcpy(sense_ptr, &lun->pending_sense[initidx],
9334                                MIN(sizeof(*sense_ptr),
9335                                sizeof(lun->pending_sense[initidx])));
9336
9337                 ctl_clear_mask(lun->have_ca, initidx);
9338                 have_error = 1;
9339         } else
9340 #endif
9341         {
9342                 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format);
9343                 if (ua_type != CTL_UA_NONE)
9344                         have_error = 1;
9345                 if (ua_type == CTL_UA_LUN_CHANGE) {
9346                         mtx_unlock(&lun->lun_lock);
9347                         mtx_lock(&ctl_softc->ctl_lock);
9348                         ctl_clr_ua_allluns(ctl_softc, initidx, ua_type);
9349                         mtx_unlock(&ctl_softc->ctl_lock);
9350                         mtx_lock(&lun->lun_lock);
9351                 }
9352
9353         }
9354         mtx_unlock(&lun->lun_lock);
9355
9356         /*
9357          * We already have a pending error, return it.
9358          */
9359         if (have_error != 0) {
9360                 /*
9361                  * We report the SCSI status as OK, since the status of the
9362                  * request sense command itself is OK.
9363                  * We report 0 for the sense length, because we aren't doing
9364                  * autosense in this case.  We're reporting sense as
9365                  * parameter data.
9366                  */
9367                 ctl_set_success(ctsio);
9368                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9369                 ctsio->be_move_done = ctl_config_move_done;
9370                 ctl_datamove((union ctl_io *)ctsio);
9371                 return (CTL_RETVAL_COMPLETE);
9372         }
9373
9374 no_sense:
9375
9376         /*
9377          * No sense information to report, so we report that everything is
9378          * okay.
9379          */
9380         ctl_set_sense_data(sense_ptr,
9381                            lun,
9382                            sense_format,
9383                            /*current_error*/ 1,
9384                            /*sense_key*/ SSD_KEY_NO_SENSE,
9385                            /*asc*/ 0x00,
9386                            /*ascq*/ 0x00,
9387                            SSD_ELEM_NONE);
9388
9389         /*
9390          * We report 0 for the sense length, because we aren't doing
9391          * autosense in this case.  We're reporting sense as parameter data.
9392          */
9393         ctl_set_success(ctsio);
9394         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9395         ctsio->be_move_done = ctl_config_move_done;
9396         ctl_datamove((union ctl_io *)ctsio);
9397         return (CTL_RETVAL_COMPLETE);
9398 }
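/*
 * Editor's note: REQUEST SENSE returns sense bytes as parameter data, so
 * the code above reports GOOD status with a zero sense length rather than
 * attaching autosense.  A hedged sketch of the fixed-format "no sense"
 * payload built for the no_sense path (layout per SPC-4; the helper is
 * illustrative, not CTL's ctl_set_sense_data()):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void
 *	build_fixed_no_sense(uint8_t sense[18])
 *	{
 *		memset(sense, 0, 18);
 *		sense[0] = 0x70;	// current error, fixed format
 *		sense[2] = 0x00;	// sense key: NO SENSE
 *		sense[7] = 18 - 8;	// ADDITIONAL SENSE LENGTH
 *		// ASC/ASCQ (bytes 12-13) remain 0x00/0x00.
 *	}
 */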
9399
9400 int
9401 ctl_tur(struct ctl_scsiio *ctsio)
9402 {
9403
9404         CTL_DEBUG_PRINT(("ctl_tur\n"));
9405
9406         ctl_set_success(ctsio);
9407         ctl_done((union ctl_io *)ctsio);
9408
9409         return (CTL_RETVAL_COMPLETE);
9410 }
9411
9412 /*
9413  * SCSI VPD page 0x00, the Supported VPD Pages page.
9414  */
9415 static int
9416 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
9417 {
9418         struct scsi_vpd_supported_pages *pages;
9419         int sup_page_size;
9420         struct ctl_lun *lun;
9421         int p;
9422
9423         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9424
9425         sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
9426             SCSI_EVPD_NUM_SUPPORTED_PAGES;
9427         ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
9428         pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
9429         ctsio->kern_sg_entries = 0;
9430
9431         if (sup_page_size < alloc_len) {
9432                 ctsio->residual = alloc_len - sup_page_size;
9433                 ctsio->kern_data_len = sup_page_size;
9434                 ctsio->kern_total_len = sup_page_size;
9435         } else {
9436                 ctsio->residual = 0;
9437                 ctsio->kern_data_len = alloc_len;
9438                 ctsio->kern_total_len = alloc_len;
9439         }
9440         ctsio->kern_data_resid = 0;
9441         ctsio->kern_rel_offset = 0;
9442         ctsio->kern_sg_entries = 0;
9443
9444         /*
9445          * The control device is always connected.  The disk device, on the
9446          * other hand, may not be online all the time.  Need to change this
9447          * to figure out whether the disk device is actually online or not.
9448          */
9449         if (lun != NULL)
9450                 pages->device = (SID_QUAL_LU_CONNECTED << 5) |
9451                                 lun->be_lun->lun_type;
9452         else
9453                 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9454
9455         p = 0;
9456         /* Supported VPD pages */
9457         pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
9458         /* Serial Number */
9459         pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
9460         /* Device Identification */
9461         pages->page_list[p++] = SVPD_DEVICE_ID;
9462         /* Extended INQUIRY Data */
9463         pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
9464         /* Mode Page Policy */
9465         pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
9466         /* SCSI Ports */
9467         pages->page_list[p++] = SVPD_SCSI_PORTS;
9468         /* Third-party Copy */
9469         pages->page_list[p++] = SVPD_SCSI_TPC;
9470         if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
9471                 /* Block limits */
9472                 pages->page_list[p++] = SVPD_BLOCK_LIMITS;
9473                 /* Block Device Characteristics */
9474                 pages->page_list[p++] = SVPD_BDC;
9475                 /* Logical Block Provisioning */
9476                 pages->page_list[p++] = SVPD_LBP;
9477         }
9478         pages->length = p;
9479
9480         ctl_set_success(ctsio);
9481         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9482         ctsio->be_move_done = ctl_config_move_done;
9483         ctl_datamove((union ctl_io *)ctsio);
9484         return (CTL_RETVAL_COMPLETE);
9485 }
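/*
 * Editor's note: the residual/kern_data_len pattern above (and repeated in
 * every VPD handler below) implements the standard SCSI allocation-length
 * rule: never return more than the initiator asked for, and record any
 * shortfall as residual.  A hedged sketch of the rule in isolation (struct
 * and helper names are illustrative):
 *
 *	#include <stdint.h>
 *
 *	struct xfer {
 *		uint32_t data_len;	// bytes actually moved
 *		uint32_t residual;	// allocation length left unused
 *	};
 *
 *	static void
 *	clamp_to_alloc_len(uint32_t have, uint32_t alloc_len, struct xfer *x)
 *	{
 *		if (have < alloc_len) {
 *			x->data_len = have;
 *			x->residual = alloc_len - have;
 *		} else {
 *			x->data_len = alloc_len;
 *			x->residual = 0;
 *		}
 *	}
 */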
9486
9487 /*
9488  * SCSI VPD page 0x80, the Unit Serial Number page.
9489  */
9490 static int
9491 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9492 {
9493         struct scsi_vpd_unit_serial_number *sn_ptr;
9494         struct ctl_lun *lun;
9495         int data_len;
9496
9497         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9498
9499         data_len = 4 + CTL_SN_LEN;
9500         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9501         sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9502         if (data_len < alloc_len) {
9503                 ctsio->residual = alloc_len - data_len;
9504                 ctsio->kern_data_len = data_len;
9505                 ctsio->kern_total_len = data_len;
9506         } else {
9507                 ctsio->residual = 0;
9508                 ctsio->kern_data_len = alloc_len;
9509                 ctsio->kern_total_len = alloc_len;
9510         }
9511         ctsio->kern_data_resid = 0;
9512         ctsio->kern_rel_offset = 0;
9513         ctsio->kern_sg_entries = 0;
9514
9515         /*
9516          * The control device is always connected.  The disk device, on the
9517          * other hand, may not be online all the time.  Need to change this
9518          * to figure out whether the disk device is actually online or not.
9519          */
9520         if (lun != NULL)
9521                 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9522                                   lun->be_lun->lun_type;
9523         else
9524                 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9525
9526         sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9527         sn_ptr->length = CTL_SN_LEN;
9528         /*
9529          * If we don't have a LUN, we just leave the serial number as
9530          * all spaces.
9531          */
9532         if (lun != NULL) {
9533                 strncpy((char *)sn_ptr->serial_num,
9534                         (char *)lun->be_lun->serial_num, CTL_SN_LEN);
9535         } else
9536                 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);
9537
9538         ctl_set_success(ctsio);
9539         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9540         ctsio->be_move_done = ctl_config_move_done;
9541         ctl_datamove((union ctl_io *)ctsio);
9542         return (CTL_RETVAL_COMPLETE);
9543 }
9544
9545
9546 /*
9547  * SCSI VPD page 0x86, the Extended INQUIRY Data page.
9548  */
9549 static int
9550 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
9551 {
9552         struct scsi_vpd_extended_inquiry_data *eid_ptr;
9553         struct ctl_lun *lun;
9554         int data_len;
9555
9556         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9557
9558         data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
9559         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9560         eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
9561         ctsio->kern_sg_entries = 0;
9562
9563         if (data_len < alloc_len) {
9564                 ctsio->residual = alloc_len - data_len;
9565                 ctsio->kern_data_len = data_len;
9566                 ctsio->kern_total_len = data_len;
9567         } else {
9568                 ctsio->residual = 0;
9569                 ctsio->kern_data_len = alloc_len;
9570                 ctsio->kern_total_len = alloc_len;
9571         }
9572         ctsio->kern_data_resid = 0;
9573         ctsio->kern_rel_offset = 0;
9574         ctsio->kern_sg_entries = 0;
9575
9576         /*
9577          * The control device is always connected.  The disk device, on the
9578          * other hand, may not be online all the time.
9579          */
9580         if (lun != NULL)
9581                 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9582                                      lun->be_lun->lun_type;
9583         else
9584                 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9585         eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
9586         scsi_ulto2b(data_len - 4, eid_ptr->page_length);
9587         /*
9588          * We support head of queue, ordered and simple tags.
9589          */
9590         eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
9591         /*
9592          * Volatile cache supported.
9593          */
9594         eid_ptr->flags3 = SVPD_EID_V_SUP;
9595
9596         /*
9597          * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
9598          * attention for a particular IT nexus on all LUNs once we have
9599          * reported it to that nexus.  This bit is required as of SPC-4.
9600          */
9601         eid_ptr->flags4 = SVPD_EID_LUICLT;
9602
9603         /*
9604          * XXX KDM in order to correctly answer this, we would need
9605          * information from the SIM to determine how much sense data it
9606          * can send.  So this would really be a path inquiry field, most
9607          * likely.  This can be set to a maximum of 252 according to SPC-4,
9608          * but the hardware may or may not be able to support that much.
9609          * 0 just means that the maximum sense data length is not reported.
9610          */
9611         eid_ptr->max_sense_length = 0;
9612
9613         ctl_set_success(ctsio);
9614         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9615         ctsio->be_move_done = ctl_config_move_done;
9616         ctl_datamove((union ctl_io *)ctsio);
9617         return (CTL_RETVAL_COMPLETE);
9618 }
9619
9620 static int
9621 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
9622 {
9623         struct scsi_vpd_mode_page_policy *mpp_ptr;
9624         struct ctl_lun *lun;
9625         int data_len;
9626
9627         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9628
9629         data_len = sizeof(struct scsi_vpd_mode_page_policy) +
9630             sizeof(struct scsi_vpd_mode_page_policy_descr);
9631
9632         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9633         mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
9634         ctsio->kern_sg_entries = 0;
9635
9636         if (data_len < alloc_len) {
9637                 ctsio->residual = alloc_len - data_len;
9638                 ctsio->kern_data_len = data_len;
9639                 ctsio->kern_total_len = data_len;
9640         } else {
9641                 ctsio->residual = 0;
9642                 ctsio->kern_data_len = alloc_len;
9643                 ctsio->kern_total_len = alloc_len;
9644         }
9645         ctsio->kern_data_resid = 0;
9646         ctsio->kern_rel_offset = 0;
9647         ctsio->kern_sg_entries = 0;
9648
9649         /*
9650          * The control device is always connected.  The disk device, on the
9651          * other hand, may not be online all the time.
9652          */
9653         if (lun != NULL)
9654                 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9655                                      lun->be_lun->lun_type;
9656         else
9657                 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9658         mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
9659         scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
9660         mpp_ptr->descr[0].page_code = 0x3f;
9661         mpp_ptr->descr[0].subpage_code = 0xff;
9662         mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;
9663
9664         ctl_set_success(ctsio);
9665         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9666         ctsio->be_move_done = ctl_config_move_done;
9667         ctl_datamove((union ctl_io *)ctsio);
9668         return (CTL_RETVAL_COMPLETE);
9669 }
9670
9671 /*
9672  * SCSI VPD page 0x83, the Device Identification page.
9673  */
9674 static int
9675 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9676 {
9677         struct scsi_vpd_device_id *devid_ptr;
9678         struct scsi_vpd_id_descriptor *desc;
9679         struct ctl_softc *softc;
9680         struct ctl_lun *lun;
9681         struct ctl_port *port;
9682         int data_len, g;
9683         uint8_t proto;
9684
9685         softc = control_softc;
9686
9687         port = ctl_io_port(&ctsio->io_hdr);
9688         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9689
9690         data_len = sizeof(struct scsi_vpd_device_id) +
9691             sizeof(struct scsi_vpd_id_descriptor) +
9692                 sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9693             sizeof(struct scsi_vpd_id_descriptor) +
9694                 sizeof(struct scsi_vpd_id_trgt_port_grp_id);
9695         if (lun && lun->lun_devid)
9696                 data_len += lun->lun_devid->len;
9697         if (port && port->port_devid)
9698                 data_len += port->port_devid->len;
9699         if (port && port->target_devid)
9700                 data_len += port->target_devid->len;
9701
9702         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9703         devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
9704         ctsio->kern_sg_entries = 0;
9705
9706         if (data_len < alloc_len) {
9707                 ctsio->residual = alloc_len - data_len;
9708                 ctsio->kern_data_len = data_len;
9709                 ctsio->kern_total_len = data_len;
9710         } else {
9711                 ctsio->residual = 0;
9712                 ctsio->kern_data_len = alloc_len;
9713                 ctsio->kern_total_len = alloc_len;
9714         }
9715         ctsio->kern_data_resid = 0;
9716         ctsio->kern_rel_offset = 0;
9717         ctsio->kern_sg_entries = 0;
9718
9719         /*
9720          * The control device is always connected.  The disk device, on the
9721          * other hand, may not be online all the time.
9722          */
9723         if (lun != NULL)
9724                 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9725                                      lun->be_lun->lun_type;
9726         else
9727                 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9728         devid_ptr->page_code = SVPD_DEVICE_ID;
9729         scsi_ulto2b(data_len - 4, devid_ptr->length);
9730
9731         if (port && port->port_type == CTL_PORT_FC)
9732                 proto = SCSI_PROTO_FC << 4;
9733         else if (port && port->port_type == CTL_PORT_ISCSI)
9734                 proto = SCSI_PROTO_ISCSI << 4;
9735         else
9736                 proto = SCSI_PROTO_SPI << 4;
9737         desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
9738
9739         /*
9740          * We're using a LUN association here, i.e., this device ID is a
9741          * per-LUN identifier.
9742          */
9743         if (lun && lun->lun_devid) {
9744                 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
9745                 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9746                     lun->lun_devid->len);
9747         }
9748
9749         /*
9750          * This is for the WWPN which is a port association.
9751          */
9752         if (port && port->port_devid) {
9753                 memcpy(desc, port->port_devid->data, port->port_devid->len);
9754                 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
9755                     port->port_devid->len);
9756         }
9757
9758         /*
9759          * This is for the Relative Target Port (type 4h) identifier.
9760          */
9761         desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9762         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9763             SVPD_ID_TYPE_RELTARG;
9764         desc->length = 4;
9765         scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
9766         desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9767             sizeof(struct scsi_vpd_id_rel_trgt_port_id));
9768
9769         /*
9770          * This is for the Target Port Group (type 5h) identifier.
9771          */
9772         desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
9773         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
9774             SVPD_ID_TYPE_TPORTGRP;
9775         desc->length = 4;
9776         if (softc->is_single ||
9777             (port && port->status & CTL_PORT_STATUS_HA_SHARED))
9778                 g = 1;
9779         else
9780                 g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt;
9781         scsi_ulto2b(g, &desc->identifier[2]);
9782         desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9783             sizeof(struct scsi_vpd_id_trgt_port_grp_id));
9784
9785         /*
9786          * This is for the Target identifier
9787          */
9788         if (port && port->target_devid) {
9789                 memcpy(desc, port->target_devid->data, port->target_devid->len);
9790         }
9791
9792         ctl_set_success(ctsio);
9793         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9794         ctsio->be_move_done = ctl_config_move_done;
9795         ctl_datamove((union ctl_io *)ctsio);
9796         return (CTL_RETVAL_COMPLETE);
9797 }
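/*
 * Editor's note: VPD page 0x83 is a sequence of variable-length identifier
 * descriptors, which is why the code above advances "desc" by each
 * descriptor's size.  A hedged initiator-side sketch of walking that list
 * (offsets per SPC-4; not part of CTL):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static void
 *	walk_devid_page(const uint8_t *page, uint32_t len)
 *	{
 *		uint32_t off = 4;	// descriptors start after the header
 *		uint32_t dlen;
 *
 *		while (off + 4 <= len) {
 *			dlen = page[off + 3];	// DESIGNATOR LENGTH
 *			if (off + 4 + dlen > len)
 *				break;
 *			printf("assoc %d type %d len %u\n",
 *			    (page[off + 1] >> 4) & 0x3,
 *			    page[off + 1] & 0xf, dlen);
 *			off += 4 + dlen;
 *		}
 *	}
 */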
9798
9799 static int
9800 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
9801 {
9802         struct ctl_softc *softc = control_softc;
9803         struct scsi_vpd_scsi_ports *sp;
9804         struct scsi_vpd_port_designation *pd;
9805         struct scsi_vpd_port_designation_cont *pdc;
9806         struct ctl_lun *lun;
9807         struct ctl_port *port;
9808         int data_len, num_target_ports, iid_len, id_len;
9809
9810         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9811
9812         num_target_ports = 0;
9813         iid_len = 0;
9814         id_len = 0;
9815         mtx_lock(&softc->ctl_lock);
9816         STAILQ_FOREACH(port, &softc->port_list, links) {
9817                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9818                         continue;
9819                 if (lun != NULL &&
9820                     ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
9821                         continue;
9822                 num_target_ports++;
9823                 if (port->init_devid)
9824                         iid_len += port->init_devid->len;
9825                 if (port->port_devid)
9826                         id_len += port->port_devid->len;
9827         }
9828         mtx_unlock(&softc->ctl_lock);
9829
9830         data_len = sizeof(struct scsi_vpd_scsi_ports) +
9831             num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
9832              sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
9833         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9834         sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
9835         ctsio->kern_sg_entries = 0;
9836
9837         if (data_len < alloc_len) {
9838                 ctsio->residual = alloc_len - data_len;
9839                 ctsio->kern_data_len = data_len;
9840                 ctsio->kern_total_len = data_len;
9841         } else {
9842                 ctsio->residual = 0;
9843                 ctsio->kern_data_len = alloc_len;
9844                 ctsio->kern_total_len = alloc_len;
9845         }
9846         ctsio->kern_data_resid = 0;
9847         ctsio->kern_rel_offset = 0;
9848         ctsio->kern_sg_entries = 0;
9849
9850         /*
9851          * The control device is always connected.  The disk device, on the
9852          * other hand, may not be online all the time.  Need to change this
9853          * to figure out whether the disk device is actually online or not.
9854          */
9855         if (lun != NULL)
9856                 sp->device = (SID_QUAL_LU_CONNECTED << 5) |
9857                                   lun->be_lun->lun_type;
9858         else
9859                 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9860
9861         sp->page_code = SVPD_SCSI_PORTS;
9862         scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
9863             sp->page_length);
9864         pd = &sp->design[0];
9865
9866         mtx_lock(&softc->ctl_lock);
9867         STAILQ_FOREACH(port, &softc->port_list, links) {
9868                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9869                         continue;
9870                 if (lun != NULL &&
9871                     ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
9872                         continue;
9873                 scsi_ulto2b(port->targ_port, pd->relative_port_id);
9874                 if (port->init_devid) {
9875                         iid_len = port->init_devid->len;
9876                         memcpy(pd->initiator_transportid,
9877                             port->init_devid->data, port->init_devid->len);
9878                 } else
9879                         iid_len = 0;
9880                 scsi_ulto2b(iid_len, pd->initiator_transportid_length);
9881                 pdc = (struct scsi_vpd_port_designation_cont *)
9882                     (&pd->initiator_transportid[iid_len]);
9883                 if (port->port_devid) {
9884                         id_len = port->port_devid->len;
9885                         memcpy(pdc->target_port_descriptors,
9886                             port->port_devid->data, port->port_devid->len);
9887                 } else
9888                         id_len = 0;
9889                 scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
9890                 pd = (struct scsi_vpd_port_designation *)
9891                     ((uint8_t *)pdc->target_port_descriptors + id_len);
9892         }
9893         mtx_unlock(&softc->ctl_lock);
9894
9895         ctl_set_success(ctsio);
9896         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9897         ctsio->be_move_done = ctl_config_move_done;
9898         ctl_datamove((union ctl_io *)ctsio);
9899         return (CTL_RETVAL_COMPLETE);
9900 }
9901
9902 static int
9903 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
9904 {
9905         struct scsi_vpd_block_limits *bl_ptr;
9906         struct ctl_lun *lun;
9907
9908         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9909
9910         ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
9911         bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
9912         ctsio->kern_sg_entries = 0;
9913
9914         if (sizeof(*bl_ptr) < alloc_len) {
9915                 ctsio->residual = alloc_len - sizeof(*bl_ptr);
9916                 ctsio->kern_data_len = sizeof(*bl_ptr);
9917                 ctsio->kern_total_len = sizeof(*bl_ptr);
9918         } else {
9919                 ctsio->residual = 0;
9920                 ctsio->kern_data_len = alloc_len;
9921                 ctsio->kern_total_len = alloc_len;
9922         }
9923         ctsio->kern_data_resid = 0;
9924         ctsio->kern_rel_offset = 0;
9925         ctsio->kern_sg_entries = 0;
9926
9927         /*
9928          * The control device is always connected.  The disk device, on the
9929          * other hand, may not be online all the time.  Need to change this
9930          * to figure out whether the disk device is actually online or not.
9931          */
9932         if (lun != NULL)
9933                 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9934                                   lun->be_lun->lun_type;
9935         else
9936                 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9937
9938         bl_ptr->page_code = SVPD_BLOCK_LIMITS;
9939         scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
9940         bl_ptr->max_cmp_write_len = 0xff;
9941         scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
9942         if (lun != NULL) {
9943                 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
9944                 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
9945                         scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
9946                         scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
9947                         if (lun->be_lun->ublockexp != 0) {
9948                                 scsi_ulto4b((1 << lun->be_lun->ublockexp),
9949                                     bl_ptr->opt_unmap_grain);
9950                                 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
9951                                     bl_ptr->unmap_grain_align);
9952                         }
9953                 }
9954                 scsi_ulto4b(lun->be_lun->atomicblock,
9955                     bl_ptr->max_atomic_transfer_length);
9956                 scsi_ulto4b(0, bl_ptr->atomic_alignment);
9957                 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
9958                 scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary);
9959                 scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size);
9960         }
9961         scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);
9962
9963         ctl_set_success(ctsio);
9964         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9965         ctsio->be_move_done = ctl_config_move_done;
9966         ctl_datamove((union ctl_io *)ctsio);
9967         return (CTL_RETVAL_COMPLETE);
9968 }
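/*
 * Editor's note: the 0x80000000 OR'd into unmap_grain_align above is the
 * UGAVALID bit from SBC-3; the low 31 bits carry the alignment, and the
 * granularity itself is 1 << ublockexp.  A hedged sketch of decoding the
 * two fields on the initiator side (names are illustrative):
 *
 *	#include <stdint.h>
 *
 *	static void
 *	decode_unmap_grain(uint32_t grain_raw, uint32_t align_raw,
 *	    uint32_t *grain, uint32_t *align, int *align_valid)
 *	{
 *		*grain = grain_raw;	// OPTIMAL UNMAP GRANULARITY, in blocks
 *		*align_valid = (align_raw & 0x80000000u) != 0;	// UGAVALID
 *		*align = align_raw & 0x7fffffffu;
 *	}
 */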
9969
9970 static int
9971 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
9972 {
9973         struct scsi_vpd_block_device_characteristics *bdc_ptr;
9974         struct ctl_lun *lun;
9975         const char *value;
9976         u_int i;
9977
9978         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9979
9980         ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
9981         bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
9982         ctsio->kern_sg_entries = 0;
9983
9984         if (sizeof(*bdc_ptr) < alloc_len) {
9985                 ctsio->residual = alloc_len - sizeof(*bdc_ptr);
9986                 ctsio->kern_data_len = sizeof(*bdc_ptr);
9987                 ctsio->kern_total_len = sizeof(*bdc_ptr);
9988         } else {
9989                 ctsio->residual = 0;
9990                 ctsio->kern_data_len = alloc_len;
9991                 ctsio->kern_total_len = alloc_len;
9992         }
9993         ctsio->kern_data_resid = 0;
9994         ctsio->kern_rel_offset = 0;
9995         ctsio->kern_sg_entries = 0;
9996
9997         /*
9998          * The control device is always connected.  The disk device, on the
9999          * other hand, may not be online all the time.  Need to change this
10000          * to figure out whether the disk device is actually online or not.
10001          */
10002         if (lun != NULL)
10003                 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10004                                   lun->be_lun->lun_type;
10005         else
10006                 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10007         bdc_ptr->page_code = SVPD_BDC;
10008         scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
10009         if (lun != NULL &&
10010             (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
10011                 i = strtol(value, NULL, 0);
10012         else
10013                 i = CTL_DEFAULT_ROTATION_RATE;
10014         scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
10015         if (lun != NULL &&
10016             (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
10017                 i = strtol(value, NULL, 0);
10018         else
10019                 i = 0;
10020         bdc_ptr->wab_wac_ff = (i & 0x0f);
10021         bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;
10022
10023         ctl_set_success(ctsio);
10024         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10025         ctsio->be_move_done = ctl_config_move_done;
10026         ctl_datamove((union ctl_io *)ctsio);
10027         return (CTL_RETVAL_COMPLETE);
10028 }
10029
10030 static int
10031 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
10032 {
10033         struct scsi_vpd_logical_block_prov *lbp_ptr;
10034         struct ctl_lun *lun;
10035
10036         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10037
10038         ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
10039         lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
10040         ctsio->kern_sg_entries = 0;
10041
10042         if (sizeof(*lbp_ptr) < alloc_len) {
10043                 ctsio->residual = alloc_len - sizeof(*lbp_ptr);
10044                 ctsio->kern_data_len = sizeof(*lbp_ptr);
10045                 ctsio->kern_total_len = sizeof(*lbp_ptr);
10046         } else {
10047                 ctsio->residual = 0;
10048                 ctsio->kern_data_len = alloc_len;
10049                 ctsio->kern_total_len = alloc_len;
10050         }
10051         ctsio->kern_data_resid = 0;
10052         ctsio->kern_rel_offset = 0;
10053         ctsio->kern_sg_entries = 0;
10054
10055         /*
10056          * The control device is always connected.  The disk device, on the
10057          * other hand, may not be online all the time.  Need to change this
10058          * to figure out whether the disk device is actually online or not.
10059          */
10060         if (lun != NULL)
10061                 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10062                                   lun->be_lun->lun_type;
10063         else
10064                 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10065
10066         lbp_ptr->page_code = SVPD_LBP;
10067         scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
10068         lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
10069         if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
10070                 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
10071                     SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
10072                 lbp_ptr->prov_type = SVPD_LBP_THIN;
10073         }
10074
10075         ctl_set_success(ctsio);
10076         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10077         ctsio->be_move_done = ctl_config_move_done;
10078         ctl_datamove((union ctl_io *)ctsio);
10079         return (CTL_RETVAL_COMPLETE);
10080 }
10081
10082 /*
10083  * INQUIRY with the EVPD bit set.
10084  */
10085 static int
10086 ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
10087 {
10088         struct ctl_lun *lun;
10089         struct scsi_inquiry *cdb;
10090         int alloc_len, retval;
10091
10092         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10093         cdb = (struct scsi_inquiry *)ctsio->cdb;
10094         alloc_len = scsi_2btoul(cdb->length);
10095
10096         switch (cdb->page_code) {
10097         case SVPD_SUPPORTED_PAGES:
10098                 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
10099                 break;
10100         case SVPD_UNIT_SERIAL_NUMBER:
10101                 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
10102                 break;
10103         case SVPD_DEVICE_ID:
10104                 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
10105                 break;
10106         case SVPD_EXTENDED_INQUIRY_DATA:
10107                 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
10108                 break;
10109         case SVPD_MODE_PAGE_POLICY:
10110                 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
10111                 break;
10112         case SVPD_SCSI_PORTS:
10113                 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
10114                 break;
10115         case SVPD_SCSI_TPC:
10116                 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
10117                 break;
10118         case SVPD_BLOCK_LIMITS:
10119                 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10120                         goto err;
10121                 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
10122                 break;
10123         case SVPD_BDC:
10124                 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10125                         goto err;
10126                 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
10127                 break;
10128         case SVPD_LBP:
10129                 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
10130                         goto err;
10131                 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
10132                 break;
10133         default:
10134 err:
10135                 ctl_set_invalid_field(ctsio,
10136                                       /*sks_valid*/ 1,
10137                                       /*command*/ 1,
10138                                       /*field*/ 2,
10139                                       /*bit_valid*/ 0,
10140                                       /*bit*/ 0);
10141                 ctl_done((union ctl_io *)ctsio);
10142                 retval = CTL_RETVAL_COMPLETE;
10143                 break;
10144         }
10145
10146         return (retval);
10147 }
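/*
 * Editor's note: the dispatch above keys off CDB byte 1 bit 0 (EVPD) and
 * byte 2 (page code).  A hedged sketch of building the matching
 * initiator-side 6-byte INQUIRY CDB (per SPC-4; the helper name is
 * illustrative, and it is kept simple: requesting VPD page 0x00 would
 * also need EVPD set explicitly):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void
 *	build_inquiry_cdb(uint8_t cdb[6], uint8_t page, uint16_t alloc_len)
 *	{
 *		memset(cdb, 0, 6);
 *		cdb[0] = 0x12;			// INQUIRY
 *		if (page != 0) {
 *			cdb[1] = 0x01;		// EVPD
 *			cdb[2] = page;		// VPD page code
 *		}
 *		cdb[3] = alloc_len >> 8;	// ALLOCATION LENGTH (MSB)
 *		cdb[4] = alloc_len & 0xff;
 *	}
 */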
10148
10149 /*
10150  * Standard INQUIRY data.
10151  */
10152 static int
10153 ctl_inquiry_std(struct ctl_scsiio *ctsio)
10154 {
10155         struct scsi_inquiry_data *inq_ptr;
10156         struct scsi_inquiry *cdb;
10157         struct ctl_softc *softc = control_softc;
10158         struct ctl_port *port;
10159         struct ctl_lun *lun;
10160         char *val;
10161         uint32_t alloc_len, data_len;
10162         ctl_port_type port_type;
10163
10164         port = ctl_io_port(&ctsio->io_hdr);
10165         port_type = port->port_type;
10166         if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
10167                 port_type = CTL_PORT_SCSI;
10168
10169         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10170         cdb = (struct scsi_inquiry *)ctsio->cdb;
10171         alloc_len = scsi_2btoul(cdb->length);
10172
10173         /*
10174          * We malloc the full inquiry data size here and fill it
10175          * in.  If the user only asks for less, we'll give him
10176          * that much.
10177          */
10178         data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
10179         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10180         inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
10181         ctsio->kern_sg_entries = 0;
10182         ctsio->kern_data_resid = 0;
10183         ctsio->kern_rel_offset = 0;
10184
10185         if (data_len < alloc_len) {
10186                 ctsio->residual = alloc_len - data_len;
10187                 ctsio->kern_data_len = data_len;
10188                 ctsio->kern_total_len = data_len;
10189         } else {
10190                 ctsio->residual = 0;
10191                 ctsio->kern_data_len = alloc_len;
10192                 ctsio->kern_total_len = alloc_len;
10193         }
10194
10195         if (lun != NULL) {
10196                 if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
10197                     softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
10198                         inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10199                             lun->be_lun->lun_type;
10200                 } else {
10201                         inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
10202                             lun->be_lun->lun_type;
10203                 }
10204                 if (lun->flags & CTL_LUN_REMOVABLE)
10205                         inq_ptr->dev_qual2 |= SID_RMB;
10206         } else
10207                 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
10208
10209         /* RMB in byte 2 is 0 */
10210         inq_ptr->version = SCSI_REV_SPC4;
10211
10212         /*
10213          * According to SAM-3, even if a device only supports a single
10214          * level of LUN addressing, it should still set the HISUP bit:
10215          *
10216          * 4.9.1 Logical unit numbers overview
10217          *
10218          * All logical unit number formats described in this standard are
10219          * hierarchical in structure even when only a single level in that
10220          * hierarchy is used. The HISUP bit shall be set to one in the
10221          * standard INQUIRY data (see SPC-2) when any logical unit number
10222          * format described in this standard is used.  Non-hierarchical
10223          * formats are outside the scope of this standard.
10224          *
10225          * Therefore we set the HiSup bit here.
10226          *
10227          * The response format is 2, per SPC-3.
10228          */
10229         inq_ptr->response_format = SID_HiSup | 2;
10230
10231         inq_ptr->additional_length = data_len -
10232             (offsetof(struct scsi_inquiry_data, additional_length) + 1);
10233         CTL_DEBUG_PRINT(("additional_length = %d\n",
10234                          inq_ptr->additional_length));
10235
10236         inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
10237         if (port_type == CTL_PORT_SCSI)
10238                 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
10239         inq_ptr->spc2_flags |= SPC2_SID_MultiP;
10240         inq_ptr->flags = SID_CmdQue;
10241         if (port_type == CTL_PORT_SCSI)
10242                 inq_ptr->flags |= SID_WBus16 | SID_Sync;
10243
10244         /*
10245          * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
10246          * We have 8 bytes for the vendor name, 16 bytes for the device
10247          * name, and 4 bytes for the revision.
10248          */
10249         if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10250             "vendor")) == NULL) {
10251                 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
10252         } else {
10253                 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
10254                 strncpy(inq_ptr->vendor, val,
10255                     min(sizeof(inq_ptr->vendor), strlen(val)));
10256         }
10257         if (lun == NULL) {
10258                 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10259                     sizeof(inq_ptr->product));
10260         } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
10261                 switch (lun->be_lun->lun_type) {
10262                 case T_DIRECT:
10263                         strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10264                             sizeof(inq_ptr->product));
10265                         break;
10266                 case T_PROCESSOR:
10267                         strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
10268                             sizeof(inq_ptr->product));
10269                         break;
10270                 case T_CDROM:
10271                         strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
10272                             sizeof(inq_ptr->product));
10273                         break;
10274                 default:
10275                         strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
10276                             sizeof(inq_ptr->product));
10277                         break;
10278                 }
10279         } else {
10280                 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
10281                 strncpy(inq_ptr->product, val,
10282                     min(sizeof(inq_ptr->product), strlen(val)));
10283         }
10284
10285         /*
10286          * XXX make this a macro somewhere so it automatically gets
10287          * incremented when we make changes.
10288          */
10289         if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10290             "revision")) == NULL) {
10291                 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
10292         } else {
10293                 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
10294                 strncpy(inq_ptr->revision, val,
10295                     min(sizeof(inq_ptr->revision), strlen(val)));
10296         }
10297
10298         /*
10299          * For parallel SCSI, we support double transition and single
10300          * transition clocking.  We also support QAS (Quick Arbitration
10301          * and Selection) and Information Unit transfers on both the
10302          * control and array devices.
10303          */
10304         if (port_type == CTL_PORT_SCSI)
10305                 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
10306                                     SID_SPI_IUS;
10307
10308         /* SAM-5 (no version claimed) */
10309         scsi_ulto2b(0x00A0, inq_ptr->version1);
10310         /* SPC-4 (no version claimed) */
10311         scsi_ulto2b(0x0460, inq_ptr->version2);
10312         if (port_type == CTL_PORT_FC) {
10313                 /* FCP-2 ANSI INCITS.350:2003 */
10314                 scsi_ulto2b(0x0917, inq_ptr->version3);
10315         } else if (port_type == CTL_PORT_SCSI) {
10316                 /* SPI-4 ANSI INCITS.362:200x */
10317                 scsi_ulto2b(0x0B56, inq_ptr->version3);
10318         } else if (port_type == CTL_PORT_ISCSI) {
10319                 /* iSCSI (no version claimed) */
10320                 scsi_ulto2b(0x0960, inq_ptr->version3);
10321         } else if (port_type == CTL_PORT_SAS) {
10322                 /* SAS (no version claimed) */
10323                 scsi_ulto2b(0x0BE0, inq_ptr->version3);
10324         }
10325
10326         if (lun == NULL) {
10327                 /* SBC-4 (no version claimed) */
10328                 scsi_ulto2b(0x0600, inq_ptr->version4);
10329         } else {
10330                 switch (lun->be_lun->lun_type) {
10331                 case T_DIRECT:
10332                         /* SBC-4 (no version claimed) */
10333                         scsi_ulto2b(0x0600, inq_ptr->version4);
10334                         break;
10335                 case T_PROCESSOR:
10336                         break;
10337                 case T_CDROM:
10338                         /* MMC-6 (no version claimed) */
10339                         scsi_ulto2b(0x04E0, inq_ptr->version4);
10340                         break;
10341                 default:
10342                         break;
10343                 }
10344         }
10345
10346         ctl_set_success(ctsio);
10347         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10348         ctsio->be_move_done = ctl_config_move_done;
10349         ctl_datamove((union ctl_io *)ctsio);
10350         return (CTL_RETVAL_COMPLETE);
10351 }
10352
10353 int
10354 ctl_inquiry(struct ctl_scsiio *ctsio)
10355 {
10356         struct scsi_inquiry *cdb;
10357         int retval;
10358
10359         CTL_DEBUG_PRINT(("ctl_inquiry\n"));
10360
10361         cdb = (struct scsi_inquiry *)ctsio->cdb;
10362         if (cdb->byte2 & SI_EVPD)
10363                 retval = ctl_inquiry_evpd(ctsio);
10364         else if (cdb->page_code == 0)
10365                 retval = ctl_inquiry_std(ctsio);
10366         else {
10367                 ctl_set_invalid_field(ctsio,
10368                                       /*sks_valid*/ 1,
10369                                       /*command*/ 1,
10370                                       /*field*/ 2,
10371                                       /*bit_valid*/ 0,
10372                                       /*bit*/ 0);
10373                 ctl_done((union ctl_io *)ctsio);
10374                 return (CTL_RETVAL_COMPLETE);
10375         }
10376
10377         return (retval);
10378 }
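
/*
 * Illustrative note (not part of the original source): the INQUIRY CDB
 * fields driving the dispatch above.  Byte 1 bit 0 is the EVPD flag and
 * byte 2 is the page code.  SPC only permits a nonzero page code when
 * EVPD is set, which is why the else-branch returns ILLEGAL REQUEST
 * with the sense-key-specific bytes pointing at byte 2 of the CDB.
 */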
10379
10380 int
10381 ctl_get_config(struct ctl_scsiio *ctsio)
10382 {
10383         struct scsi_get_config_header *hdr;
10384         struct scsi_get_config_feature *feature;
10385         struct scsi_get_config *cdb;
10386         struct ctl_lun *lun;
10387         uint32_t alloc_len, data_len;
10388         int rt, starting;
10389
10390         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10391         cdb = (struct scsi_get_config *)ctsio->cdb;
10392         rt = (cdb->rt & SGC_RT_MASK);
10393         starting = scsi_2btoul(cdb->starting_feature);
10394         alloc_len = scsi_2btoul(cdb->length);
10395
10396         data_len = sizeof(struct scsi_get_config_header) +
10397             sizeof(struct scsi_get_config_feature) + 8 +
10398             sizeof(struct scsi_get_config_feature) + 8 +
10399             sizeof(struct scsi_get_config_feature) + 4 +
10400             sizeof(struct scsi_get_config_feature) + 4 +
10401             sizeof(struct scsi_get_config_feature) + 8 +
10402             sizeof(struct scsi_get_config_feature) +
10403             sizeof(struct scsi_get_config_feature) + 4 +
10404             sizeof(struct scsi_get_config_feature) + 4 +
10405             sizeof(struct scsi_get_config_feature) + 4 +
10406             sizeof(struct scsi_get_config_feature) + 4 +
10407             sizeof(struct scsi_get_config_feature) + 4 +
10408             sizeof(struct scsi_get_config_feature) + 4;
10409         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10410         ctsio->kern_sg_entries = 0;
10411         ctsio->kern_data_resid = 0;
10412         ctsio->kern_rel_offset = 0;
10413
10414         hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
10415         if (lun->flags & CTL_LUN_NO_MEDIA)
10416                 scsi_ulto2b(0x0000, hdr->current_profile);
10417         else
10418                 scsi_ulto2b(0x0010, hdr->current_profile);
10419         feature = (struct scsi_get_config_feature *)(hdr + 1);
10420
10421         if (starting > 0x003b)
10422                 goto done;
10423         if (starting > 0x003a)
10424                 goto f3b;
10425         if (starting > 0x002b)
10426                 goto f3a;
10427         if (starting > 0x002a)
10428                 goto f2b;
10429         if (starting > 0x001f)
10430                 goto f2a;
10431         if (starting > 0x001e)
10432                 goto f1f;
10433         if (starting > 0x001d)
10434                 goto f1e;
10435         if (starting > 0x0010)
10436                 goto f1d;
10437         if (starting > 0x0003)
10438                 goto f10;
10439         if (starting > 0x0002)
10440                 goto f3;
10441         if (starting > 0x0001)
10442                 goto f2;
10443         if (starting > 0x0000)
10444                 goto f1;
10445
10446         /* Profile List */
10447         scsi_ulto2b(0x0000, feature->feature_code);
10448         feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
10449         feature->add_length = 8;
10450         scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */
10451         feature->feature_data[2] = 0x00;
10452         scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */
10453         feature->feature_data[6] = 0x01;
10454         feature = (struct scsi_get_config_feature *)
10455             &feature->feature_data[feature->add_length];
10456
10457 f1:     /* Core */
10458         scsi_ulto2b(0x0001, feature->feature_code);
10459         feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10460         feature->add_length = 8;
10461         scsi_ulto4b(0x00000000, &feature->feature_data[0]);
10462         feature->feature_data[4] = 0x03;
10463         feature = (struct scsi_get_config_feature *)
10464             &feature->feature_data[feature->add_length];
10465
10466 f2:     /* Morphing */
10467         scsi_ulto2b(0x0002, feature->feature_code);
10468         feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10469         feature->add_length = 4;
10470         feature->feature_data[0] = 0x02;
10471         feature = (struct scsi_get_config_feature *)
10472             &feature->feature_data[feature->add_length];
10473
10474 f3:     /* Removable Medium */
10475         scsi_ulto2b(0x0003, feature->feature_code);
10476         feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
10477         feature->add_length = 4;
10478         feature->feature_data[0] = 0x39;
10479         feature = (struct scsi_get_config_feature *)
10480             &feature->feature_data[feature->add_length];
10481
10482         if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
10483                 goto done;
10484
10485 f10:    /* Random Read */
10486         scsi_ulto2b(0x0010, feature->feature_code);
10487         feature->flags = 0x00;
10488         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10489                 feature->flags |= SGC_F_CURRENT;
10490         feature->add_length = 8;
10491         scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
10492         scsi_ulto2b(1, &feature->feature_data[4]);
10493         feature->feature_data[6] = 0x00;
10494         feature = (struct scsi_get_config_feature *)
10495             &feature->feature_data[feature->add_length];
10496
10497 f1d:    /* Multi-Read */
10498         scsi_ulto2b(0x001D, feature->feature_code);
10499         feature->flags = 0x00;
10500         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10501                 feature->flags |= SGC_F_CURRENT;
10502         feature->add_length = 0;
10503         feature = (struct scsi_get_config_feature *)
10504             &feature->feature_data[feature->add_length];
10505
10506 f1e:    /* CD Read */
10507         scsi_ulto2b(0x001E, feature->feature_code);
10508         feature->flags = 0x00;
10509         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10510                 feature->flags |= SGC_F_CURRENT;
10511         feature->add_length = 4;
10512         feature->feature_data[0] = 0x00;
10513         feature = (struct scsi_get_config_feature *)
10514             &feature->feature_data[feature->add_length];
10515
10516 f1f:    /* DVD Read */
10517         scsi_ulto2b(0x001F, feature->feature_code);
10518         feature->flags = 0x08;
10519         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10520                 feature->flags |= SGC_F_CURRENT;
10521         feature->add_length = 4;
10522         feature->feature_data[0] = 0x01;
10523         feature->feature_data[2] = 0x03;
10524         feature = (struct scsi_get_config_feature *)
10525             &feature->feature_data[feature->add_length];
10526
10527 f2a:    /* DVD+RW */
10528         scsi_ulto2b(0x002A, feature->feature_code);
10529         feature->flags = 0x04;
10530         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10531                 feature->flags |= SGC_F_CURRENT;
10532         feature->add_length = 4;
10533         feature->feature_data[0] = 0x00;
10534         feature->feature_data[1] = 0x00;
10535         feature = (struct scsi_get_config_feature *)
10536             &feature->feature_data[feature->add_length];
10537
10538 f2b:    /* DVD+R */
10539         scsi_ulto2b(0x002B, feature->feature_code);
10540         feature->flags = 0x00;
10541         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10542                 feature->flags |= SGC_F_CURRENT;
10543         feature->add_length = 4;
10544         feature->feature_data[0] = 0x00;
10545         feature = (struct scsi_get_config_feature *)
10546             &feature->feature_data[feature->add_length];
10547
10548 f3a:    /* DVD+RW Dual Layer */
10549         scsi_ulto2b(0x003A, feature->feature_code);
10550         feature->flags = 0x00;
10551         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10552                 feature->flags |= SGC_F_CURRENT;
10553         feature->add_length = 4;
10554         feature->feature_data[0] = 0x00;
10555         feature->feature_data[1] = 0x00;
10556         feature = (struct scsi_get_config_feature *)
10557             &feature->feature_data[feature->add_length];
10558
10559 f3b:    /* DVD+R Dual Layer */
10560         scsi_ulto2b(0x003B, feature->feature_code);
10561         feature->flags = 0x00;
10562         if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
10563                 feature->flags |= SGC_F_CURRENT;
10564         feature->add_length = 4;
10565         feature->feature_data[0] = 0x00;
10566         feature = (struct scsi_get_config_feature *)
10567             &feature->feature_data[feature->add_length];
10568
10569 done:
10570         data_len = (uint8_t *)feature - (uint8_t *)hdr;
10571         if (rt == SGC_RT_SPECIFIC && data_len > 4) {
10572                 feature = (struct scsi_get_config_feature *)(hdr + 1);
10573                 if (scsi_2btoul(feature->feature_code) == starting)
10574                         feature = (struct scsi_get_config_feature *)
10575                             &feature->feature_data[feature->add_length];
10576                 data_len = (uint8_t *)feature - (uint8_t *)hdr;
10577         }
10578         scsi_ulto4b(data_len - 4, hdr->data_length);
10579         if (data_len < alloc_len) {
10580                 ctsio->residual = alloc_len - data_len;
10581                 ctsio->kern_data_len = data_len;
10582                 ctsio->kern_total_len = data_len;
10583         } else {
10584                 ctsio->residual = 0;
10585                 ctsio->kern_data_len = alloc_len;
10586                 ctsio->kern_total_len = alloc_len;
10587         }
10588
10589         ctl_set_success(ctsio);
10590         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10591         ctsio->be_move_done = ctl_config_move_done;
10592         ctl_datamove((union ctl_io *)ctsio);
10593         return (CTL_RETVAL_COMPLETE);
10594 }
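
/*
 * Illustrative sketch (not part of the original source; the function
 * name is hypothetical): how an initiator would walk the feature list
 * built above.  Each feature descriptor is a 4-byte header (feature
 * code, flags, additional length) followed by add_length bytes of
 * data, so the next descriptor starts add_length + 4 bytes later --
 * the same stride by which ctl_get_config() advances its feature
 * pointer.
 */
static uint32_t
ctl_count_features_sketch(const uint8_t *buf, uint32_t len)
{
        uint32_t count, off;

        count = 0;
        off = 8;                /* Skip the GET CONFIGURATION header. */
        while (off + 4 <= len) {
                count++;
                off += 4 + buf[off + 3];        /* Header plus add_length. */
        }
        return (count);
}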
10595
10596 int
10597 ctl_get_event_status(struct ctl_scsiio *ctsio)
10598 {
10599         struct scsi_get_event_status_header *hdr;
10600         struct scsi_get_event_status *cdb;
10601         struct ctl_lun *lun;
10602         uint32_t alloc_len, data_len;
10603         int notif_class;
10604
10605         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10606         cdb = (struct scsi_get_event_status *)ctsio->cdb;
10607         if ((cdb->byte2 & SGESN_POLLED) == 0) {
10608                 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
10609                     /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
10610                 ctl_done((union ctl_io *)ctsio);
10611                 return (CTL_RETVAL_COMPLETE);
10612         }
10613         notif_class = cdb->notif_class;
10614         alloc_len = scsi_2btoul(cdb->length);
10615
10616         data_len = sizeof(struct scsi_get_event_status_header);
10617         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10618         ctsio->kern_sg_entries = 0;
10619         ctsio->kern_data_resid = 0;
10620         ctsio->kern_rel_offset = 0;
10621
10622         if (data_len < alloc_len) {
10623                 ctsio->residual = alloc_len - data_len;
10624                 ctsio->kern_data_len = data_len;
10625                 ctsio->kern_total_len = data_len;
10626         } else {
10627                 ctsio->residual = 0;
10628                 ctsio->kern_data_len = alloc_len;
10629                 ctsio->kern_total_len = alloc_len;
10630         }
10631
10632         hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
10633         scsi_ulto2b(0, hdr->descr_length);
10634         hdr->nea_class = SGESN_NEA;
10635         hdr->supported_class = 0;
10636
10637         ctl_set_success(ctsio);
10638         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10639         ctsio->be_move_done = ctl_config_move_done;
10640         ctl_datamove((union ctl_io *)ctsio);
10641         return (CTL_RETVAL_COMPLETE);
10642 }
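
/*
 * Illustrative sketch (not part of the original source; the helper is
 * hypothetical): the allocation-length handling repeated throughout
 * the handlers in this file.  The SPC allocation length is a ceiling
 * set by the initiator: the target moves min(data_len, alloc_len)
 * bytes and accounts for the rest as residual, never returning more
 * than was asked for.
 */
static void
ctl_clamp_alloc_len_sketch(struct ctl_scsiio *ctsio, uint32_t data_len,
    uint32_t alloc_len)
{
        if (data_len < alloc_len) {
                ctsio->residual = alloc_len - data_len;
                ctsio->kern_data_len = data_len;
                ctsio->kern_total_len = data_len;
        } else {
                ctsio->residual = 0;
                ctsio->kern_data_len = alloc_len;
                ctsio->kern_total_len = alloc_len;
        }
}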
10643
10644 int
10645 ctl_mechanism_status(struct ctl_scsiio *ctsio)
10646 {
10647         struct scsi_mechanism_status_header *hdr;
10648         struct scsi_mechanism_status *cdb;
10649         struct ctl_lun *lun;
10650         uint32_t alloc_len, data_len;
10651
10652         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10653         cdb = (struct scsi_mechanism_status *)ctsio->cdb;
10654         alloc_len = scsi_2btoul(cdb->length);
10655
10656         data_len = sizeof(struct scsi_mechanism_status_header);
10657         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10658         ctsio->kern_sg_entries = 0;
10659         ctsio->kern_data_resid = 0;
10660         ctsio->kern_rel_offset = 0;
10661
10662         if (data_len < alloc_len) {
10663                 ctsio->residual = alloc_len - data_len;
10664                 ctsio->kern_data_len = data_len;
10665                 ctsio->kern_total_len = data_len;
10666         } else {
10667                 ctsio->residual = 0;
10668                 ctsio->kern_data_len = alloc_len;
10669                 ctsio->kern_total_len = alloc_len;
10670         }
10671
10672         hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
10673         hdr->state1 = 0x00;
10674         hdr->state2 = 0xe0;
10675         scsi_ulto3b(0, hdr->lba);
10676         hdr->slots_num = 0;
10677         scsi_ulto2b(0, hdr->slots_length);
10678
10679         ctl_set_success(ctsio);
10680         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10681         ctsio->be_move_done = ctl_config_move_done;
10682         ctl_datamove((union ctl_io *)ctsio);
10683         return (CTL_RETVAL_COMPLETE);
10684 }
10685
10686 static void
10687 ctl_ultomsf(uint32_t lba, uint8_t *buf)
10688 {
10689
10690         lba += 150;
10691         buf[0] = 0;
10692         buf[1] = bin2bcd((lba / 75) / 60);
10693         buf[2] = bin2bcd((lba / 75) % 60);
10694         buf[3] = bin2bcd(lba % 75);
10695 }
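
/*
 * Illustrative sketch (not part of the original source; the function
 * name is hypothetical): the inverse of ctl_ultomsf() above, taking
 * binary (not yet BCD-encoded) minute/second/frame values.  CD
 * addressing runs at 75 frames per second, and MSF addresses include
 * the 150-frame (two second) pregap, so LBA 0 encodes as 00:02:00.
 */
static uint32_t
ctl_msftolba_sketch(uint8_t m, uint8_t s, uint8_t f)
{

        return ((((uint32_t)m * 60 + s) * 75 + f) - 150);
}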
10696
10697 int
10698 ctl_read_toc(struct ctl_scsiio *ctsio)
10699 {
10700         struct scsi_read_toc_hdr *hdr;
10701         struct scsi_read_toc_type01_descr *descr;
10702         struct scsi_read_toc *cdb;
10703         struct ctl_lun *lun;
10704         uint32_t alloc_len, data_len;
10705         int format, msf;
10706
10707         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10708         cdb = (struct scsi_read_toc *)ctsio->cdb;
10709         msf = (cdb->byte2 & CD_MSF) != 0;
10710         format = cdb->format;
10711         alloc_len = scsi_2btoul(cdb->data_len);
10712
10713         data_len = sizeof(struct scsi_read_toc_hdr);
10714         if (format == 0)
10715                 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
10716         else
10717                 data_len += sizeof(struct scsi_read_toc_type01_descr);
10718         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10719         ctsio->kern_sg_entries = 0;
10720         ctsio->kern_data_resid = 0;
10721         ctsio->kern_rel_offset = 0;
10722
10723         if (data_len < alloc_len) {
10724                 ctsio->residual = alloc_len - data_len;
10725                 ctsio->kern_data_len = data_len;
10726                 ctsio->kern_total_len = data_len;
10727         } else {
10728                 ctsio->residual = 0;
10729                 ctsio->kern_data_len = alloc_len;
10730                 ctsio->kern_total_len = alloc_len;
10731         }
10732
10733         hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
10734         if (format == 0) {
10735                 scsi_ulto2b(0x12, hdr->data_length);
10736                 hdr->first = 1;
10737                 hdr->last = 1;
10738                 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
10739                 descr->addr_ctl = 0x14;
10740                 descr->track_number = 1;
10741                 if (msf)
10742                         ctl_ultomsf(0, descr->track_start);
10743                 else
10744                         scsi_ulto4b(0, descr->track_start);
10745                 descr++;
10746                 descr->addr_ctl = 0x14;
10747                 descr->track_number = 0xaa;
10748                 if (msf)
10749                         ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
10750                 else
10751                         scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
10752         } else {
10753                 scsi_ulto2b(0x0a, hdr->data_length);
10754                 hdr->first = 1;
10755                 hdr->last = 1;
10756                 descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
10757                 descr->addr_ctl = 0x14;
10758                 descr->track_number = 1;
10759                 if (msf)
10760                         ctl_ultomsf(0, descr->track_start);
10761                 else
10762                         scsi_ulto4b(0, descr->track_start);
10763         }
10764
10765         ctl_set_success(ctsio);
10766         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10767         ctsio->be_move_done = ctl_config_move_done;
10768         ctl_datamove((union ctl_io *)ctsio);
10769         return (CTL_RETVAL_COMPLETE);
10770 }
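
/*
 * Worked example (not part of the original source): where the 0x12 and
 * 0x0a values above come from.  The TOC header is 4 bytes and each
 * type 01 descriptor is 8, so the format 0 response (track 1 plus the
 * 0xAA lead-out track) totals 4 + 2 * 8 = 20 bytes; data_length
 * excludes its own two bytes, giving 20 - 2 = 18 = 0x12.  The
 * single-descriptor case is 4 + 8 - 2 = 10 = 0x0a.
 */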
10771
10772 /*
10773  * For known CDB types, parse the LBA and length.
10774  */
10775 static int
10776 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
10777 {
10778         if (io->io_hdr.io_type != CTL_IO_SCSI)
10779                 return (1);
10780
10781         switch (io->scsiio.cdb[0]) {
10782         case COMPARE_AND_WRITE: {
10783                 struct scsi_compare_and_write *cdb;
10784
10785                 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
10786
10787                 *lba = scsi_8btou64(cdb->addr);
10788                 *len = cdb->length;
10789                 break;
10790         }
10791         case READ_6:
10792         case WRITE_6: {
10793                 struct scsi_rw_6 *cdb;
10794
10795                 cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
10796
10797                 *lba = scsi_3btoul(cdb->addr);
10798                 /* only 5 bits are valid in the most significant address byte */
10799                 *lba &= 0x1fffff;
10800                 *len = cdb->length;
10801                 break;
10802         }
10803         case READ_10:
10804         case WRITE_10: {
10805                 struct scsi_rw_10 *cdb;
10806
10807                 cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
10808
10809                 *lba = scsi_4btoul(cdb->addr);
10810                 *len = scsi_2btoul(cdb->length);
10811                 break;
10812         }
10813         case WRITE_VERIFY_10: {
10814                 struct scsi_write_verify_10 *cdb;
10815
10816                 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
10817
10818                 *lba = scsi_4btoul(cdb->addr);
10819                 *len = scsi_2btoul(cdb->length);
10820                 break;
10821         }
10822         case READ_12:
10823         case WRITE_12: {
10824                 struct scsi_rw_12 *cdb;
10825
10826                 cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
10827
10828                 *lba = scsi_4btoul(cdb->addr);
10829                 *len = scsi_4btoul(cdb->length);
10830                 break;
10831         }
10832         case WRITE_VERIFY_12: {
10833                 struct scsi_write_verify_12 *cdb;
10834
10835                 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
10836
10837                 *lba = scsi_4btoul(cdb->addr);
10838                 *len = scsi_4btoul(cdb->length);
10839                 break;
10840         }
10841         case READ_16:
10842         case WRITE_16: {
10843                 struct scsi_rw_16 *cdb;
10844
10845                 cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
10846
10847                 *lba = scsi_8btou64(cdb->addr);
10848                 *len = scsi_4btoul(cdb->length);
10849                 break;
10850         }
10851         case WRITE_ATOMIC_16: {
10852                 struct scsi_write_atomic_16 *cdb;
10853
10854                 cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;
10855
10856                 *lba = scsi_8btou64(cdb->addr);
10857                 *len = scsi_2btoul(cdb->length);
10858                 break;
10859         }
10860         case WRITE_VERIFY_16: {
10861                 struct scsi_write_verify_16 *cdb;
10862
10863                 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
10864
10865                 *lba = scsi_8btou64(cdb->addr);
10866                 *len = scsi_4btoul(cdb->length);
10867                 break;
10868         }
10869         case WRITE_SAME_10: {
10870                 struct scsi_write_same_10 *cdb;
10871
10872                 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
10873
10874                 *lba = scsi_4btoul(cdb->addr);
10875                 *len = scsi_2btoul(cdb->length);
10876                 break;
10877         }
10878         case WRITE_SAME_16: {
10879                 struct scsi_write_same_16 *cdb;
10880
10881                 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
10882
10883                 *lba = scsi_8btou64(cdb->addr);
10884                 *len = scsi_4btoul(cdb->length);
10885                 break;
10886         }
10887         case VERIFY_10: {
10888                 struct scsi_verify_10 *cdb;
10889
10890                 cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
10891
10892                 *lba = scsi_4btoul(cdb->addr);
10893                 *len = scsi_2btoul(cdb->length);
10894                 break;
10895         }
10896         case VERIFY_12: {
10897                 struct scsi_verify_12 *cdb;
10898
10899                 cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
10900
10901                 *lba = scsi_4btoul(cdb->addr);
10902                 *len = scsi_4btoul(cdb->length);
10903                 break;
10904         }
10905         case VERIFY_16: {
10906                 struct scsi_verify_16 *cdb;
10907
10908                 cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
10909
10910                 *lba = scsi_8btou64(cdb->addr);
10911                 *len = scsi_4btoul(cdb->length);
10912                 break;
10913         }
10914         case UNMAP: {
10915                 *lba = 0;
10916                 *len = UINT64_MAX;
10917                 break;
10918         }
10919         case SERVICE_ACTION_IN: {       /* GET LBA STATUS */
10920                 struct scsi_get_lba_status *cdb;
10921
10922                 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
10923                 *lba = scsi_8btou64(cdb->addr);
10924                 *len = UINT32_MAX;
10925                 break;
10926         }
10927         default:
10928                 return (1);
10929                 break; /* NOTREACHED */
10930         }
10931
10932         return (0);
10933 }
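
/*
 * Illustrative sketch (not part of the original source; the function
 * name is hypothetical): the READ(6)/WRITE(6) decode above done by
 * hand.  The 21-bit LBA is the low 5 bits of byte 1 followed by bytes
 * 2 and 3, which is what the scsi_3btoul()/0x1fffff mask pair
 * computes.  Note that SBC defines a transfer length of 0 as 256
 * blocks for the 6-byte commands; ctl_get_lba_len() stores the raw
 * byte, while this sketch applies the conversion.
 */
static void
ctl_decode_rw6_sketch(const uint8_t cdb[6], uint64_t *lba, uint64_t *len)
{

        /* 21-bit LBA: low 5 bits of byte 1, then bytes 2 and 3. */
        *lba = ((uint64_t)(cdb[1] & 0x1f) << 16) |
            ((uint64_t)cdb[2] << 8) | cdb[3];
        /* Per SBC, a 6-byte CDB transfer length of 0 means 256 blocks. */
        *len = (cdb[4] != 0) ? cdb[4] : 256;
}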
10934
10935 static ctl_action
10936 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
10937     bool seq)
10938 {
10939         uint64_t endlba1, endlba2;
10940
10941         endlba1 = lba1 + len1 - (seq ? 0 : 1);
10942         endlba2 = lba2 + len2 - 1;
10943
10944         if ((endlba1 < lba2) || (endlba2 < lba1))
10945                 return (CTL_ACTION_PASS);
10946         else
10947                 return (CTL_ACTION_BLOCK);
10948 }
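
/*
 * Worked example (not part of the original source): with lba1 = 0 and
 * len1 = 8, the first extent covers blocks 0-7.  For lba2 = 8 the
 * plain (seq == false) check computes endlba1 = 7 < lba2, so the
 * abutting request passes.  With seq == true, endlba1 = lba1 + len1 =
 * 8, the extents now touch, and the second request is blocked --
 * which keeps back-to-back extents in arrival order for
 * sequential-sensitive backends.
 */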
10949
10950 static int
10951 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
10952 {
10953         struct ctl_ptr_len_flags *ptrlen;
10954         struct scsi_unmap_desc *buf, *end, *range;
10955         uint64_t lba;
10956         uint32_t len;
10957
10958         /* If it's not UNMAP -- take the other path. */
10959         if (io->io_hdr.io_type != CTL_IO_SCSI ||
10960             io->scsiio.cdb[0] != UNMAP)
10961                 return (CTL_ACTION_ERROR);
10962
10963         /* If UNMAP without data -- block and wait for data. */
10964         ptrlen = (struct ctl_ptr_len_flags *)
10965             &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
10966         if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
10967             ptrlen->ptr == NULL)
10968                 return (CTL_ACTION_BLOCK);
10969
10970         /* UNMAP with data -- check for collision. */
10971         buf = (struct scsi_unmap_desc *)ptrlen->ptr;
10972         end = buf + ptrlen->len / sizeof(*buf);
10973         for (range = buf; range < end; range++) {
10974                 lba = scsi_8btou64(range->lba);
10975                 len = scsi_4btoul(range->length);
10976                 if ((lba < lba2 + len2) && (lba + len > lba2))
10977                         return (CTL_ACTION_BLOCK);
10978         }
10979         return (CTL_ACTION_PASS);
10980 }
10981
10982 static ctl_action
10983 ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
10984 {
10985         uint64_t lba1, lba2;
10986         uint64_t len1, len2;
10987         int retval;
10988
10989         if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
10990                 return (CTL_ACTION_ERROR);
10991
10992         retval = ctl_extent_check_unmap(io1, lba2, len2);
10993         if (retval != CTL_ACTION_ERROR)
10994                 return (retval);
10995
10996         if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
10997                 return (CTL_ACTION_ERROR);
10998
10999         if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
11000                 seq = FALSE;
11001         return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
11002 }
11003
11004 static ctl_action
11005 ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
11006 {
11007         uint64_t lba1, lba2;
11008         uint64_t len1, len2;
11009
11010         if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
11011                 return (CTL_ACTION_PASS);
11012         if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
11013                 return (CTL_ACTION_ERROR);
11014         if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
11015                 return (CTL_ACTION_ERROR);
11016
11017         if (lba1 + len1 == lba2)
11018                 return (CTL_ACTION_BLOCK);
11019         return (CTL_ACTION_PASS);
11020 }
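
/*
 * Illustrative note (not part of the original source): unlike
 * ctl_extent_check(), this blocks only when the second extent begins
 * exactly where the first one ends (lba1 + len1 == lba2), i.e. when
 * the two requests are strictly back-to-back; anything else may be
 * dispatched concurrently.
 */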
11021
11022 static ctl_action
11023 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
11024     union ctl_io *ooa_io)
11025 {
11026         const struct ctl_cmd_entry *pending_entry, *ooa_entry;
11027         const ctl_serialize_action *serialize_row;
11028
11029         /*
11030          * The initiator attempted multiple untagged commands at the same
11031          * time.  Can't do that.
11032          */
11033         if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11034          && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11035          && ((pending_io->io_hdr.nexus.targ_port ==
11036               ooa_io->io_hdr.nexus.targ_port)
11037           && (pending_io->io_hdr.nexus.initid ==
11038               ooa_io->io_hdr.nexus.initid))
11039          && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
11040               CTL_FLAG_STATUS_SENT)) == 0))
11041                 return (CTL_ACTION_OVERLAP);
11042
11043         /*
11044          * The initiator attempted to send multiple tagged commands with
11045          * the same ID.  (It's fine if different initiators have the same
11046          * tag ID.)
11047          *
11048          * Even if all of those conditions are true, we don't kill the I/O
11049          * if the command ahead of us has been aborted.  We won't end up
11050          * sending it to the FETD, and it's perfectly legal to resend a
11051          * command with the same tag number as long as the previous
11052          * instance of this tag number has been aborted somehow.
11053          */
11054         if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
11055          && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
11056          && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
11057          && ((pending_io->io_hdr.nexus.targ_port ==
11058               ooa_io->io_hdr.nexus.targ_port)
11059           && (pending_io->io_hdr.nexus.initid ==
11060               ooa_io->io_hdr.nexus.initid))
11061          && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
11062               CTL_FLAG_STATUS_SENT)) == 0))
11063                 return (CTL_ACTION_OVERLAP_TAG);
11064
11065         /*
11066          * If we get a head of queue tag, SAM-3 says that we should
11067          * immediately execute it.
11068          *
11069          * What happens if this command would normally block for some other
11070          * reason?  e.g. a request sense with a head of queue tag
11071          * immediately after a write.  Normally that would block, but this
11072          * will result in its getting executed immediately...
11073          *
11074          * We currently return "pass" instead of "skip", so we'll end up
11075          * going through the rest of the queue to check for overlapped tags.
11076          *
11077          * XXX KDM check for other types of blockage first??
11078          */
11079         if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
11080                 return (CTL_ACTION_PASS);
11081
11082         /*
11083          * Ordered tags have to block until all items ahead of them
11084          * have completed.  If we get called with an ordered tag, we always
11085          * block, if something else is ahead of us in the queue.
11086          */
11087         if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
11088                 return (CTL_ACTION_BLOCK);
11089
11090         /*
11091          * Simple tags get blocked until all head of queue and ordered tags
11092          * ahead of them have completed.  I'm lumping untagged commands in
11093          * with simple tags here.  XXX KDM is that the right thing to do?
11094          */
11095         if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11096           || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
11097          && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
11098           || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
11099                 return (CTL_ACTION_BLOCK);
11100
11101         pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
11102         KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
11103             ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
11104              __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
11105              pending_io->scsiio.cdb[1], pending_io));
11106         ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
11107         if (ooa_entry->seridx == CTL_SERIDX_INVLD)
11108                 return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
11109         KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
11110             ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
11111              __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
11112              ooa_io->scsiio.cdb[1], ooa_io));
11113
11114         serialize_row = ctl_serialize_table[ooa_entry->seridx];
11115
11116         switch (serialize_row[pending_entry->seridx]) {
11117         case CTL_SER_BLOCK:
11118                 return (CTL_ACTION_BLOCK);
11119         case CTL_SER_EXTENT:
11120                 return (ctl_extent_check(ooa_io, pending_io,
11121                     (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
11122         case CTL_SER_EXTENTOPT:
11123                 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
11124                     & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
11125                         return (ctl_extent_check(ooa_io, pending_io,
11126                             (lun->be_lun &&
11127                              lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
11128                 return (CTL_ACTION_PASS);
11129         case CTL_SER_EXTENTSEQ:
11130                 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
11131                         return (ctl_extent_check_seq(ooa_io, pending_io));
11132                 return (CTL_ACTION_PASS);
11133         case CTL_SER_PASS:
11134                 return (CTL_ACTION_PASS);
11135         case CTL_SER_BLOCKOPT:
11136                 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
11137                     & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
11138                         return (CTL_ACTION_BLOCK);
11139                 return (CTL_ACTION_PASS);
11140         case CTL_SER_SKIP:
11141                 return (CTL_ACTION_SKIP);
11142         default:
11143                 panic("%s: Invalid serialization value %d for %d => %d",
11144                     __func__, serialize_row[pending_entry->seridx],
11145                     pending_entry->seridx, ooa_entry->seridx);
11146         }
11147
11148         return (CTL_ACTION_ERROR);
11149 }
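
/*
 * Illustrative sketch (not part of the original source; the function
 * name is hypothetical): the serialization-table lookup above in
 * isolation.  ctl_serialize_table is a square matrix indexed first by
 * the seridx of the command already in the OOA queue (selecting a
 * row), then by the seridx of the pending command (selecting the
 * action that decides whether the pending command passes, blocks, or
 * is checked for extent overlap).
 */
static ctl_serialize_action
ctl_ser_lookup_sketch(u_int ooa_seridx, u_int pending_seridx)
{
        const ctl_serialize_action *row;

        row = ctl_serialize_table[ooa_seridx];
        return (row[pending_seridx]);
}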
11150
11151 /*
11152  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
11153  * Assumptions:
11154  * - pending_io is generally either incoming, or on the blocked queue
11155  * - starting I/O is the I/O we want to start the check with.
11156  */
11157 static ctl_action
11158 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
11159               union ctl_io *starting_io)
11160 {
11161         union ctl_io *ooa_io;
11162         ctl_action action;
11163
11164         mtx_assert(&lun->lun_lock, MA_OWNED);
11165
11166         /*
11167          * Run back along the OOA queue, starting with the current
11168          * blocked I/O and going through every I/O before it on the
11169          * queue.  If starting_io is NULL, we'll just end up returning
11170          * CTL_ACTION_PASS.
11171          */
11172         for (ooa_io = starting_io; ooa_io != NULL;
11173              ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
11174              ooa_links)){
11175
11176                 /*
11177                  * This routine just checks to see whether
11178                  * pending_io is blocked by ooa_io, which is ahead
11179                  * of it in the queue.  It doesn't queue/dequeue
11180                  * pending_io.
11181                  */
11182                 action = ctl_check_for_blockage(lun, pending_io, ooa_io);
11183                 switch (action) {
11184                 case CTL_ACTION_BLOCK:
11185                 case CTL_ACTION_OVERLAP:
11186                 case CTL_ACTION_OVERLAP_TAG:
11187                 case CTL_ACTION_SKIP:
11188                 case CTL_ACTION_ERROR:
11189                         return (action);
11190                         break; /* NOTREACHED */
11191                 case CTL_ACTION_PASS:
11192                         break;
11193                 default:
11194                         panic("%s: Invalid action %d\n", __func__, action);
11195                 }
11196         }
11197
11198         return (CTL_ACTION_PASS);
11199 }
11200
11201 /*
11202  * Assumptions:
11203  * - An I/O has just completed, and has been removed from the per-LUN OOA
11204  *   queue, so some items on the blocked queue may now be unblocked.
11205  */
11206 static int
11207 ctl_check_blocked(struct ctl_lun *lun)
11208 {
11209         struct ctl_softc *softc = lun->ctl_softc;
11210         union ctl_io *cur_blocked, *next_blocked;
11211
11212         mtx_assert(&lun->lun_lock, MA_OWNED);
11213
11214         /*
11215          * Run forward from the head of the blocked queue, checking each
11216          * entry against the I/Os prior to it on the OOA queue to see if
11217          * there is still any blockage.
11218          *
11219          * We cannot use the TAILQ_FOREACH() macro, because it can't cope
11220          * with the current element being removed from the list while the
11221          * list is being traversed.
11222          */
11223         for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
11224              cur_blocked != NULL; cur_blocked = next_blocked) {
11225                 union ctl_io *prev_ooa;
11226                 ctl_action action;
11227
11228                 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
11229                                                           blocked_links);
11230
11231                 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
11232                                                       ctl_ooaq, ooa_links);
11233
11234                 /*
11235                  * If cur_blocked happens to be the first item in the OOA
11236                  * queue now, prev_ooa will be NULL, and the action
11237                  * returned will just be CTL_ACTION_PASS.
11238                  */
11239                 action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
11240
11241                 switch (action) {
11242                 case CTL_ACTION_BLOCK:
11243                         /* Nothing to do here, still blocked */
11244                         break;
11245                 case CTL_ACTION_OVERLAP:
11246                 case CTL_ACTION_OVERLAP_TAG:
11247                         /*
11248                          * This shouldn't happen!  In theory we've already
11249                          * checked this command for overlap...
11250                          */
11251                         break;
11252                 case CTL_ACTION_PASS:
11253                 case CTL_ACTION_SKIP: {
11254                         const struct ctl_cmd_entry *entry;
11255
11256                         /*
11257                          * The skip case shouldn't happen, this transaction
11258                          * should have never made it onto the blocked queue.
11259                          */
11260                         /*
11261                          * This I/O is no longer blocked, we can remove it
11262                          * from the blocked queue.  Since this is a TAILQ
11263                          * (doubly linked list), we can do O(1) removals
11264                          * from any place on the list.
11265                          */
11266                         TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
11267                                      blocked_links);
11268                         cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
11269
11270                         if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
11271                             (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
11272                                 /*
11273                                  * Need to send IO back to original side to
11274                                  * run
11275                                  */
11276                                 union ctl_ha_msg msg_info;
11277
11278                                 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11279                                 msg_info.hdr.original_sc =
11280                                         cur_blocked->io_hdr.original_sc;
11281                                 msg_info.hdr.serializing_sc = cur_blocked;
11282                                 msg_info.hdr.msg_type = CTL_MSG_R2R;
11283                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11284                                     sizeof(msg_info.hdr), M_NOWAIT);
11285                                 break;
11286                         }
11287                         entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);
11288
11289                         /*
11290                          * Check this I/O for LUN state changes that may
11291                          * have happened while this command was blocked.
11292                          * The LUN state may have been changed by a command
11293                          * ahead of us in the queue, so we need to re-check
11294                          * for any states that can be caused by SCSI
11295                          * commands.
11296                          */
11297                         if (ctl_scsiio_lun_check(lun, entry,
11298                                                  &cur_blocked->scsiio) == 0) {
11299                                 cur_blocked->io_hdr.flags |=
11300                                                       CTL_FLAG_IS_WAS_ON_RTR;
11301                                 ctl_enqueue_rtr(cur_blocked);
11302                         } else
11303                                 ctl_done(cur_blocked);
11304                         break;
11305                 }
11306                 default:
11307                         /*
11308                          * This probably shouldn't happen -- we shouldn't
11309                          * get CTL_ACTION_ERROR, or anything else.
11310                          */
11311                         break;
11312                 }
11313         }
11314
11315         return (CTL_RETVAL_COMPLETE);
11316 }
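
/*
 * Illustrative sketch (not part of the original source; the types and
 * function are hypothetical): the save-the-successor idiom used by the
 * loop above, in isolation.  Fetching the next element before the
 * current one can be unlinked is exactly what the TAILQ_FOREACH_SAFE()
 * macro does, as used by ctl_failover_lun() below.
 */
struct ctl_sketch_item {
        TAILQ_ENTRY(ctl_sketch_item) links;
        int done;
};
TAILQ_HEAD(ctl_sketch_list, ctl_sketch_item);

static void
ctl_prune_done_sketch(struct ctl_sketch_list *list)
{
        struct ctl_sketch_item *cur, *next;

        TAILQ_FOREACH_SAFE(cur, list, links, next) {
                if (cur->done)
                        TAILQ_REMOVE(list, cur, links);
        }
}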
11317
11318 /*
11319  * This routine (with one exception) checks LUN flags that can be set by
11320  * commands ahead of us in the OOA queue.  These flags have to be checked
11321  * when a command initially comes in, and when we pull a command off the
11322  * blocked queue and are preparing to execute it.  The reason we have to
11323  * check these flags for commands on the blocked queue is that the LUN
11324  * state may have been changed by a command ahead of us while we're on the
11325  * blocked queue.
11326  *
11327  * Ordering is somewhat important with these checks, so please pay
11328  * careful attention to the placement of any new checks.
11329  */
11330 static int
11331 ctl_scsiio_lun_check(struct ctl_lun *lun,
11332     const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
11333 {
11334         struct ctl_softc *softc = lun->ctl_softc;
11335         int retval;
11336         uint32_t residx;
11337
11338         retval = 0;
11339
11340         mtx_assert(&lun->lun_lock, MA_OWNED);
11341
11342         /*
11343          * If this shelf is a secondary shelf controller, we may have to
11344          * reject some commands disallowed by HA mode and link state.
11345          */
11346         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
11347                 if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
11348                     (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
11349                         ctl_set_lun_unavail(ctsio);
11350                         retval = 1;
11351                         goto bailout;
11352                 }
11353                 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
11354                     (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
11355                         ctl_set_lun_transit(ctsio);
11356                         retval = 1;
11357                         goto bailout;
11358                 }
11359                 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
11360                     (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
11361                         ctl_set_lun_standby(ctsio);
11362                         retval = 1;
11363                         goto bailout;
11364                 }
11365
11366                 /* The rest of the checks are only done on the executing side. */
11367                 if (softc->ha_mode == CTL_HA_MODE_XFER)
11368                         goto bailout;
11369         }
11370
11371         if (entry->pattern & CTL_LUN_PAT_WRITE) {
11372                 if (lun->be_lun &&
11373                     lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
11374                         ctl_set_hw_write_protected(ctsio);
11375                         retval = 1;
11376                         goto bailout;
11377                 }
11378                 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
11379                     .eca_and_aen & SCP_SWP) != 0) {
11380                         ctl_set_sense(ctsio, /*current_error*/ 1,
11381                             /*sense_key*/ SSD_KEY_DATA_PROTECT,
11382                             /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
11383                         retval = 1;
11384                         goto bailout;
11385                 }
11386         }
11387
11388         /*
11389          * Check for a reservation conflict.  If this command isn't allowed
11390          * even on reserved LUNs, and if this initiator isn't the one who
11391          * reserved us, reject the command with a reservation conflict.
11392          */
11393         residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11394         if ((lun->flags & CTL_LUN_RESERVED)
11395          && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
11396                 if (lun->res_idx != residx) {
11397                         ctl_set_reservation_conflict(ctsio);
11398                         retval = 1;
11399                         goto bailout;
11400                 }
11401         }
11402
11403         if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
11404             (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
11405                 /* No reservation or command is allowed. */;
11406         } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
11407             (lun->pr_res_type == SPR_TYPE_WR_EX ||
11408              lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
11409              lun->pr_res_type == SPR_TYPE_WR_EX_AR)) {
11410                 /* The command is allowed for Write Exclusive resv. */;
11411         } else {
11412                 /*
11413                  * If we aren't registered, or it's a reservation-holder
11414                  * type reservation and this isn't the reservation holder,
11415                  * then set a conflict.
11416                  */
11417                 if (ctl_get_prkey(lun, residx) == 0 ||
11418                     (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
11419                         ctl_set_reservation_conflict(ctsio);
11420                         retval = 1;
11421                         goto bailout;
11422                 }
11423         }
11424
11425         if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
11426                 if (lun->flags & CTL_LUN_EJECTED)
11427                         ctl_set_lun_ejected(ctsio);
11428                 else if (lun->flags & CTL_LUN_NO_MEDIA) {
11429                         if (lun->flags & CTL_LUN_REMOVABLE)
11430                                 ctl_set_lun_no_media(ctsio);
11431                         else
11432                                 ctl_set_lun_int_reqd(ctsio);
11433                 } else if (lun->flags & CTL_LUN_STOPPED)
11434                         ctl_set_lun_stopped(ctsio);
11435                 else
11436                         goto bailout;
11437                 retval = 1;
11438                 goto bailout;
11439         }
11440
11441 bailout:
11442         return (retval);
11443 }
11444
11445 static void
11446 ctl_failover_io(union ctl_io *io, int have_lock)
11447 {
11448         ctl_set_busy(&io->scsiio);
11449         ctl_done(io);
11450 }
11451
11452 static void
11453 ctl_failover_lun(union ctl_io *rio)
11454 {
11455         struct ctl_softc *softc = control_softc;
11456         struct ctl_lun *lun;
11457         struct ctl_io_hdr *io, *next_io;
11458         uint32_t targ_lun;
11459
11460         targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
11461         CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", (uintmax_t)targ_lun));
11462
11463         /* Find and lock the LUN. */
11464         mtx_lock(&softc->ctl_lock);
11465         if ((targ_lun < CTL_MAX_LUNS) &&
11466             ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
11467                 mtx_lock(&lun->lun_lock);
11468                 mtx_unlock(&softc->ctl_lock);
11469                 if (lun->flags & CTL_LUN_DISABLED) {
11470                         mtx_unlock(&lun->lun_lock);
11471                         return;
11472                 }
11473         } else {
11474                 mtx_unlock(&softc->ctl_lock);
11475                 return;
11476         }
11477
11478         if (softc->ha_mode == CTL_HA_MODE_XFER) {
11479                 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
11480                         /* We are master */
11481                         if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11482                                 if (io->flags & CTL_FLAG_IO_ACTIVE) {
11483                                         io->flags |= CTL_FLAG_ABORT;
11484                                         io->flags |= CTL_FLAG_FAILOVER;
11485                                 } else { /* This can be only due to DATAMOVE */
11486                                         io->msg_type = CTL_MSG_DATAMOVE_DONE;
11487                                         io->flags &= ~CTL_FLAG_DMA_INPROG;
11488                                         io->flags |= CTL_FLAG_IO_ACTIVE;
11489                                         io->port_status = 31340;
11490                                         ctl_enqueue_isc((union ctl_io *)io);
11491                                 }
11492                         }
11493                         /* We are slave */
11494                         if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
11495                                 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11496                                 if (io->flags & CTL_FLAG_IO_ACTIVE) {
11497                                         io->flags |= CTL_FLAG_FAILOVER;
11498                                 } else {
11499                                         ctl_set_busy(&((union ctl_io *)io)->
11500                                             scsiio);
11501                                         ctl_done((union ctl_io *)io);
11502                                 }
11503                         }
11504                 }
11505         } else { /* SERIALIZE modes */
11506                 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
11507                     next_io) {
11508                         /* We are master */
11509                         if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11510                                 TAILQ_REMOVE(&lun->blocked_queue, io,
11511                                     blocked_links);
11512                                 io->flags &= ~CTL_FLAG_BLOCKED;
11513                                 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
11514                                 ctl_free_io((union ctl_io *)io);
11515                         }
11516                 }
11517                 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
11518                         /* We are master */
11519                         if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
11520                                 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
11521                                 ctl_free_io((union ctl_io *)io);
11522                         }
11523                         /* We are slave */
11524                         if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
11525                                 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11526                                 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
11527                                         ctl_set_busy(&((union ctl_io *)io)->
11528                                             scsiio);
11529                                         ctl_done((union ctl_io *)io);
11530                                 }
11531                         }
11532                 }
11533                 ctl_check_blocked(lun);
11534         }
11535         mtx_unlock(&lun->lun_lock);
11536 }
11537
11538 static int
11539 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
11540 {
11541         struct ctl_lun *lun;
11542         const struct ctl_cmd_entry *entry;
11543         uint32_t initidx, targ_lun;
11544         int retval;
11545
11546         retval = 0;
11547
11548         lun = NULL;
11549
11550         targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11551         if ((targ_lun < CTL_MAX_LUNS)
11552          && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
11553                 /*
11554                  * If the LUN is invalid, pretend that it doesn't exist.
11555                  * It will go away as soon as all pending I/O has been
11556                  * completed.
11557                  */
11558                 mtx_lock(&lun->lun_lock);
11559                 if (lun->flags & CTL_LUN_DISABLED) {
11560                         mtx_unlock(&lun->lun_lock);
11561                         lun = NULL;
11562                         ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11563                         ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11564                 } else {
11565                         ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
11566                         ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
11567                                 lun->be_lun;
11568
11569                         /*
11570                          * Every I/O goes into the OOA queue for a
11571                          * particular LUN, and stays there until completion.
11572                          */
11573 #ifdef CTL_TIME_IO
11574                         if (TAILQ_EMPTY(&lun->ooa_queue)) {
11575                                 lun->idle_time += getsbinuptime() -
11576                                     lun->last_busy;
11577                         }
11578 #endif
11579                         TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
11580                             ooa_links);
11581                 }
11582         } else {
11583                 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11584                 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11585         }
11586
10587         /* Get the command entry; return an error if the command is unsupported. */
11588         entry = ctl_validate_command(ctsio);
11589         if (entry == NULL) {
11590                 if (lun)
11591                         mtx_unlock(&lun->lun_lock);
11592                 return (retval);
11593         }
11594
11595         ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11596         ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11597
11598         /*
11599          * Check to see whether we can send this command to LUNs that don't
11600          * exist.  This should pretty much only be the case for inquiry
11601          * and request sense.  The remaining checks below require a LUN,
11602          * so we can't validate the command any further here.  Just put
11603          * it on the rtr queue.
11604          */
11605         if (lun == NULL) {
11606                 if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) {
11607                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11608                         ctl_enqueue_rtr((union ctl_io *)ctsio);
11609                         return (retval);
11610                 }
11611
11612                 ctl_set_unsupported_lun(ctsio);
11613                 ctl_done((union ctl_io *)ctsio);
11614                 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11615                 return (retval);
11616         } else {
11617                 /*
11618                  * Make sure we support this particular command on this LUN.
11619                  * e.g., we don't support writes to the control LUN.
11620                  */
11621                 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11622                         mtx_unlock(&lun->lun_lock);
11623                         ctl_set_invalid_opcode(ctsio);
11624                         ctl_done((union ctl_io *)ctsio);
11625                         return (retval);
11626                 }
11627         }
11628
11629         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11630
11631 #ifdef CTL_WITH_CA
11632         /*
11633          * If we've got a request sense, it'll clear the contingent
11634          * allegiance condition.  Otherwise, if we have a CA condition for
11635          * this initiator, clear it, because it sent down a command other
11636          * than request sense.
11637          */
11638         if ((ctsio->cdb[0] != REQUEST_SENSE)
11639          && (ctl_is_set(lun->have_ca, initidx)))
11640                 ctl_clear_mask(lun->have_ca, initidx);
11641 #endif
11642
11643         /*
11644          * If the command has this flag set, it handles its own unit
11645          * attention reporting, so we shouldn't do anything.  Otherwise we
11646          * check for any pending unit attentions, and send them back to the
11647          * initiator.  We only do this when a command initially comes in,
11648          * not when we pull it off the blocked queue.
11649          *
11650          * According to SAM-3, section 5.3.2, the order that things get
11651          * presented back to the host is basically unit attentions caused
11652          * by some sort of reset event, busy status, reservation conflicts
11653          * or task set full, and finally any other status.
11654          *
11655          * One issue here is that some of the unit attentions we report
11656          * don't fall into the "reset" category (e.g. "reported luns data
11657          * has changed").  So reporting it here, before the reservation
11658          * check, may be technically wrong.  I guess the only thing to do
11659          * would be to check for and report the reset events here, and then
11660          * check for the other unit attention types after we check for a
11661          * reservation conflict.
11662          *
11663          * XXX KDM need to fix this
11664          */
11665         if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
11666                 ctl_ua_type ua_type;
11667
11668                 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
11669                     SSD_TYPE_NONE);
11670                 if (ua_type != CTL_UA_NONE) {
11671                         mtx_unlock(&lun->lun_lock);
11672                         ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
11673                         ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11674                         ctsio->sense_len = SSD_FULL_SIZE;
11675                         ctl_done((union ctl_io *)ctsio);
11676                         return (retval);
11677                 }
11678         }
11679
11680
11681         if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
11682                 mtx_unlock(&lun->lun_lock);
11683                 ctl_done((union ctl_io *)ctsio);
11684                 return (retval);
11685         }
11686
11687         /*
11688          * XXX CHD this is where we want to send IO to other side if
11689          * this LUN is secondary on this SC. We will need to make a copy
11690          * of the IO, flag the IO on this side as SENT_2OTHER, and flag
11691          * the copy we send as FROM_OTHER.
11692          * We also need to stuff the address of the original IO so we can
11693          * find it easily. Something similar will need to be done on the
11694          * other side so when we are done we can find the copy.
11695          */
11696         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
11697             (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 &&
11698             (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) {
11699                 union ctl_ha_msg msg_info;
11700                 int isc_retval;
11701
11702                 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11703                 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11704                 mtx_unlock(&lun->lun_lock);
11705
11706                 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
11707                 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
11708                 msg_info.hdr.serializing_sc = NULL;
11709                 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
11710                 msg_info.scsi.tag_num = ctsio->tag_num;
11711                 msg_info.scsi.tag_type = ctsio->tag_type;
11712                 msg_info.scsi.cdb_len = ctsio->cdb_len;
11713                 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
11714
11715                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11716                     sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
11717                     M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
11718                         ctl_set_busy(ctsio);
11719                         ctl_done((union ctl_io *)ctsio);
11720                         return (retval);
11721                 }
11722                 return (retval);
11723         }
11724
11725         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
11726                               (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
11727                               ctl_ooaq, ooa_links))) {
11728         case CTL_ACTION_BLOCK:
11729                 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
11730                 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
11731                                   blocked_links);
11732                 mtx_unlock(&lun->lun_lock);
11733                 return (retval);
11734         case CTL_ACTION_PASS:
11735         case CTL_ACTION_SKIP:
11736                 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11737                 mtx_unlock(&lun->lun_lock);
11738                 ctl_enqueue_rtr((union ctl_io *)ctsio);
11739                 break;
11740         case CTL_ACTION_OVERLAP:
11741                 mtx_unlock(&lun->lun_lock);
11742                 ctl_set_overlapped_cmd(ctsio);
11743                 ctl_done((union ctl_io *)ctsio);
11744                 break;
11745         case CTL_ACTION_OVERLAP_TAG:
11746                 mtx_unlock(&lun->lun_lock);
11747                 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
11748                 ctl_done((union ctl_io *)ctsio);
11749                 break;
11750         case CTL_ACTION_ERROR:
11751         default:
11752                 mtx_unlock(&lun->lun_lock);
11753                 ctl_set_internal_failure(ctsio,
11754                                          /*sks_valid*/ 0,
11755                                          /*retry_count*/ 0);
11756                 ctl_done((union ctl_io *)ctsio);
11757                 break;
11758         }
11759         return (retval);
11760 }
11761
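/*
 * A minimal sketch (not part of the original source) of how a frontend
 * feeds a SCSI command into the precheck above: "port" is assumed to be
 * a registered struct ctl_port, and the initiator/LUN/tag/CDB values are
 * placeholders supplied by the transport.  ctl_queue() hands the I/O to
 * a CTL work thread, which ends up in ctl_scsiio_precheck().
 */
#if 0
	union ctl_io *io;

	io = ctl_alloc_io(port->ctl_pool_ref);
	ctl_zero_io(io);
	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.nexus.initid = initiator_id;		/* placeholder */
	io->io_hdr.nexus.targ_port = port->targ_port;
	io->io_hdr.nexus.targ_lun = lun_id;		/* placeholder */
	io->scsiio.tag_num = tag_num;			/* placeholder */
	io->scsiio.tag_type = CTL_TAG_SIMPLE;
	io->scsiio.cdb_len = cdb_len;			/* placeholder */
	memcpy(io->scsiio.cdb, cdb, cdb_len);
	ctl_queue(io);
#endif
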
11762 const struct ctl_cmd_entry *
11763 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
11764 {
11765         const struct ctl_cmd_entry *entry;
11766         int service_action;
11767
11768         entry = &ctl_cmd_table[ctsio->cdb[0]];
11769         if (sa)
11770                 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
11771         if (entry->flags & CTL_CMD_FLAG_SA5) {
11772                 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
11773                 entry = &((const struct ctl_cmd_entry *)
11774                     entry->execute)[service_action];
11775         }
11776         return (entry);
11777 }
11778
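/*
 * Worked example of the two-level lookup above: READ CAPACITY(16)
 * arrives as SERVICE ACTION IN(16), opcode 0x9e.  The top-level entry
 * for 0x9e carries CTL_CMD_FLAG_SA5, so its execute pointer is really a
 * nested array of ctl_cmd_entry, and the service action taken from
 * cdb[1] & SERVICE_ACTION_MASK (0x10 here) re-indexes that array to
 * reach the actual READ CAPACITY(16) entry.
 */
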
11779 const struct ctl_cmd_entry *
11780 ctl_validate_command(struct ctl_scsiio *ctsio)
11781 {
11782         const struct ctl_cmd_entry *entry;
11783         int i, sa;
11784         uint8_t diff;
11785
11786         entry = ctl_get_cmd_entry(ctsio, &sa);
11787         if (entry->execute == NULL) {
11788                 if (sa)
11789                         ctl_set_invalid_field(ctsio,
11790                                               /*sks_valid*/ 1,
11791                                               /*command*/ 1,
11792                                               /*field*/ 1,
11793                                               /*bit_valid*/ 1,
11794                                               /*bit*/ 4);
11795                 else
11796                         ctl_set_invalid_opcode(ctsio);
11797                 ctl_done((union ctl_io *)ctsio);
11798                 return (NULL);
11799         }
11800         KASSERT(entry->length > 0,
11801             ("No length defined for command 0x%02x/0x%02x",
11802              ctsio->cdb[0], ctsio->cdb[1]));
11803         for (i = 1; i < entry->length; i++) {
11804                 diff = ctsio->cdb[i] & ~entry->usage[i - 1];
11805                 if (diff == 0)
11806                         continue;
11807                 ctl_set_invalid_field(ctsio,
11808                                       /*sks_valid*/ 1,
11809                                       /*command*/ 1,
11810                                       /*field*/ i,
11811                                       /*bit_valid*/ 1,
11812                                       /*bit*/ fls(diff) - 1);
11813                 ctl_done((union ctl_io *)ctsio);
11814                 return (NULL);
11815         }
11816         return (entry);
11817 }
11818
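/*
 * Worked example of the usage-mask check above: if entry->usage allows
 * only bits 0x1f in some CDB byte and the initiator set that byte to
 * 0x3f, then diff == 0x20 and fls(diff) - 1 == 5, so the ILLEGAL
 * REQUEST sense data points at bit 5 of that CDB byte.
 */
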
11819 static int
11820 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
11821 {
11822
11823         switch (lun_type) {
11824         case T_DIRECT:
11825                 if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0)
11826                         return (0);
11827                 break;
11828         case T_PROCESSOR:
11829                 if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
11830                         return (0);
11831                 break;
11832         case T_CDROM:
11833                 if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0)
11834                         return (0);
11835                 break;
11836         default:
11837                 return (0);
11838         }
11839         return (1);
11840 }
11841
11842 static int
11843 ctl_scsiio(struct ctl_scsiio *ctsio)
11844 {
11845         int retval;
11846         const struct ctl_cmd_entry *entry;
11847
11848         retval = CTL_RETVAL_COMPLETE;
11849
11850         CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
11851
11852         entry = ctl_get_cmd_entry(ctsio, NULL);
11853
11854         /*
11855          * If this I/O has been aborted, just send it straight to
11856          * ctl_done() without executing it.
11857          */
11858         if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
11859                 ctl_done((union ctl_io *)ctsio);
11860                 goto bailout;
11861         }
11862
11863         /*
11864          * All the checks should have been handled by ctl_scsiio_precheck().
11865          * We should be clear now to just execute the I/O.
11866          */
11867         retval = entry->execute(ctsio);
11868
11869 bailout:
11870         return (retval);
11871 }
11872
11873 /*
11874  * Since we only implement one target right now, a bus reset simply resets
11875  * our single target.
11876  */
11877 static int
11878 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
11879 {
11880         return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
11881 }
11882
11883 static int
11884 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
11885                  ctl_ua_type ua_type)
11886 {
11887         struct ctl_port *port;
11888         struct ctl_lun *lun;
11889         int retval;
11890
11891         if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11892                 union ctl_ha_msg msg_info;
11893
11894                 msg_info.hdr.nexus = io->io_hdr.nexus;
11895                 if (ua_type == CTL_UA_TARG_RESET)
11896                         msg_info.task.task_action = CTL_TASK_TARGET_RESET;
11897                 else
11898                         msg_info.task.task_action = CTL_TASK_BUS_RESET;
11899                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
11900                 msg_info.hdr.original_sc = NULL;
11901                 msg_info.hdr.serializing_sc = NULL;
11902                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11903                     sizeof(msg_info.task), M_WAITOK);
11904         }
11905         retval = 0;
11906
11907         mtx_lock(&softc->ctl_lock);
11908         port = ctl_io_port(&io->io_hdr);
11909         STAILQ_FOREACH(lun, &softc->lun_list, links) {
11910                 if (port != NULL &&
11911                     ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
11912                         continue;
11913                 retval += ctl_do_lun_reset(lun, io, ua_type);
11914         }
11915         mtx_unlock(&softc->ctl_lock);
11916         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11917         return (retval);
11918 }
11919
11920 /*
11921  * The LUN should always be set.  The I/O is optional, and is used to
11922  * distinguish between I/Os sent by this initiator, and by other
11923  * initiators.  We set unit attention for initiators other than this one.
11924  * SAM-3 is vague on this point.  It does say that a unit attention should
11925  * be established for other initiators when a LUN is reset (see section
11926  * 5.7.3), but it doesn't specifically say that the unit attention should
11927  * be established for this particular initiator when a LUN is reset.  Here
11928  * is the relevant text, from SAM-3 rev 8:
11929  *
11930  * 5.7.2 When a SCSI initiator port aborts its own tasks
11931  *
11932  * When a SCSI initiator port causes its own task(s) to be aborted, no
11933  * notification that the task(s) have been aborted shall be returned to
11934  * the SCSI initiator port other than the completion response for the
11935  * command or task management function action that caused the task(s) to
11936  * be aborted and notification(s) associated with related effects of the
11937  * action (e.g., a reset unit attention condition).
11938  *
11939  * XXX KDM for now, we're setting unit attention for all initiators.
11940  */
11941 static int
11942 ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
11943 {
11944         union ctl_io *xio;
11945 #if 0
11946         uint32_t initidx;
11947 #endif
11948         int i;
11949
11950         mtx_lock(&lun->lun_lock);
11951         /*
11952          * Run through the OOA queue and abort each I/O.
11953          */
11954         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11955              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11956                 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
11957         }
11958
11959         /*
11960          * This version sets unit attention for every initiator.
11961          */
11962 #if 0
11963         initidx = ctl_get_initindex(&io->io_hdr.nexus);
11964         ctl_est_ua_all(lun, initidx, ua_type);
11965 #else
11966         ctl_est_ua_all(lun, -1, ua_type);
11967 #endif
11968
11969         /*
11970          * A reset (any kind, really) clears reservations established with
11971          * RESERVE/RELEASE.  It does not clear reservations established
11972          * with PERSISTENT RESERVE OUT, but we don't support that at the
11973          * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
11974          * reservations made with the RESERVE/RELEASE commands, because
11975          * those commands are obsolete in SPC-3.
11976          */
11977         lun->flags &= ~CTL_LUN_RESERVED;
11978
11979 #ifdef CTL_WITH_CA
11980         for (i = 0; i < CTL_MAX_INITIATORS; i++)
11981                 ctl_clear_mask(lun->have_ca, i);
11982 #endif
11983         lun->prevent_count = 0;
11984         for (i = 0; i < CTL_MAX_INITIATORS; i++)
11985                 ctl_clear_mask(lun->prevent, i);
11986         mtx_unlock(&lun->lun_lock);
11987
11988         return (0);
11989 }
11990
11991 static int
11992 ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
11993 {
11994         struct ctl_lun *lun;
11995         uint32_t targ_lun;
11996         int retval;
11997
11998         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11999         mtx_lock(&softc->ctl_lock);
12000         if ((targ_lun >= CTL_MAX_LUNS) ||
12001             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12002                 mtx_unlock(&softc->ctl_lock);
12003                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12004                 return (1);
12005         }
12006         retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET);
12007         mtx_unlock(&softc->ctl_lock);
12008         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12009
12010         if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
12011                 union ctl_ha_msg msg_info;
12012
12013                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12014                 msg_info.hdr.nexus = io->io_hdr.nexus;
12015                 msg_info.task.task_action = CTL_TASK_LUN_RESET;
12016                 msg_info.hdr.original_sc = NULL;
12017                 msg_info.hdr.serializing_sc = NULL;
12018                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12019                     sizeof(msg_info.task), M_WAITOK);
12020         }
12021         return (retval);
12022 }
12023
12024 static void
12025 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
12026     int other_sc)
12027 {
12028         union ctl_io *xio;
12029
12030         mtx_assert(&lun->lun_lock, MA_OWNED);
12031
12032         /*
12033          * Run through the OOA queue and abort every I/O whose target port
12034          * and initiator ID match the given values; UINT32_MAX acts as a
12035          * wildcard for either field.  When an abort originates on this
12036          * side and this side is not primary for the LUN, it is also
12037          * forwarded to the peer controller.
12038          */
12039         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12040              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12041
12042                 if ((targ_port == UINT32_MAX ||
12043                      targ_port == xio->io_hdr.nexus.targ_port) &&
12044                     (init_id == UINT32_MAX ||
12045                      init_id == xio->io_hdr.nexus.initid)) {
12046                         if (targ_port != xio->io_hdr.nexus.targ_port ||
12047                             init_id != xio->io_hdr.nexus.initid)
12048                                 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
12049                         xio->io_hdr.flags |= CTL_FLAG_ABORT;
12050                         if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12051                                 union ctl_ha_msg msg_info;
12052
12053                                 msg_info.hdr.nexus = xio->io_hdr.nexus;
12054                                 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12055                                 msg_info.task.tag_num = xio->scsiio.tag_num;
12056                                 msg_info.task.tag_type = xio->scsiio.tag_type;
12057                                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12058                                 msg_info.hdr.original_sc = NULL;
12059                                 msg_info.hdr.serializing_sc = NULL;
12060                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12061                                     sizeof(msg_info.task), M_NOWAIT);
12062                         }
12063                 }
12064         }
12065 }
12066
12067 static int
12068 ctl_abort_task_set(union ctl_io *io)
12069 {
12070         struct ctl_softc *softc = control_softc;
12071         struct ctl_lun *lun;
12072         uint32_t targ_lun;
12073
12074         /*
12075          * Look up the LUN.
12076          */
12077         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12078         mtx_lock(&softc->ctl_lock);
12079         if ((targ_lun >= CTL_MAX_LUNS) ||
12080             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12081                 mtx_unlock(&softc->ctl_lock);
12082                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12083                 return (1);
12084         }
12085
12086         mtx_lock(&lun->lun_lock);
12087         mtx_unlock(&softc->ctl_lock);
12088         if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
12089                 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12090                     io->io_hdr.nexus.initid,
12091                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12092         } else { /* CTL_TASK_CLEAR_TASK_SET */
12093                 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
12094                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12095         }
12096         mtx_unlock(&lun->lun_lock);
12097         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12098         return (0);
12099 }
12100
12101 static int
12102 ctl_i_t_nexus_reset(union ctl_io *io)
12103 {
12104         struct ctl_softc *softc = control_softc;
12105         struct ctl_lun *lun;
12106         uint32_t initidx;
12107
12108         if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12109                 union ctl_ha_msg msg_info;
12110
12111                 msg_info.hdr.nexus = io->io_hdr.nexus;
12112                 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
12113                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12114                 msg_info.hdr.original_sc = NULL;
12115                 msg_info.hdr.serializing_sc = NULL;
12116                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12117                     sizeof(msg_info.task), M_WAITOK);
12118         }
12119
12120         initidx = ctl_get_initindex(&io->io_hdr.nexus);
12121         mtx_lock(&softc->ctl_lock);
12122         STAILQ_FOREACH(lun, &softc->lun_list, links) {
12123                 mtx_lock(&lun->lun_lock);
12124                 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12125                     io->io_hdr.nexus.initid, 1);
12126 #ifdef CTL_WITH_CA
12127                 ctl_clear_mask(lun->have_ca, initidx);
12128 #endif
12129                 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
12130                         lun->flags &= ~CTL_LUN_RESERVED;
12131                 if (ctl_is_set(lun->prevent, initidx)) {
12132                         ctl_clear_mask(lun->prevent, initidx);
12133                         lun->prevent_count--;
12134                 }
12135                 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
12136                 mtx_unlock(&lun->lun_lock);
12137         }
12138         mtx_unlock(&softc->ctl_lock);
12139         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12140         return (0);
12141 }
12142
12143 static int
12144 ctl_abort_task(union ctl_io *io)
12145 {
12146         union ctl_io *xio;
12147         struct ctl_lun *lun;
12148         struct ctl_softc *softc;
12149 #if 0
12150         struct sbuf sb;
12151         char printbuf[128];
12152 #endif
12153         int found;
12154         uint32_t targ_lun;
12155
12156         softc = control_softc;
12157         found = 0;
12158
12159         /*
12160          * Look up the LUN.
12161          */
12162         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12163         mtx_lock(&softc->ctl_lock);
12164         if ((targ_lun >= CTL_MAX_LUNS) ||
12165             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12166                 mtx_unlock(&softc->ctl_lock);
12167                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12168                 return (1);
12169         }
12170
12171 #if 0
12172         printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
12173                lun->lun, io->taskio.tag_num, io->taskio.tag_type);
12174 #endif
12175
12176         mtx_lock(&lun->lun_lock);
12177         mtx_unlock(&softc->ctl_lock);
12178         /*
12179          * Run through the OOA queue and attempt to find the given I/O.
12180          * The target port, initiator ID, tag type and tag number have to
12181          * match the values that we got from the initiator.  If we have an
12182          * untagged command to abort, simply abort the first untagged command
12183          * we come to.  We only allow one untagged command at a time, of course.
12184          */
12185         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12186              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12187 #if 0
12188                 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
12189
12190                 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
12191                             lun->lun, xio->scsiio.tag_num,
12192                             xio->scsiio.tag_type,
12193                             (xio->io_hdr.blocked_links.tqe_prev
12194                             == NULL) ? "" : " BLOCKED",
12195                             (xio->io_hdr.flags &
12196                             CTL_FLAG_DMA_INPROG) ? " DMA" : "",
12197                             (xio->io_hdr.flags &
12198                             CTL_FLAG_ABORT) ? " ABORT" : "",
12199                             (xio->io_hdr.flags &
12200                             CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
12201                 ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
12202                 sbuf_finish(&sb);
12203                 printf("%s\n", sbuf_data(&sb));
12204 #endif
12205
12206                 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12207                  || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
12208                  || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12209                         continue;
12210
12211                 /*
12212                  * If the abort says that the task is untagged, the
12213                  * task in the queue must be untagged.  Otherwise,
12214                  * we just check to see whether the tag numbers
12215                  * match.  This is because the QLogic firmware
12216                  * doesn't pass back the tag type in an abort
12217                  * request.
12218                  */
12219 #if 0
12220                 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
12221                   && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
12222                  || (xio->scsiio.tag_num == io->taskio.tag_num))
12223 #endif
12224                 /*
12225                  * XXX KDM we've got problems with FC, because it
12226                  * doesn't send down a tag type with aborts.  So we
12227                  * can only really go by the tag number...
12228                  * This may cause problems with parallel SCSI.
12229                  * Need to figure that out!!
12230                  */
12231                 if (xio->scsiio.tag_num == io->taskio.tag_num) {
12232                         xio->io_hdr.flags |= CTL_FLAG_ABORT;
12233                         found = 1;
12234                         if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
12235                             !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12236                                 union ctl_ha_msg msg_info;
12237
12238                                 msg_info.hdr.nexus = io->io_hdr.nexus;
12239                                 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12240                                 msg_info.task.tag_num = io->taskio.tag_num;
12241                                 msg_info.task.tag_type = io->taskio.tag_type;
12242                                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12243                                 msg_info.hdr.original_sc = NULL;
12244                                 msg_info.hdr.serializing_sc = NULL;
12245 #if 0
12246                                 printf("Sent Abort to other side\n");
12247 #endif
12248                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
12249                                     sizeof(msg_info.task), M_NOWAIT);
12250                         }
12251 #if 0
12252                         printf("ctl_abort_task: found I/O to abort\n");
12253 #endif
12254                 }
12255         }
12256         mtx_unlock(&lun->lun_lock);
12257
12258         if (found == 0) {
12259                 /*
12260                  * This isn't really an error.  It's entirely possible for
12261                  * the abort and command completion to cross on the wire.
12262                  * This is more of an informative/diagnostic message.
12263                  */
12264 #if 0
12265                 printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
12266                        "%u:%u:%u tag %d type %d\n",
12267                        io->io_hdr.nexus.initid,
12268                        io->io_hdr.nexus.targ_port,
12269                        io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
12270                        io->taskio.tag_type);
12271 #endif
12272         }
12273         io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12274         return (0);
12275 }
12276
12277 static int
12278 ctl_query_task(union ctl_io *io, int task_set)
12279 {
12280         union ctl_io *xio;
12281         struct ctl_lun *lun;
12282         struct ctl_softc *softc;
12283         int found = 0;
12284         uint32_t targ_lun;
12285
12286         softc = control_softc;
12287         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12288         mtx_lock(&softc->ctl_lock);
12289         if ((targ_lun >= CTL_MAX_LUNS) ||
12290             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12291                 mtx_unlock(&softc->ctl_lock);
12292                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12293                 return (1);
12294         }
12295         mtx_lock(&lun->lun_lock);
12296         mtx_unlock(&softc->ctl_lock);
12297         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12298              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12299
12300                 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12301                  || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
12302                  || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12303                         continue;
12304
12305                 if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
12306                         found = 1;
12307                         break;
12308                 }
12309         }
12310         mtx_unlock(&lun->lun_lock);
12311         if (found)
12312                 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12313         else
12314                 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12315         return (0);
12316 }
12317
12318 static int
12319 ctl_query_async_event(union ctl_io *io)
12320 {
12321         struct ctl_lun *lun;
12322         struct ctl_softc *softc;
12323         ctl_ua_type ua;
12324         uint32_t targ_lun, initidx;
12325
12326         softc = control_softc;
12327         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12328         mtx_lock(&softc->ctl_lock);
12329         if ((targ_lun >= CTL_MAX_LUNS) ||
12330             (lun = softc->ctl_luns[targ_lun]) == NULL) {
12331                 mtx_unlock(&softc->ctl_lock);
12332                 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12333                 return (1);
12334         }
12335         mtx_lock(&lun->lun_lock);
12336         mtx_unlock(&softc->ctl_lock);
12337         initidx = ctl_get_initindex(&io->io_hdr.nexus);
12338         ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
12339         mtx_unlock(&lun->lun_lock);
12340         if (ua != CTL_UA_NONE)
12341                 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
12342         else
12343                 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12344         return (0);
12345 }
12346
12347 static void
12348 ctl_run_task(union ctl_io *io)
12349 {
12350         struct ctl_softc *softc = control_softc;
12351         int retval = 1;
12352
12353         CTL_DEBUG_PRINT(("ctl_run_task\n"));
12354         KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12355             ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type));
12356         io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
12357         bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
12358         switch (io->taskio.task_action) {
12359         case CTL_TASK_ABORT_TASK:
12360                 retval = ctl_abort_task(io);
12361                 break;
12362         case CTL_TASK_ABORT_TASK_SET:
12363         case CTL_TASK_CLEAR_TASK_SET:
12364                 retval = ctl_abort_task_set(io);
12365                 break;
12366         case CTL_TASK_CLEAR_ACA:
12367                 break;
12368         case CTL_TASK_I_T_NEXUS_RESET:
12369                 retval = ctl_i_t_nexus_reset(io);
12370                 break;
12371         case CTL_TASK_LUN_RESET:
12372                 retval = ctl_lun_reset(softc, io);
12373                 break;
12374         case CTL_TASK_TARGET_RESET:
12375                 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
12376                 break;
12377         case CTL_TASK_BUS_RESET:
12378                 retval = ctl_bus_reset(softc, io);
12379                 break;
12380         case CTL_TASK_PORT_LOGIN:
12381                 break;
12382         case CTL_TASK_PORT_LOGOUT:
12383                 break;
12384         case CTL_TASK_QUERY_TASK:
12385                 retval = ctl_query_task(io, 0);
12386                 break;
12387         case CTL_TASK_QUERY_TASK_SET:
12388                 retval = ctl_query_task(io, 1);
12389                 break;
12390         case CTL_TASK_QUERY_ASYNC_EVENT:
12391                 retval = ctl_query_async_event(io);
12392                 break;
12393         default:
12394                 printf("%s: got unknown task management event %d\n",
12395                        __func__, io->taskio.task_action);
12396                 break;
12397         }
12398         if (retval == 0)
12399                 io->io_hdr.status = CTL_SUCCESS;
12400         else
12401                 io->io_hdr.status = CTL_ERROR;
12402         ctl_done(io);
12403 }
12404
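/*
 * A minimal sketch (not part of the original source) of how a frontend
 * requests one of the task management actions dispatched above, here a
 * LUN reset; "port" is assumed to be a registered struct ctl_port and
 * the nexus values are placeholders.  The result comes back through the
 * port's fe_done() callback with taskio.task_status filled in.
 */
#if 0
	union ctl_io *io;

	io = ctl_alloc_io(port->ctl_pool_ref);
	ctl_zero_io(io);
	io->io_hdr.io_type = CTL_IO_TASK;
	io->io_hdr.nexus.initid = initiator_id;		/* placeholder */
	io->io_hdr.nexus.targ_port = port->targ_port;
	io->io_hdr.nexus.targ_lun = lun_id;		/* placeholder */
	io->taskio.task_action = CTL_TASK_LUN_RESET;
	ctl_queue(io);
#endif
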
12405 /*
12406  * For HA operation.  Handle commands that come in from the other
12407  * controller.
12408  */
12409 static void
12410 ctl_handle_isc(union ctl_io *io)
12411 {
12412         int free_io;
12413         struct ctl_lun *lun;
12414         struct ctl_softc *softc = control_softc;
12415         uint32_t targ_lun;
12416
12417         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12418         lun = softc->ctl_luns[targ_lun];
12419
12420         switch (io->io_hdr.msg_type) {
12421         case CTL_MSG_SERIALIZE:
12422                 free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
12423                 break;
12424         case CTL_MSG_R2R: {
12425                 const struct ctl_cmd_entry *entry;
12426
12427                 /*
12428                  * This is only used in SER_ONLY mode.
12429                  */
12430                 free_io = 0;
12431                 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
12432                 mtx_lock(&lun->lun_lock);
12433                 if (ctl_scsiio_lun_check(lun,
12434                     entry, (struct ctl_scsiio *)io) != 0) {
12435                         mtx_unlock(&lun->lun_lock);
12436                         ctl_done(io);
12437                         break;
12438                 }
12439                 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
12440                 mtx_unlock(&lun->lun_lock);
12441                 ctl_enqueue_rtr(io);
12442                 break;
12443         }
12444         case CTL_MSG_FINISH_IO:
12445                 if (softc->ha_mode == CTL_HA_MODE_XFER) {
12446                         free_io = 0;
12447                         ctl_done(io);
12448                 } else {
12449                         free_io = 1;
12450                         mtx_lock(&lun->lun_lock);
12451                         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
12452                                      ooa_links);
12453                         ctl_check_blocked(lun);
12454                         mtx_unlock(&lun->lun_lock);
12455                 }
12456                 break;
12457         case CTL_MSG_PERS_ACTION:
12458                 ctl_hndl_per_res_out_on_other_sc(
12459                         (union ctl_ha_msg *)&io->presio.pr_msg);
12460                 free_io = 1;
12461                 break;
12462         case CTL_MSG_BAD_JUJU:
12463                 free_io = 0;
12464                 ctl_done(io);
12465                 break;
12466         case CTL_MSG_DATAMOVE:
12467                 /* Only used in XFER mode */
12468                 free_io = 0;
12469                 ctl_datamove_remote(io);
12470                 break;
12471         case CTL_MSG_DATAMOVE_DONE:
12472                 /* Only used in XFER mode */
12473                 free_io = 0;
12474                 io->scsiio.be_move_done(io);
12475                 break;
12476         case CTL_MSG_FAILOVER:
12477                 ctl_failover_lun(io);
12478                 free_io = 1;
12479                 break;
12480         default:
12481                 free_io = 1;
12482                 printf("%s: Invalid message type %d\n",
12483                        __func__, io->io_hdr.msg_type);
12484                 break;
12485         }
12486         if (free_io)
12487                 ctl_free_io(io);
12488
12489 }
12490
12491
12492 /*
12493  * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
12494  * there is no match.
12495  */
12496 static ctl_lun_error_pattern
12497 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
12498 {
12499         const struct ctl_cmd_entry *entry;
12500         ctl_lun_error_pattern filtered_pattern, pattern;
12501
12502         pattern = desc->error_pattern;
12503
12504         /*
12505          * XXX KDM we need more data passed into this function to match a
12506          * custom pattern, and we actually need to implement custom pattern
12507          * matching.
12508          */
12509         if (pattern & CTL_LUN_PAT_CMD)
12510                 return (CTL_LUN_PAT_CMD);
12511
12512         if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
12513                 return (CTL_LUN_PAT_ANY);
12514
12515         entry = ctl_get_cmd_entry(ctsio, NULL);
12516
12517         filtered_pattern = entry->pattern & pattern;
12518
12519         /*
12520          * If the user requested specific flags in the pattern (e.g.
12521          * CTL_LUN_PAT_RANGE), make sure the command supports all of those
12522          * flags.
12523          *
12524          * If the user did not specify any flags, it doesn't matter whether
12525          * or not the command supports the flags.
12526          */
12527         if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
12528              (pattern & ~CTL_LUN_PAT_MASK))
12529                 return (CTL_LUN_PAT_NONE);
12530
12531         /*
12532          * If the user asked for a range check, see if the requested LBA
12533          * range overlaps with this command's LBA range.
12534          */
12535         if (filtered_pattern & CTL_LUN_PAT_RANGE) {
12536                 uint64_t lba1;
12537                 uint64_t len1;
12538                 ctl_action action;
12539                 int retval;
12540
12541                 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
12542                 if (retval != 0)
12543                         return (CTL_LUN_PAT_NONE);
12544
12545                 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
12546                                               desc->lba_range.len, FALSE);
12547                 /*
12548                  * A "pass" means that the LBA ranges don't overlap, so
12549                  * this doesn't match the user's range criteria.
12550                  */
12551                 if (action == CTL_ACTION_PASS)
12552                         return (CTL_LUN_PAT_NONE);
12553         }
12554
12555         return (filtered_pattern);
12556 }
12557
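/*
 * Example of the flag filtering above: a descriptor that sets
 * CTL_LUN_PAT_RANGE on top of its match bits only reaches the LBA
 * overlap check for commands whose table entry also advertises
 * CTL_LUN_PAT_RANGE (commands that actually carry an LBA range); for
 * any other command the RANGE bit drops out of filtered_pattern and
 * CTL_LUN_PAT_NONE is returned.
 */
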
12558 static void
12559 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
12560 {
12561         struct ctl_error_desc *desc, *desc2;
12562
12563         mtx_assert(&lun->lun_lock, MA_OWNED);
12564
12565         STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
12566                 ctl_lun_error_pattern pattern;
12567                 /*
12568                  * Check to see whether this particular command matches
12569                  * the pattern in the descriptor.
12570                  */
12571                 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
12572                 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
12573                         continue;
12574
12575                 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
12576                 case CTL_LUN_INJ_ABORTED:
12577                         ctl_set_aborted(&io->scsiio);
12578                         break;
12579                 case CTL_LUN_INJ_MEDIUM_ERR:
12580                         ctl_set_medium_error(&io->scsiio,
12581                             (io->io_hdr.flags & CTL_FLAG_DATA_MASK) !=
12582                              CTL_FLAG_DATA_OUT);
12583                         break;
12584                 case CTL_LUN_INJ_UA:
12585                         /* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
12586                          * OCCURRED */
12587                         ctl_set_ua(&io->scsiio, 0x29, 0x00);
12588                         break;
12589                 case CTL_LUN_INJ_CUSTOM:
12590                         /*
12591                          * We're assuming the user knows what he is doing.
12592                          * Just copy the sense information without doing
12593                          * checks.
12594                          */
12595                         bcopy(&desc->custom_sense, &io->scsiio.sense_data,
12596                               MIN(sizeof(desc->custom_sense),
12597                                   sizeof(io->scsiio.sense_data)));
12598                         io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
12599                         io->scsiio.sense_len = SSD_FULL_SIZE;
12600                         io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
12601                         break;
12602                 case CTL_LUN_INJ_NONE:
12603                 default:
12604                         /*
12605                          * If this is an error injection type we don't know
12606                          * about, clear the continuous flag (if it is set)
12607                          * so it will get deleted below.
12608                          */
12609                         desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
12610                         break;
12611                 }
12612                 /*
12613                  * By default, each error injection action is a one-shot.
12614                  */
12615                 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
12616                         continue;
12617
12618                 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
12619
12620                 free(desc, M_CTL);
12621         }
12622 }
12623
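/*
 * A minimal sketch (not part of the original source) of a descriptor
 * that ctl_inject_error() would consume, with "lun" pointing at the
 * target struct ctl_lun; in practice these come from userland through
 * the CTL error injection ioctl (ctladm(8) "inject").  The
 * CTL_LUN_PAT_READ match bit is assumed from ctl_ioctl.h.  This one
 * turns the first read overlapping LBAs 1000-1007 into a medium error,
 * one-shot because CTL_LUN_INJ_CONTINUOUS is not set.
 */
#if 0
	struct ctl_error_desc *desc;

	desc = malloc(sizeof(*desc), M_CTL, M_WAITOK | M_ZERO);
	desc->lun_error = CTL_LUN_INJ_MEDIUM_ERR;
	desc->error_pattern = CTL_LUN_PAT_READ | CTL_LUN_PAT_RANGE;
	desc->lba_range.lba = 1000;
	desc->lba_range.len = 8;
	mtx_lock(&lun->lun_lock);
	STAILQ_INSERT_TAIL(&lun->error_list, desc, links);
	mtx_unlock(&lun->lun_lock);
#endif
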
12624 #ifdef CTL_IO_DELAY
12625 static void
12626 ctl_datamove_timer_wakeup(void *arg)
12627 {
12628         union ctl_io *io;
12629
12630         io = (union ctl_io *)arg;
12631
12632         ctl_datamove(io);
12633 }
12634 #endif /* CTL_IO_DELAY */
12635
12636 void
12637 ctl_datamove(union ctl_io *io)
12638 {
12639         struct ctl_lun *lun;
12640         void (*fe_datamove)(union ctl_io *io);
12641
12642         mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
12643
12644         CTL_DEBUG_PRINT(("ctl_datamove\n"));
12645
12646         lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12647 #ifdef CTL_TIME_IO
12648         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12649                 char str[256];
12650                 char path_str[64];
12651                 struct sbuf sb;
12652
12653                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12654                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12655
12656                 sbuf_cat(&sb, path_str);
12657                 switch (io->io_hdr.io_type) {
12658                 case CTL_IO_SCSI:
12659                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12660                         sbuf_printf(&sb, "\n");
12661                         sbuf_cat(&sb, path_str);
12662                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12663                                     io->scsiio.tag_num, io->scsiio.tag_type);
12664                         break;
12665                 case CTL_IO_TASK:
12666                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12667                                     "Tag Type: %d\n", io->taskio.task_action,
12668                                     io->taskio.tag_num, io->taskio.tag_type);
12669                         break;
12670                 default:
12671                         panic("%s: Invalid CTL I/O type %d\n",
12672                             __func__, io->io_hdr.io_type);
12673                 }
12674                 sbuf_cat(&sb, path_str);
12675                 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
12676                             (intmax_t)time_uptime - io->io_hdr.start_time);
12677                 sbuf_finish(&sb);
12678                 printf("%s", sbuf_data(&sb));
12679         }
12680 #endif /* CTL_TIME_IO */
12681
12682 #ifdef CTL_IO_DELAY
12683         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12684                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12685         } else {
12686                 if ((lun != NULL)
12687                  && (lun->delay_info.datamove_delay > 0)) {
12688
12689                         callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
12690                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12691                         callout_reset(&io->io_hdr.delay_callout,
12692                                       lun->delay_info.datamove_delay * hz,
12693                                       ctl_datamove_timer_wakeup, io);
12694                         if (lun->delay_info.datamove_type ==
12695                             CTL_DELAY_TYPE_ONESHOT)
12696                                 lun->delay_info.datamove_delay = 0;
12697                         return;
12698                 }
12699         }
12700 #endif
12701
12702         /*
12703          * This command has been aborted.  Set the port status, so we fail
12704          * the data move.
12705          */
12706         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12707                 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
12708                        io->scsiio.tag_num, io->io_hdr.nexus.initid,
12709                        io->io_hdr.nexus.targ_port,
12710                        io->io_hdr.nexus.targ_lun);
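                /*
                 * Any nonzero port_status marks the transfer as failed to
                 * the backend; the particular value is arbitrary.
                 */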
12711                 io->io_hdr.port_status = 31337;
12712                 /*
12713                  * Note that the backend, in this case, will get the
12714                  * callback in its context.  In other cases it may get
12715                  * called in the frontend's interrupt thread context.
12716                  */
12717                 io->scsiio.be_move_done(io);
12718                 return;
12719         }
12720
12721         /* Don't confuse frontend with zero length data move. */
12722         if (io->scsiio.kern_data_len == 0) {
12723                 io->scsiio.be_move_done(io);
12724                 return;
12725         }
12726
12727         fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12728         fe_datamove(io);
12729 }
12730
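/*
 * A minimal sketch (not part of the original source) of the contract
 * the fe_datamove callback used above fulfills: move kern_data_len
 * bytes between the transport and the buffer described by
 * kern_data_ptr (by CTL convention a flat buffer when kern_sg_entries
 * is 0, otherwise an array of that many struct ctl_sg_entry), then
 * hand the I/O back through be_move_done().
 */
#if 0
static void
example_fe_datamove(union ctl_io *io)
{
	struct ctl_sg_entry *sgl;
	int i;

	if (io->scsiio.kern_sg_entries > 0) {
		sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
			/* transfer sgl[i].len bytes at sgl[i].addr */
		}
	} else {
		/* transfer kern_data_len bytes at kern_data_ptr */
	}
	io->scsiio.be_move_done(io);
}
#endif
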
12731 static void
12732 ctl_send_datamove_done(union ctl_io *io, int have_lock)
12733 {
12734         union ctl_ha_msg msg;
12735 #ifdef CTL_TIME_IO
12736         struct bintime cur_bt;
12737 #endif
12738
12739         memset(&msg, 0, sizeof(msg));
12740         msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12741         msg.hdr.original_sc = io;
12742         msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12743         msg.hdr.nexus = io->io_hdr.nexus;
12744         msg.hdr.status = io->io_hdr.status;
12745         msg.scsi.tag_num = io->scsiio.tag_num;
12746         msg.scsi.tag_type = io->scsiio.tag_type;
12747         msg.scsi.scsi_status = io->scsiio.scsi_status;
12748         memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12749                io->scsiio.sense_len);
12750         msg.scsi.sense_len = io->scsiio.sense_len;
12751         msg.scsi.sense_residual = io->scsiio.sense_residual;
12752         msg.scsi.fetd_status = io->io_hdr.port_status;
12753         msg.scsi.residual = io->scsiio.residual;
12754         io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12755         if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12756                 ctl_failover_io(io, /*have_lock*/ have_lock);
12757                 return;
12758         }
12759         ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12760             sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +
12761             msg.scsi.sense_len, M_WAITOK);
12762
12763 #ifdef CTL_TIME_IO
12764         getbinuptime(&cur_bt);
12765         bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
12766         bintime_add(&io->io_hdr.dma_bt, &cur_bt);
12767 #endif
12768         io->io_hdr.num_dmas++;
12769 }
12770
12771 /*
12772  * The DMA to the remote side is done, now we need to tell the other side
12773  * we're done so it can continue with its data movement.
12774  */
12775 static void
12776 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12777 {
12778         union ctl_io *io;
12779         int i;
12780
12781         io = rq->context;
12782
12783         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12784                 printf("%s: ISC DMA write failed with error %d\n", __func__,
12785                        rq->ret);
12786                 ctl_set_internal_failure(&io->scsiio,
12787                                          /*sks_valid*/ 1,
12788                                          /*retry_count*/ rq->ret);
12789         }
12790
12791         ctl_dt_req_free(rq);
12792
12793         for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12794                 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12795         free(io->io_hdr.remote_sglist, M_CTL);
12796         io->io_hdr.remote_sglist = NULL;
12797         io->io_hdr.local_sglist = NULL;
12798
12799         /*
12800          * The data is in local and remote memory, so now we need to send
12801          * status (good or bad) back to the other side.
12802          */
12803         ctl_send_datamove_done(io, /*have_lock*/ 0);
12804 }
12805
12806 /*
12807  * We've moved the data from the host/controller into local memory.  Now we
12808  * need to push it over to the remote controller's memory.
12809  */
12810 static int
12811 ctl_datamove_remote_dm_write_cb(union ctl_io *io)
12812 {
12813         int retval;
12814
12815         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
12816                                           ctl_datamove_remote_write_cb);
12817         return (retval);
12818 }
12819
12820 static void
12821 ctl_datamove_remote_write(union ctl_io *io)
12822 {
12823         int retval;
12824         void (*fe_datamove)(union ctl_io *io);
12825
12826         /*
12827          * - Get the data from the host/HBA into local memory.
12828          * - DMA memory from the local controller to the remote controller.
12829          * - Send status back to the remote controller.
12830          */
12831
12832         retval = ctl_datamove_remote_sgl_setup(io);
12833         if (retval != 0)
12834                 return;
12835
12836         /* Switch the pointer over so the FETD knows what to do */
12837         io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12838
12839         /*
12840          * Use a custom move done callback, since we need to send completion
12841          * back to the other controller, not to the backend on this side.
12842          */
12843         io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12844
12845         fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12846         fe_datamove(io);
12847 }
12848
12849 static int
12850 ctl_datamove_remote_dm_read_cb(union ctl_io *io)
12851 {
12852 #if 0
12853         char str[256];
12854         char path_str[64];
12855         struct sbuf sb;
12856 #endif
12857         int i;
12858
12859         for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12860                 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12861         free(io->io_hdr.remote_sglist, M_CTL);
12862         io->io_hdr.remote_sglist = NULL;
12863         io->io_hdr.local_sglist = NULL;
12864
12865 #if 0
12866         ctl_scsi_path_string(io, path_str, sizeof(path_str));
12867         sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12868         sbuf_cat(&sb, path_str);
12869         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12870         sbuf_printf(&sb, "\n");
12871         sbuf_cat(&sb, path_str);
12872         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12873                     io->scsiio.tag_num, io->scsiio.tag_type);
12874         sbuf_cat(&sb, path_str);
12875         sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
12876                     io->io_hdr.flags, io->io_hdr.status);
12877         sbuf_finish(&sb);
12878         printf("%s", sbuf_data(&sb));
12879 #endif
12880
12881
12882         /*
12883          * The read is done, now we need to send status (good or bad) back
12884          * to the other side.
12885          */
12886         ctl_send_datamove_done(io, /*have_lock*/ 0);
12887
12888         return (0);
12889 }
12890
12891 static void
12892 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
12893 {
12894         union ctl_io *io;
12895         void (*fe_datamove)(union ctl_io *io);
12896
12897         io = rq->context;
12898
12899         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12900                 printf("%s: ISC DMA read failed with error %d\n", __func__,
12901                        rq->ret);
12902                 ctl_set_internal_failure(&io->scsiio,
12903                                          /*sks_valid*/ 1,
12904                                          /*retry_count*/ rq->ret);
12905         }
12906
12907         ctl_dt_req_free(rq);
12908
12909         /* Switch the pointer over so the FETD knows what to do */
12910         io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12911
12912         /*
12913          * Use a custom move done callback, since we need to send completion
12914          * back to the other controller, not to the backend on this side.
12915          */
12916         io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
12917
12918         /* XXX KDM add checks like the ones in ctl_datamove? */
12919
12920         fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
12921         fe_datamove(io);
12922 }
12923
12924 static int
12925 ctl_datamove_remote_sgl_setup(union ctl_io *io)
12926 {
12927         struct ctl_sg_entry *local_sglist;
12928         uint32_t len_to_go;
12929         int retval;
12930         int i;
12931
12932         retval = 0;
12933         local_sglist = io->io_hdr.local_sglist;
12934         len_to_go = io->scsiio.kern_data_len;
12935
12936         /*
12937          * The difficult thing here is that the size of the various
12938          * S/G segments may differ from the segment sizes used by the
12939          * remote controller.  That'll make it harder when DMAing
12940          * the data back to the other side.
12941          */
12942         for (i = 0; len_to_go > 0; i++) {
12943                 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT);
12944                 local_sglist[i].addr =
12945                     malloc(local_sglist[i].len, M_CTL, M_WAITOK);
12946
12947                 len_to_go -= local_sglist[i].len;
12948         }
12949         /*
12950          * Reset the number of S/G entries accordingly.  The original
12951          * number of S/G entries is available in rem_sg_entries.
12952          */
12953         io->scsiio.kern_sg_entries = i;
12954
12955 #if 0
12956         printf("%s: kern_sg_entries = %d\n", __func__,
12957                io->scsiio.kern_sg_entries);
12958         for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12959                 printf("%s: sg[%d] = %p, %lu\n", __func__, i,
12960                        local_sglist[i].addr, local_sglist[i].len);
12961 #endif
12962
12963         return (retval);
12964 }
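/*
 * Worked example (illustrative, assuming CTL_HA_DATAMOVE_SEGMENT is
 * 128KB): a 300KB kern_data_len makes the loop above allocate three
 * local bounce segments of 128KB, 128KB and 44KB and set
 * kern_sg_entries to 3, independent of how the remote side had
 * segmented the same 300KB.
 */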
12965
12966 static int
12967 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
12968                          ctl_ha_dt_cb callback)
12969 {
12970         struct ctl_ha_dt_req *rq;
12971         struct ctl_sg_entry *remote_sglist, *local_sglist;
12972         uint32_t local_used, remote_used, total_used;
12973         int i, j, isc_ret;
12974
12975         rq = ctl_dt_req_alloc();
12976
12977         /*
12978          * If we failed to allocate the request, and if the DMA didn't fail
12979          * anyway, set busy status.  This is just a resource allocation
12980          * failure.
12981          */
12982         if ((rq == NULL)
12983          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
12984              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
12985                 ctl_set_busy(&io->scsiio);
12986
12987         if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
12988             (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
12989
12990                 if (rq != NULL)
12991                         ctl_dt_req_free(rq);
12992
12993                 /*
12994                  * The data move failed.  We need to return status back
12995                  * to the other controller.  No point in trying to DMA
12996                  * data to the remote controller.
12997                  */
12998
12999                 ctl_send_datamove_done(io, /*have_lock*/ 0);
13000
13001                 return (1);
13002         }
13003
13004         local_sglist = io->io_hdr.local_sglist;
13005         remote_sglist = io->io_hdr.remote_sglist;
13006         local_used = 0;
13007         remote_used = 0;
13008         total_used = 0;
13009
13010         /*
13011          * Pull/push the data over the wire from/to the other controller.
13012          * This takes into account the possibility that the local and
13013          * remote sglists may not be identical in terms of the size of
13014          * the elements and the number of elements.
13015          *
13016          * One fundamental assumption here is that the length allocated for
13017          * both the local and remote sglists is identical.  Otherwise, we've
13018          * essentially got a coding error of some sort.
13019          */
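        /*
         * Worked example (illustrative): local segments of 4KB + 4KB
         * against a single 8KB remote segment yield two HA requests:
         * 4KB from local[0] at remote offset 0, then 4KB from local[1]
         * at remote offset 4KB.  Only the final request carries the
         * completion callback.
         */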
13020         isc_ret = CTL_HA_STATUS_SUCCESS;
13021         for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
13022                 uint32_t cur_len;
13023                 uint8_t *tmp_ptr;
13024
13025                 rq->command = command;
13026                 rq->context = io;
13027
13028                 /*
13029                  * Both pointers should be aligned.  But it is possible
13030                  * that the allocation length is not.  They should both
13031                  * also have enough slack left over at the end, though,
13032                  * to round up to the next 8 byte boundary.
13033                  */
13034                 cur_len = MIN(local_sglist[i].len - local_used,
13035                               remote_sglist[j].len - remote_used);
13036                 rq->size = cur_len;
13037
13038                 tmp_ptr = (uint8_t *)local_sglist[i].addr;
13039                 tmp_ptr += local_used;
13040
13041 #if 0
13042                 /* Use physical addresses when talking to ISC hardware */
13043                 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
13044                         /* XXX KDM use busdma */
13045                         rq->local = vtophys(tmp_ptr);
13046                 } else
13047                         rq->local = tmp_ptr;
13048 #else
13049                 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0,
13050                     ("HA does not support BUS_ADDR"));
13051                 rq->local = tmp_ptr;
13052 #endif
13053
13054                 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
13055                 tmp_ptr += remote_used;
13056                 rq->remote = tmp_ptr;
13057
13058                 rq->callback = NULL;
13059
13060                 local_used += cur_len;
13061                 if (local_used >= local_sglist[i].len) {
13062                         i++;
13063                         local_used = 0;
13064                 }
13065
13066                 remote_used += cur_len;
13067                 if (remote_used >= remote_sglist[j].len) {
13068                         j++;
13069                         remote_used = 0;
13070                 }
13071                 total_used += cur_len;
13072
13073                 if (total_used >= io->scsiio.kern_data_len)
13074                         rq->callback = callback;
13075
13076 #if 0
13077                 printf("%s: %s: local %p remote %p size %d\n", __func__,
13078                        (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
13079                        rq->local, rq->remote, rq->size);
13080 #endif
13081
13082                 isc_ret = ctl_dt_single(rq);
13083                 if (isc_ret > CTL_HA_STATUS_SUCCESS)
13084                         break;
13085         }
13086         if (isc_ret != CTL_HA_STATUS_WAIT) {
13087                 rq->ret = isc_ret;
13088                 callback(rq);
13089         }
13090
13091         return (0);
13092 }
13093
13094 static void
13095 ctl_datamove_remote_read(union ctl_io *io)
13096 {
13097         int retval;
13098         int i;
13099
13100         /*
13101          * This will send an error to the other controller in the case of a
13102          * failure.
13103          */
13104         retval = ctl_datamove_remote_sgl_setup(io);
13105         if (retval != 0)
13106                 return;
13107
13108         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
13109                                           ctl_datamove_remote_read_cb);
13110         if (retval != 0) {
13111                 /*
13112                  * Make sure we free memory if there was an error.  The
13113                  * ctl_datamove_remote_xfer() function will send the
13114                  * datamove done message, or call the callback with an
13115                  * error if there is a problem.
13116                  */
13117                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13118                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
13119                 free(io->io_hdr.remote_sglist, M_CTL);
13120                 io->io_hdr.remote_sglist = NULL;
13121                 io->io_hdr.local_sglist = NULL;
13122         }
13123 }
13124
13125 /*
13126  * Process a datamove request from the other controller.  This is used for
13127  * XFER mode only, not SER_ONLY mode.  For writes, we DMA into local memory
13128  * first.  Once that is complete, the data gets DMAed into the remote
13129  * controller's memory.  For reads, we DMA from the remote controller's
13130  * memory into our memory first, and then move it out to the FETD.
13131  */
13132 static void
13133 ctl_datamove_remote(union ctl_io *io)
13134 {
13135
13136         mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
13137
13138         if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13139                 ctl_failover_io(io, /*have_lock*/ 0);
13140                 return;
13141         }
13142
13143         /*
13144          * Note that we look for an aborted I/O here, but don't do some of
13145          * the other checks that ctl_datamove() normally does.
13146          * We don't need to run the datamove delay code, since that should
13147          * have been done if need be on the other controller.
13148          */
13149         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13150                 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
13151                        io->scsiio.tag_num, io->io_hdr.nexus.initid,
13152                        io->io_hdr.nexus.targ_port,
13153                        io->io_hdr.nexus.targ_lun);
13154                 io->io_hdr.port_status = 31338;  /* arbitrary nonzero "aborted" error */
13155                 ctl_send_datamove_done(io, /*have_lock*/ 0);
13156                 return;
13157         }
13158
13159         if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
13160                 ctl_datamove_remote_write(io);
13161         else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
13162                 ctl_datamove_remote_read(io);
13163         else {
13164                 io->io_hdr.port_status = 31339;  /* arbitrary nonzero "bad direction" error */
13165                 ctl_send_datamove_done(io, /*have_lock*/ 0);
13166         }
13167 }
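/*
 * Illustrative summary of the XFER-mode data paths handled above:
 *
 *   WRITE (DATA_OUT): fe_datamove() pulls the data from the initiator
 *       into local bounce buffers, CTL_HA_DT_CMD_WRITE pushes it to the
 *       remote controller, then ctl_send_datamove_done() reports status.
 *   READ (DATA_IN):  CTL_HA_DT_CMD_READ pulls the data from the remote
 *       controller into local bounce buffers, fe_datamove() moves it out
 *       to the FETD, then ctl_send_datamove_done() reports status.
 */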
13168
13169 static void
13170 ctl_process_done(union ctl_io *io)
13171 {
13172         struct ctl_lun *lun;
13173         struct ctl_softc *softc = control_softc;
13174         void (*fe_done)(union ctl_io *io);
13175         union ctl_ha_msg msg;
13176         uint32_t targ_port = io->io_hdr.nexus.targ_port;
13177
13178         CTL_DEBUG_PRINT(("ctl_process_done\n"));
13179         fe_done = softc->ctl_ports[targ_port]->fe_done;
13180
13181 #ifdef CTL_TIME_IO
13182         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
13183                 char str[256];
13184                 char path_str[64];
13185                 struct sbuf sb;
13186
13187                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
13188                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
13189
13190                 sbuf_cat(&sb, path_str);
13191                 switch (io->io_hdr.io_type) {
13192                 case CTL_IO_SCSI:
13193                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
13194                         sbuf_printf(&sb, "\n");
13195                         sbuf_cat(&sb, path_str);
13196                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
13197                                     io->scsiio.tag_num, io->scsiio.tag_type);
13198                         break;
13199                 case CTL_IO_TASK:
13200                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
13201                                     "Tag Type: %d\n", io->taskio.task_action,
13202                                     io->taskio.tag_num, io->taskio.tag_type);
13203                         break;
13204                 default:
13205                         panic("%s: Invalid CTL I/O type %d\n",
13206                             __func__, io->io_hdr.io_type);
13207                 }
13208                 sbuf_cat(&sb, path_str);
13209                 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
13210                             (intmax_t)time_uptime - io->io_hdr.start_time);
13211                 sbuf_finish(&sb);
13212                 printf("%s", sbuf_data(&sb));
13213         }
13214 #endif /* CTL_TIME_IO */
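        /*
         * An illustrative slow-I/O report as built above (hypothetical
         * values; the per-line nexus prefix from ctl_scsi_path_string()
         * is omitted):
         *
         *   READ(10). CDB: 28 00 00 00 08 00 00 00 10 00
         *   Tag: 0x002a, type 1
         *   ctl_process_done: 6 seconds
         */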
13215
13216         switch (io->io_hdr.io_type) {
13217         case CTL_IO_SCSI:
13218                 break;
13219         case CTL_IO_TASK:
13220                 if (ctl_debug & CTL_DEBUG_INFO)
13221                         ctl_io_error_print(io, NULL);
13222                 fe_done(io);
13223                 return;
13224         default:
13225                 panic("%s: Invalid CTL I/O type %d\n",
13226                     __func__, io->io_hdr.io_type);
13227         }
13228
13229         lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13230         if (lun == NULL) {
13231                 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
13232                                  io->io_hdr.nexus.targ_mapped_lun));
13233                 goto bailout;
13234         }
13235
13236         mtx_lock(&lun->lun_lock);
13237
13238         /*
13239          * Check to see if we have any errors to inject here.  We only
13240          * inject errors for commands that don't already have errors set.
13241          */
13242         if (!STAILQ_EMPTY(&lun->error_list) &&
13243             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
13244             ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
13245                 ctl_inject_error(lun, io);
13246
13247         /*
13248          * XXX KDM how do we treat commands that aren't completed
13249          * successfully?
13250          *
13251          * XXX KDM should we also track I/O latency?
13252          */
13253         if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
13254             io->io_hdr.io_type == CTL_IO_SCSI) {
13255 #ifdef CTL_TIME_IO
13256                 struct bintime cur_bt;
13257 #endif
13258                 int type;
13259
13260                 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13261                     CTL_FLAG_DATA_IN)
13262                         type = CTL_STATS_READ;
13263                 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13264                     CTL_FLAG_DATA_OUT)
13265                         type = CTL_STATS_WRITE;
13266                 else
13267                         type = CTL_STATS_NO_IO;
13268
13269                 lun->stats.ports[targ_port].bytes[type] +=
13270                     io->scsiio.kern_total_len;
13271                 lun->stats.ports[targ_port].operations[type]++;
13272 #ifdef CTL_TIME_IO
13273                 bintime_add(&lun->stats.ports[targ_port].dma_time[type],
13274                    &io->io_hdr.dma_bt);
13275                 getbinuptime(&cur_bt);
13276                 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
13277                 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
13278 #endif
13279                 lun->stats.ports[targ_port].num_dmas[type] +=
13280                     io->io_hdr.num_dmas;
13281         }
13282
13283         /*
13284          * Remove this from the OOA queue.
13285          */
13286         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
13287 #ifdef CTL_TIME_IO
13288         if (TAILQ_EMPTY(&lun->ooa_queue))
13289                 lun->last_busy = getsbinuptime();
13290 #endif
13291
13292         /*
13293          * Run through the blocked queue on this LUN and see if anything
13294          * has become unblocked, now that this transaction is done.
13295          */
13296         ctl_check_blocked(lun);
13297
13298         /*
13299          * If the LUN has been invalidated, free it if there is nothing
13300          * left on its OOA queue.
13301          */
13302         if ((lun->flags & CTL_LUN_INVALID)
13303          && TAILQ_EMPTY(&lun->ooa_queue)) {
13304                 mtx_unlock(&lun->lun_lock);
13305                 mtx_lock(&softc->ctl_lock);
13306                 ctl_free_lun(lun);
13307                 mtx_unlock(&softc->ctl_lock);
13308         } else
13309                 mtx_unlock(&lun->lun_lock);
13310
13311 bailout:
13312
13313         /*
13314          * If this command has been aborted, make sure we set the status
13315          * properly.  The FETD is responsible for freeing the I/O and doing
13316          * whatever it needs to do to clean up its state.
13317          */
13318         if (io->io_hdr.flags & CTL_FLAG_ABORT)
13319                 ctl_set_task_aborted(&io->scsiio);
13320
13321         /*
13322          * If enabled, print command error status.
13323          */
13324         if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
13325             (ctl_debug & CTL_DEBUG_INFO) != 0)
13326                 ctl_io_error_print(io, NULL);
13327
13328         /*
13329          * Tell the FETD or the other shelf controller we're done with this
13330          * command.  Note that only SCSI commands get to this point.  Task
13331          * management commands are completed above.
13332          */
13333         if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
13334             (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
13335                 memset(&msg, 0, sizeof(msg));
13336                 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
13337                 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
13338                 msg.hdr.nexus = io->io_hdr.nexus;
13339                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13340                     sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
13341                     M_WAITOK);
13342         }
13343
13344         fe_done(io);
13345 }
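/*
 * Illustrative only: a minimal sketch (not built) of how the per-port
 * counters accumulated in ctl_process_done() could be turned into an
 * average per-I/O latency.  The helper name is hypothetical; the bintime
 * conversion follows the usual sys/time.h sec + 2^-64 fraction layout,
 * and the fields match struct ctl_lun_io_port_stats as used above.
 */
#if 0
static uint64_t
ctl_example_avg_latency_us(struct ctl_lun_io_port_stats *ps, int type)
{
	struct bintime *bt = &ps->time[type];
	uint64_t us;

	if (ps->operations[type] == 0)
		return (0);
	/* Seconds, plus the top 32 bits of the fraction scaled to usec. */
	us = (uint64_t)bt->sec * 1000000 +
	    (((uint64_t)1000000 * (uint32_t)(bt->frac >> 32)) >> 32);
	return (us / ps->operations[type]);
}
#endif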
13346
13347 #ifdef CTL_WITH_CA
13348 /*
13349  * Front end should call this if it doesn't do autosense.  When the request
13350  * sense comes back in from the initiator, we'll dequeue this and send it.
13351  */
13352 int
13353 ctl_queue_sense(union ctl_io *io)
13354 {
13355         struct ctl_lun *lun;
13356         struct ctl_port *port;
13357         struct ctl_softc *softc;
13358         uint32_t initidx, targ_lun;
13359
13360         softc = control_softc;
13361
13362         CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13363
13364         /*
13365          * LUN lookup will likely move to the ctl_work_thread() once we
13366          * have our new queueing infrastructure (that doesn't put things on
13367          * a per-LUN queue initially).  That is so that we can handle
13368          * things like an INQUIRY to a LUN that we don't have enabled.  We
13369          * can't deal with that right now.
13370          */
13371         mtx_lock(&softc->ctl_lock);
13372
13373         /*
13374          * If we don't have a LUN for this, just toss the sense
13375          * information.
13376          */
13377         port = ctl_io_port(&io->io_hdr);
13378         targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13379         if ((targ_lun < CTL_MAX_LUNS)
13380          && (softc->ctl_luns[targ_lun] != NULL))
13381                 lun = softc->ctl_luns[targ_lun];
13382         else
13383                 goto bailout;
13384
13385         initidx = ctl_get_initindex(&io->io_hdr.nexus);
13386
13387         mtx_lock(&lun->lun_lock);
13388         /*
13389          * Already have CA set for this LUN...toss the sense information.
13390          */
13391         if (ctl_is_set(lun->have_ca, initidx)) {
13392                 mtx_unlock(&lun->lun_lock);
13393                 goto bailout;
13394         }
13395
13396         memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
13397                MIN(sizeof(lun->pending_sense[initidx]),
13398                sizeof(io->scsiio.sense_data)));
13399         ctl_set_mask(lun->have_ca, initidx);
13400         mtx_unlock(&lun->lun_lock);
13401
13402 bailout:
13403         mtx_unlock(&softc->ctl_lock);
13404
13405         ctl_free_io(io);
13406
13407         return (CTL_RETVAL_COMPLETE);
13408 }
13409 #endif
13410
13411 /*
13412  * Primary command inlet from frontend ports.  All SCSI and task I/O
13413  * requests must go through this function.
13414  */
13415 int
13416 ctl_queue(union ctl_io *io)
13417 {
13418         struct ctl_port *port;
13419
13420         CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
13421
13422 #ifdef CTL_TIME_IO
13423         io->io_hdr.start_time = time_uptime;
13424         getbinuptime(&io->io_hdr.start_bt);
13425 #endif /* CTL_TIME_IO */
13426
13427         /* Map FE-specific LUN ID into global one. */
13428         port = ctl_io_port(&io->io_hdr);
13429         io->io_hdr.nexus.targ_mapped_lun =
13430             ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13431
13432         switch (io->io_hdr.io_type) {
13433         case CTL_IO_SCSI:
13434         case CTL_IO_TASK:
13435                 if (ctl_debug & CTL_DEBUG_CDB)
13436                         ctl_io_print(io);
13437                 ctl_enqueue_incoming(io);
13438                 break;
13439         default:
13440                 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
13441                 return (EINVAL);
13442         }
13443
13444         return (CTL_RETVAL_COMPLETE);
13445 }
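/*
 * Illustrative only: a minimal sketch of how a frontend port driver
 * (FETD) might feed a command into the inlet above.  The nexus values
 * and the use of ctl_scsi_tur() are assumptions made for the example,
 * not a recipe taken from any particular frontend.
 */
#if 0
static void
ctl_example_fetd_submit(struct ctl_port *port)
{
	union ctl_io *io;

	io = ctl_alloc_io(port->ctl_pool_ref);	/* sleeps for memory */
	ctl_zero_io(io);
	io->io_hdr.io_type = CTL_IO_SCSI;
	io->io_hdr.nexus.targ_port = port->targ_port;
	io->io_hdr.nexus.initid = 0;		/* hypothetical initiator */
	io->io_hdr.nexus.targ_lun = 0;		/* FE-local LUN id */
	ctl_scsi_tur(io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	if (ctl_queue(io) != CTL_RETVAL_COMPLETE)
		ctl_free_io(io);
}
#endif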
13446
13447 #ifdef CTL_IO_DELAY
13448 static void
13449 ctl_done_timer_wakeup(void *arg)
13450 {
13451         union ctl_io *io;
13452
13453         io = (union ctl_io *)arg;
13454         ctl_done(io);
13455 }
13456 #endif /* CTL_IO_DELAY */
13457
13458 void
13459 ctl_serseq_done(union ctl_io *io)
13460 {
13461         struct ctl_lun *lun;
13462
13463         lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13464         if (lun->be_lun == NULL ||
13465             lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
13466                 return;
13467         mtx_lock(&lun->lun_lock);
13468         io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
13469         ctl_check_blocked(lun);
13470         mtx_unlock(&lun->lun_lock);
13471 }
13472
13473 void
13474 ctl_done(union ctl_io *io)
13475 {
13476
13477         /*
13478          * Enable this to catch duplicate completion issues.
13479          */
13480 #if 0
13481         if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
13482                 printf("%s: type %d msg %d cdb %x iptl: "
13483                        "%u:%u:%u tag 0x%04x "
13484                        "flag %#x status %x\n",
13485                         __func__,
13486                         io->io_hdr.io_type,
13487                         io->io_hdr.msg_type,
13488                         io->scsiio.cdb[0],
13489                         io->io_hdr.nexus.initid,
13490                         io->io_hdr.nexus.targ_port,
13491                         io->io_hdr.nexus.targ_lun,
13492                         (io->io_hdr.io_type ==
13493                         CTL_IO_TASK) ?
13494                         io->taskio.tag_num :
13495                         io->scsiio.tag_num,
13496                         io->io_hdr.flags,
13497                         io->io_hdr.status);
13498         } else
13499                 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
13500 #endif
13501
13502         /*
13503          * This is an internal copy of an I/O, and should not go through
13504          * the normal done processing logic.
13505          */
13506         if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
13507                 return;
13508
13509 #ifdef CTL_IO_DELAY
13510         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13511                 struct ctl_lun *lun;
13512
13513                 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13514
13515                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13516         } else {
13517                 struct ctl_lun *lun;
13518
13519                 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13520
13521                 if ((lun != NULL)
13522                  && (lun->delay_info.done_delay > 0)) {
13523
13524                         callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
13525                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13526                         callout_reset(&io->io_hdr.delay_callout,
13527                                       lun->delay_info.done_delay * hz,
13528                                       ctl_done_timer_wakeup, io);
13529                         if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
13530                                 lun->delay_info.done_delay = 0;
13531                         return;
13532                 }
13533         }
13534 #endif /* CTL_IO_DELAY */
13535
13536         ctl_enqueue_done(io);
13537 }
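/*
 * Note (informational): when the kernel is built with CTL_IO_DELAY, the
 * per-LUN done_delay consumed above is normally armed from userland via
 * the CTL_DELAY_IO ioctl; a zeroed delay leaves this path as a plain
 * ctl_enqueue_done().
 */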
13538
13539 static void
13540 ctl_work_thread(void *arg)
13541 {
13542         struct ctl_thread *thr = (struct ctl_thread *)arg;
13543         struct ctl_softc *softc = thr->ctl_softc;
13544         union ctl_io *io;
13545         int retval;
13546
13547         CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
13548
13549         for (;;) {
13550                 /*
13551                  * We handle the queues in this order:
13552                  * - ISC
13553                  * - done queue (to free up resources, unblock other commands)
13554                  * - incoming queue
13555                  * - RtR queue
13556                  *
13557                  * If those queues are empty, we break out of the loop and
13558                  * go to sleep.
13559                  */
13560                 mtx_lock(&thr->queue_lock);
13561                 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
13562                 if (io != NULL) {
13563                         STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
13564                         mtx_unlock(&thr->queue_lock);
13565                         ctl_handle_isc(io);
13566                         continue;
13567                 }
13568                 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
13569                 if (io != NULL) {
13570                         STAILQ_REMOVE_HEAD(&thr->done_queue, links);
13571                         /* clear any blocked commands, call fe_done */
13572                         mtx_unlock(&thr->queue_lock);
13573                         ctl_process_done(io);
13574                         continue;
13575                 }
13576                 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
13577                 if (io != NULL) {
13578                         STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
13579                         mtx_unlock(&thr->queue_lock);
13580                         if (io->io_hdr.io_type == CTL_IO_TASK)
13581                                 ctl_run_task(io);
13582                         else
13583                                 ctl_scsiio_precheck(softc, &io->scsiio);
13584                         continue;
13585                 }
13586                 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
13587                 if (io != NULL) {
13588                         STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
13589                         mtx_unlock(&thr->queue_lock);
13590                         retval = ctl_scsiio(&io->scsiio);
13591                         if (retval != CTL_RETVAL_COMPLETE)
13592                                 CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
13593                         continue;
13594                 }
13595
13596                 /* Sleep until we have something to do. */
13597                 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
13598         }
13599 }
13600
13601 static void
13602 ctl_lun_thread(void *arg)
13603 {
13604         struct ctl_softc *softc = (struct ctl_softc *)arg;
13605         struct ctl_be_lun *be_lun;
13606
13607         CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
13608
13609         for (;;) {
13610                 mtx_lock(&softc->ctl_lock);
13611                 be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
13612                 if (be_lun != NULL) {
13613                         STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
13614                         mtx_unlock(&softc->ctl_lock);
13615                         ctl_create_lun(be_lun);
13616                         continue;
13617                 }
13618
13619                 /* Sleep until we have something to do. */
13620                 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
13621                     PDROP | PRIBIO, "-", 0);
13622         }
13623 }
13624
13625 static void
13626 ctl_thresh_thread(void *arg)
13627 {
13628         struct ctl_softc *softc = (struct ctl_softc *)arg;
13629         struct ctl_lun *lun;
13630         struct scsi_da_rw_recovery_page *rwpage;
13631         struct ctl_logical_block_provisioning_page *page;
13632         const char *attr;
13633         union ctl_ha_msg msg;
13634         uint64_t thres, val;
13635         int i, e, set;
13636
13637         CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
13638
13639         for (;;) {
13640                 mtx_lock(&softc->ctl_lock);
13641                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
13642                         if ((lun->flags & CTL_LUN_DISABLED) ||
13643                             (lun->flags & CTL_LUN_NO_MEDIA) ||
13644                             lun->backend->lun_attr == NULL)
13645                                 continue;
13646                         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
13647                             softc->ha_mode == CTL_HA_MODE_XFER)
13648                                 continue;
13649                         rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
13650                         if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
13651                                 continue;
13652                         e = 0;
13653                         page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
13654                         for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
13655                                 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
13656                                         continue;
13657                                 thres = scsi_4btoul(page->descr[i].count);
13658                                 thres <<= CTL_LBP_EXPONENT;
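                                /*
                                 * E.g. (illustrative, assuming
                                 * CTL_LBP_EXPONENT is 11, i.e. units of
                                 * 2048 blocks): a descriptor count of 16
                                 * arms the threshold at 16 << 11 = 32768
                                 * blocks.
                                 */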
13659                                 switch (page->descr[i].resource) {
13660                                 case 0x01:
13661                                         attr = "blocksavail";
13662                                         break;
13663                                 case 0x02:
13664                                         attr = "blocksused";
13665                                         break;
13666                                 case 0xf1:
13667                                         attr = "poolblocksavail";
13668                                         break;
13669                                 case 0xf2:
13670                                         attr = "poolblocksused";
13671                                         break;
13672                                 default:
13673                                         continue;
13674                                 }
13675                                 mtx_unlock(&softc->ctl_lock); /* XXX */
13676                                 val = lun->backend->lun_attr(
13677                                     lun->be_lun->be_lun, attr);
13678                                 mtx_lock(&softc->ctl_lock);
13679                                 if (val == UINT64_MAX)
13680                                         continue;
13681                                 if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
13682                                     == SLBPPD_ARMING_INC)
13683                                         e = (val >= thres);
13684                                 else
13685                                         e = (val <= thres);
13686                                 if (e)
13687                                         break;
13688                         }
13689                         mtx_lock(&lun->lun_lock);
13690                         if (e) {
13691                                 scsi_u64to8b((uint8_t *)&page->descr[i] -
13692                                     (uint8_t *)page, lun->ua_tpt_info);
13693                                 if (lun->lasttpt == 0 ||
13694                                     time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
13695                                         lun->lasttpt = time_uptime;
13696                                         ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13697                                         set = 1;
13698                                 } else
13699                                         set = 0;
13700                         } else {
13701                                 lun->lasttpt = 0;
13702                                 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
13703                                 set = -1;
13704                         }
13705                         mtx_unlock(&lun->lun_lock);
13706                         if (set != 0 &&
13707                             lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
13708                                 /* Send msg to other side. */
13709                                 bzero(&msg.ua, sizeof(msg.ua));
13710                                 msg.hdr.msg_type = CTL_MSG_UA;
13711                                 msg.hdr.nexus.initid = -1;
13712                                 msg.hdr.nexus.targ_port = -1;
13713                                 msg.hdr.nexus.targ_lun = lun->lun;
13714                                 msg.hdr.nexus.targ_mapped_lun = lun->lun;
13715                                 msg.ua.ua_all = 1;
13716                                 msg.ua.ua_set = (set > 0);
13717                                 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES;
13718                                 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8);
13719                                 mtx_unlock(&softc->ctl_lock); /* XXX */
13720                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13721                                     sizeof(msg.ua), M_WAITOK);
13722                                 mtx_lock(&softc->ctl_lock);
13723                         }
13724                 }
13725                 mtx_unlock(&softc->ctl_lock);
13726                 pause("-", CTL_LBP_PERIOD * hz);
13727         }
13728 }
13729
13730 static void
13731 ctl_enqueue_incoming(union ctl_io *io)
13732 {
13733         struct ctl_softc *softc = control_softc;
13734         struct ctl_thread *thr;
13735         u_int idx;
13736
13737         idx = (io->io_hdr.nexus.targ_port * 127 +
13738                io->io_hdr.nexus.initid) % worker_threads;
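        /*
         * E.g. (illustrative): targ_port 3, initid 5 and 4 worker
         * threads select (3 * 127 + 5) % 4 = 386 % 4 = thread 2.
         */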
13739         thr = &softc->threads[idx];
13740         mtx_lock(&thr->queue_lock);
13741         STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
13742         mtx_unlock(&thr->queue_lock);
13743         wakeup(thr);
13744 }
13745
13746 static void
13747 ctl_enqueue_rtr(union ctl_io *io)
13748 {
13749         struct ctl_softc *softc = control_softc;
13750         struct ctl_thread *thr;
13751
13752         thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13753         mtx_lock(&thr->queue_lock);
13754         STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
13755         mtx_unlock(&thr->queue_lock);
13756         wakeup(thr);
13757 }
13758
13759 static void
13760 ctl_enqueue_done(union ctl_io *io)
13761 {
13762         struct ctl_softc *softc = control_softc;
13763         struct ctl_thread *thr;
13764
13765         thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13766         mtx_lock(&thr->queue_lock);
13767         STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
13768         mtx_unlock(&thr->queue_lock);
13769         wakeup(thr);
13770 }
13771
13772 static void
13773 ctl_enqueue_isc(union ctl_io *io)
13774 {
13775         struct ctl_softc *softc = control_softc;
13776         struct ctl_thread *thr;
13777
13778         thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13779         mtx_lock(&thr->queue_lock);
13780         STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13781         mtx_unlock(&thr->queue_lock);
13782         wakeup(thr);
13783 }
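/*
 * Note (informational): ctl_enqueue_rtr(), ctl_enqueue_done() and
 * ctl_enqueue_isc() all hash on targ_mapped_lun, so every stage of work
 * for a given LUN lands on the same worker thread; only
 * ctl_enqueue_incoming() spreads load by initiator nexus instead.
 */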
13784
13785 /*
13786  *  vim: ts=8
13787  */