/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)                              \
        if (((uint32_t)source & 0x7) != 0)                              \
                target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
        else                                                            \
                target = (type)source;

#define CTL_SIZE_8B(target, size)                                       \
        if ((size & 0x7) != 0)                                          \
                target = size + (0x8 - (size & 0x7));                   \
        else                                                            \
                target = size;

#define CTL_ALIGN_8B_MARGIN     16

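/*
 * Illustrative use of the alignment macros (a minimal sketch; the
 * raw_buf and raw_len names are hypothetical): round a pointer up to
 * an 8-byte boundary and a length up to the next multiple of 8:
 *
 *      uint8_t *aligned_buf;
 *      uint32_t aligned_len;
 *      CTL_ALIGN_8B(aligned_buf, raw_buf, uint8_t *);
 *      CTL_SIZE_8B(aligned_len, raw_len);
 */
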
/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
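
/*
 * Each mode page below has a "default" template (the saved/power-on
 * values) and a "changeable" template (a mask of the fields an
 * initiator may modify via MODE SELECT).
 */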
static struct copan_power_subpage power_page_default = {
        /*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
        /*subpage*/ PWR_SUBPAGE_CODE,
        /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) >> 8,
                         (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
        /*page_version*/ PWR_VERSION,
        /*total_luns*/ 26,
        /*max_active_luns*/ PWR_DFLT_MAX_LUNS,
        /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0}
};

static struct copan_power_subpage power_page_changeable = {
        /*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
        /*subpage*/ PWR_SUBPAGE_CODE,
        /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) >> 8,
                         (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
        /*page_version*/ 0,
        /*total_luns*/ 0,
        /*max_active_luns*/ 0,
        /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0}
};

static struct copan_aps_subpage aps_page_default = {
        APS_PAGE_CODE | SMPH_SPF, //page_code
        APS_SUBPAGE_CODE, //subpage
        {(sizeof(struct copan_aps_subpage) - 4) >> 8,
         (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
        APS_VERSION, //page_version
        0, //lock_active
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0} //reserved
};

static struct copan_aps_subpage aps_page_changeable = {
        APS_PAGE_CODE | SMPH_SPF, //page_code
        APS_SUBPAGE_CODE, //subpage
        {(sizeof(struct copan_aps_subpage) - 4) >> 8,
         (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
        0, //page_version
        0, //lock_active
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0} //reserved
};

static struct copan_debugconf_subpage debugconf_page_default = {
        DBGCNF_PAGE_CODE | SMPH_SPF,    /* page_code */
        DBGCNF_SUBPAGE_CODE,            /* subpage */
        {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
         (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
        DBGCNF_VERSION,                 /* page_version */
        {CTL_TIME_IO_DEFAULT_SECS>>8,
         CTL_TIME_IO_DEFAULT_SECS>>0},  /* ctl_time_io_secs */
};

static struct copan_debugconf_subpage debugconf_page_changeable = {
        DBGCNF_PAGE_CODE | SMPH_SPF,    /* page_code */
        DBGCNF_SUBPAGE_CODE,            /* subpage */
        {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
         (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
        0,                              /* page_version */
        {0xff,0xff},                    /* ctl_time_io_secs */
};

static struct scsi_format_page format_page_default = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
                                CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ SFP_HSEC,
        /*reserved*/ {0, 0, 0}
};

static struct scsi_format_page format_page_changeable = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {0, 0},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ 0,
        /*reserved*/ {0, 0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_default = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ CTL_DEFAULT_HEADS,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ SRDP_RPL_DISABLED,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
                           CTL_DEFAULT_ROTATION_RATE & 0xff},
        /*reserved2*/ {0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ 0,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ 0,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {0, 0},
        /*reserved2*/ {0, 0}
};

static struct scsi_caching_page caching_page_default = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_DISC | SCP_WCE,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0xff, 0xff},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0xff, 0xff},
        /*max_pf_ceiling*/ {0xff, 0xff},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_caching_page caching_page_changeable = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_WCE | SCP_RCD,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0, 0},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0, 0},
        /*max_pf_ceiling*/ {0, 0},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_control_page control_page_default = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/0,
        /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
        /*eca_and_aen*/0,
        /*flags4*/SCP_TAS,
        /*aen_holdoff_period*/{0, 0},
        /*busy_timeout_period*/{0, 0},
        /*extended_selftest_completion_time*/{0, 0}
};

static struct scsi_control_page control_page_changeable = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/SCP_DSENSE,
        /*queue_flags*/SCP_QUEUE_ALG_MASK,
        /*eca_and_aen*/0,
        /*flags4*/0,
        /*aen_holdoff_period*/{0, 0},
        /*busy_timeout_period*/{0, 0},
        /*extended_selftest_completion_time*/{0, 0}
};


/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int     ctl_is_single = 1;
static int     index_to_aps_page;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int verbose = 0;
TUNABLE_INT("kern.cam.ctl.verbose", &verbose);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
    &verbose, 0, "Show SCSI errors returned to initiator");
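
/*
 * Both of the above can also be set as loader tunables, e.g. from
 * /boot/loader.conf (the values below are only examples):
 *
 *      kern.cam.ctl.worker_threads="4"
 *      kern.cam.ctl.verbose="1"
 */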

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES   10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
                                  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
                                              struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
                              struct ctl_ooa *ooa_hdr,
                              struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
                     struct thread *td);
static uint32_t ctl_map_lun(int port_num, uint32_t lun);
static uint32_t ctl_map_lun_back(int port_num, uint32_t lun);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
                                   uint32_t targ_target, uint32_t targ_lun,
                                   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
                         struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
                                      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
                           uint64_t res_key, uint64_t sa_res_key,
                           uint8_t type, uint32_t residx,
                           struct ctl_scsiio *ctsio,
                           struct scsi_per_res_out *cdb,
                           struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
                                  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
                                         int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
                                         int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
                                union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
                                struct ctl_lun *lun,
                                const struct ctl_cmd_entry *entry,
                                struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
                               struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
                            ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
                         ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
                                    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       ctl_open,
        .d_close =      ctl_close,
        .d_ioctl =      ctl_ioctl,
        .d_name =       "ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
        "ctl",
        ctl_module_event_handler,
        NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ioctl_frontend =
{
        .name = "ioctl",
};

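/*
 * Finish an I/O on the originating SC in XFER mode: copy the status,
 * SCSI status and sense data from the HA message back into the
 * original ctl_scsiio and queue it to the ISC thread for completion.
 */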
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
                            union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.original_sc == NULL) {
                printf("%s: original_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.original_sc->scsiio;
        ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctsio->io_hdr.status = msg_info->hdr.status;
        ctsio->scsi_status = msg_info->scsi.scsi_status;
        ctsio->sense_len = msg_info->scsi.sense_len;
        ctsio->sense_residual = msg_info->scsi.sense_residual;
        ctsio->residual = msg_info->scsi.residual;
        memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
               sizeof(ctsio->sense_data));
        memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
               &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
        ctl_enqueue_isc((union ctl_io *)ctsio);
}

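/*
 * Finish an I/O on the serializing SC in SER_ONLY mode: nothing needs
 * to be copied back, the shadow I/O is simply marked finished and
 * queued to the ISC thread for release.
 */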
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
                                union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.serializing_sc == NULL) {
                printf("%s: serializing_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
        /*
         * Attempt to catch the situation where an I/O has
         * been freed, and we're using it again.
         */
        if (ctsio->io_hdr.io_type == 0xff) {
                union ctl_io *tmp_io;
                tmp_io = (union ctl_io *)ctsio;
                printf("%s: %p use after free!\n", __func__,
                       ctsio);
                printf("%s: type %d msg %d cdb %x iptl: "
                       "%d:%d:%d:%d tag 0x%04x "
                       "flag %#x status %x\n",
                        __func__,
                        tmp_io->io_hdr.io_type,
                        tmp_io->io_hdr.msg_type,
                        tmp_io->scsiio.cdb[0],
                        tmp_io->io_hdr.nexus.initid.id,
                        tmp_io->io_hdr.nexus.targ_port,
                        tmp_io->io_hdr.nexus.targ_target.id,
                        tmp_io->io_hdr.nexus.targ_lun,
                        (tmp_io->io_hdr.io_type ==
                        CTL_IO_TASK) ?
                        tmp_io->taskio.tag_num :
                        tmp_io->scsiio.tag_num,
                        tmp_io->io_hdr.flags,
                        tmp_io->io_hdr.status);
        }
#endif
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
        struct ctl_softc *ctl_softc;
        union ctl_io *io;
        struct ctl_prio *presio;
        ctl_ha_status isc_status;

        ctl_softc = control_softc;
        io = NULL;


#if 0
        printf("CTL: Isc Msg event %d\n", event);
#endif
        if (event == CTL_HA_EVT_MSG_RECV) {
                union ctl_ha_msg msg_info;

                isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
                                             sizeof(msg_info), /*wait*/ 0);
#if 0
                printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
                if (isc_status != 0) {
                        printf("Error receiving message, status = %d\n",
                               isc_status);
                        return;
                }

                switch (msg_info.hdr.msg_type) {
                case CTL_MSG_SERIALIZE:
#if 0
                        printf("Serialize\n");
#endif
                        io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
                        if (io == NULL) {
                                printf("ctl_isc_event_handler: can't allocate "
                                       "ctl_io!\n");
                                /* Bad Juju */
                                /* Need to set busy and send msg back */
                                msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
                                msg_info.hdr.status = CTL_SCSI_ERROR;
                                msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
                                msg_info.scsi.sense_len = 0;
                                if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                                    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
                                }
                                goto bailout;
                        }
                        ctl_zero_io(io);
                        /* populate ctsio from msg_info */
                        io->io_hdr.io_type = CTL_IO_SCSI;
                        io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
                        io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
                        printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
                        io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
                                            CTL_FLAG_IO_ACTIVE;
                        /*
                         * If we're in serialization-only mode, we don't
                         * want to go through full done processing.  Thus
                         * the COPY flag.
                         *
                         * XXX KDM add another flag that is more specific.
                         */
                        if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
                                io->io_hdr.flags |= CTL_FLAG_INT_COPY;
                        io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
                        printf("targ %d, port %d, iid %d, lun %d\n",
                               io->io_hdr.nexus.targ_target.id,
                               io->io_hdr.nexus.targ_port,
                               io->io_hdr.nexus.initid.id,
                               io->io_hdr.nexus.targ_lun);
#endif
                        io->scsiio.tag_num = msg_info.scsi.tag_num;
                        io->scsiio.tag_type = msg_info.scsi.tag_type;
                        memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
                               CTL_MAX_CDBLEN);
                        if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
                                const struct ctl_cmd_entry *entry;

                                entry = ctl_get_cmd_entry(&io->scsiio);
                                io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
                                io->io_hdr.flags |=
                                        entry->flags & CTL_FLAG_DATA_MASK;
                        }
                        ctl_enqueue_isc(io);
                        break;

                /* Performed on the Originating SC, XFER mode only */
                case CTL_MSG_DATAMOVE: {
                        struct ctl_sg_entry *sgl;
                        int i, j;

                        io = msg_info.hdr.original_sc;
                        if (io == NULL) {
                                printf("%s: original_sc == NULL!\n", __func__);
                                /* XXX KDM do something here */
                                break;
                        }
                        io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
                        io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
                        /*
                         * Keep track of this, we need to send it back over
                         * when the datamove is complete.
                         */
                        io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

                        if (msg_info.dt.sg_sequence == 0) {
                                /*
                                 * XXX KDM we use the preallocated S/G list
                                 * here, but we'll need to change this to
                                 * dynamic allocation if we need larger S/G
                                 * lists.
                                 */
                                if (msg_info.dt.kern_sg_entries >
                                    sizeof(io->io_hdr.remote_sglist) /
                                    sizeof(io->io_hdr.remote_sglist[0])) {
                                        printf("%s: number of S/G entries "
                                            "needed %u > allocated num %zd\n",
                                            __func__,
                                            msg_info.dt.kern_sg_entries,
                                            sizeof(io->io_hdr.remote_sglist)/
                                            sizeof(io->io_hdr.remote_sglist[0]));

                                        /*
                                         * XXX KDM send a message back to
                                         * the other side to shut down the
                                         * DMA.  The error will come back
                                         * through via the normal channel.
                                         */
                                        break;
                                }
                                sgl = io->io_hdr.remote_sglist;
                                memset(sgl, 0,
                                       sizeof(io->io_hdr.remote_sglist));

                                io->scsiio.kern_data_ptr = (uint8_t *)sgl;

                                io->scsiio.kern_sg_entries =
                                        msg_info.dt.kern_sg_entries;
                                io->scsiio.rem_sg_entries =
                                        msg_info.dt.kern_sg_entries;
                                io->scsiio.kern_data_len =
                                        msg_info.dt.kern_data_len;
                                io->scsiio.kern_total_len =
                                        msg_info.dt.kern_total_len;
                                io->scsiio.kern_data_resid =
                                        msg_info.dt.kern_data_resid;
                                io->scsiio.kern_rel_offset =
                                        msg_info.dt.kern_rel_offset;
                                /*
                                 * Clear out per-DMA flags.
                                 */
                                io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
                                /*
                                 * Add per-DMA flags that are set for this
                                 * particular DMA request.
                                 */
                                io->io_hdr.flags |= msg_info.dt.flags &
                                                    CTL_FLAG_RDMA_MASK;
                        } else
                                sgl = (struct ctl_sg_entry *)
                                        io->scsiio.kern_data_ptr;

                        for (i = msg_info.dt.sent_sg_entries, j = 0;
                             i < (msg_info.dt.sent_sg_entries +
                             msg_info.dt.cur_sg_entries); i++, j++) {
                                sgl[i].addr = msg_info.dt.sg_list[j].addr;
                                sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
                                printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
                                       __func__,
                                       msg_info.dt.sg_list[j].addr,
                                       msg_info.dt.sg_list[j].len,
                                       sgl[i].addr, sgl[i].len, j, i);
#endif
                        }
#if 0
                        memcpy(&sgl[msg_info.dt.sent_sg_entries],
                               msg_info.dt.sg_list,
                               sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

                        /*
                         * If this is the last piece of the I/O, we've got
                         * the full S/G list.  Queue processing in the thread.
                         * Otherwise wait for the next piece.
                         */
                        if (msg_info.dt.sg_last != 0)
                                ctl_enqueue_isc(io);
                        break;
                }
                /* Performed on the Serializing (primary) SC, XFER mode only */
                case CTL_MSG_DATAMOVE_DONE: {
                        if (msg_info.hdr.serializing_sc == NULL) {
                                printf("%s: serializing_sc == NULL!\n",
                                       __func__);
                                /* XXX KDM now what? */
                                break;
                        }
                        /*
                         * We grab the sense information here in case
                         * there was a failure, so we can return status
                         * back to the initiator.
                         */
                        io = msg_info.hdr.serializing_sc;
                        io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
                        io->io_hdr.status = msg_info.hdr.status;
                        io->scsiio.scsi_status = msg_info.scsi.scsi_status;
                        io->scsiio.sense_len = msg_info.scsi.sense_len;
                        io->scsiio.sense_residual =msg_info.scsi.sense_residual;
                        io->io_hdr.port_status = msg_info.scsi.fetd_status;
                        io->scsiio.residual = msg_info.scsi.residual;
                        memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
                               sizeof(io->scsiio.sense_data));
                        ctl_enqueue_isc(io);
                        break;
                }

                /* Performed on the Originating SC, SER_ONLY mode */
                case CTL_MSG_R2R:
                        io = msg_info.hdr.original_sc;
                        if (io == NULL) {
                                printf("%s: Major Bummer\n", __func__);
                                return;
                        } else {
#if 0
                                printf("pOrig %x\n",(int) ctsio);
#endif
                        }
                        io->io_hdr.msg_type = CTL_MSG_R2R;
                        io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
                        ctl_enqueue_isc(io);
                        break;

                /*
                 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
                 * mode.
                 * Performed on the Originating (i.e. secondary) SC in XFER
                 * mode.
                 */
                case CTL_MSG_FINISH_IO:
                        if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
                                ctl_isc_handler_finish_xfer(ctl_softc,
                                                            &msg_info);
                        else
                                ctl_isc_handler_finish_ser_only(ctl_softc,
                                                                &msg_info);
                        break;

                /* Performed on the Originating SC */
                case CTL_MSG_BAD_JUJU:
                        io = msg_info.hdr.original_sc;
                        if (io == NULL) {
                                printf("%s: Bad JUJU!, original_sc is NULL!\n",
                                       __func__);
                                break;
                        }
                        ctl_copy_sense_data(&msg_info, io);
                        /*
                         * IO should have already been cleaned up on other
                         * SC so clear this flag so we won't send a message
                         * back to finish the IO there.
                         */
                        io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
                        io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

                        /* io = msg_info.hdr.serializing_sc; */
                        io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
                        ctl_enqueue_isc(io);
                        break;

                /* Handle resets sent from the other side */
                case CTL_MSG_MANAGE_TASKS: {
                        struct ctl_taskio *taskio;
                        taskio = (struct ctl_taskio *)ctl_alloc_io(
                                (void *)ctl_softc->othersc_pool);
                        if (taskio == NULL) {
                                printf("ctl_isc_event_handler: can't allocate "
                                       "ctl_io!\n");
                                /* Bad Juju */
                                /* should I just call the proper reset func
                                   here??? */
                                goto bailout;
                        }
                        ctl_zero_io((union ctl_io *)taskio);
                        taskio->io_hdr.io_type = CTL_IO_TASK;
                        taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
                        taskio->io_hdr.nexus = msg_info.hdr.nexus;
                        taskio->task_action = msg_info.task.task_action;
                        taskio->tag_num = msg_info.task.tag_num;
                        taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
                        taskio->io_hdr.start_time = time_uptime;
                        getbintime(&taskio->io_hdr.start_bt);
#if 0
                        cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
                        ctl_run_task((union ctl_io *)taskio);
                        break;
                }
                /* Persistent Reserve action which needs attention */
                case CTL_MSG_PERS_ACTION:
                        presio = (struct ctl_prio *)ctl_alloc_io(
                                (void *)ctl_softc->othersc_pool);
                        if (presio == NULL) {
                                printf("ctl_isc_event_handler: can't allocate "
                                       "ctl_io!\n");
                                /* Bad Juju */
                                /* Need to set busy and send msg back */
                                goto bailout;
                        }
                        ctl_zero_io((union ctl_io *)presio);
                        presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
                        presio->pr_msg = msg_info.pr;
                        ctl_enqueue_isc((union ctl_io *)presio);
                        break;
                case CTL_MSG_SYNC_FE:
                        rcv_sync_msg = 1;
                        break;
                case CTL_MSG_APS_LOCK: {
                        /* It's quicker to execute this than to queue it. */
                        struct ctl_lun *lun;
                        struct ctl_page_index *page_index;
                        struct copan_aps_subpage *current_sp;
                        uint32_t targ_lun;

                        targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
                        lun = ctl_softc->ctl_luns[targ_lun];
                        mtx_lock(&lun->lun_lock);
                        page_index = &lun->mode_pages.index[index_to_aps_page];
                        current_sp = (struct copan_aps_subpage *)
                                     (page_index->page_data +
                                     (page_index->page_len * CTL_PAGE_CURRENT));

                        current_sp->lock_active = msg_info.aps.lock_flag;
                        mtx_unlock(&lun->lun_lock);
                        break;
                }
                default:
                        printf("How did I get here?\n");
                }
        } else if (event == CTL_HA_EVT_MSG_SENT) {
                if (param != CTL_HA_STATUS_SUCCESS) {
                        printf("Bad status from ctl_ha_msg_send status %d\n",
                               param);
                }
                return;
        } else if (event == CTL_HA_EVT_DISCONNECT) {
                printf("CTL: Got a disconnect from Isc\n");
                return;
        } else {
                printf("ctl_isc_event_handler: Unknown event %d\n", event);
                return;
        }

bailout:
        return;
}

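/*
 * Copy the SCSI status, sense data and overall status from an HA
 * message into a local I/O.
 */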
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
        struct scsi_sense_data *sense;

        sense = &dest->scsiio.sense_data;
        bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
        dest->scsiio.scsi_status = src->scsi.scsi_status;
        dest->scsiio.sense_len = src->scsi.sense_len;
        dest->io_hdr.status = src->hdr.status;
}

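/*
 * One-time module initialization: allocate the softc, create the
 * /dev/cam/ctl device node and sysctl tree, set up the I/O pools and
 * worker threads, and register the ioctl frontend.
 */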
static int
ctl_init(void)
{
        struct ctl_softc *softc;
        struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
        struct ctl_port *port;
        uint8_t sc_id = 0;
        int i, error, retval;
        //int isc_retval;

        retval = 0;
        ctl_pause_rtr = 0;
        rcv_sync_msg = 0;

        control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
                               M_WAITOK | M_ZERO);
        softc = control_softc;

        softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
                              "cam/ctl");

        softc->dev->si_drv1 = softc;

        /*
         * By default, return a "bad LUN" peripheral qualifier for unknown
         * LUNs.  The user can override this default using the tunable or
         * sysctl.  See the comment in ctl_inquiry_std() for more details.
         */
        softc->inquiry_pq_no_lun = 1;
        TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
                          &softc->inquiry_pq_no_lun);
        sysctl_ctx_init(&softc->sysctl_ctx);
        softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
                SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
                CTLFLAG_RD, 0, "CAM Target Layer");

        if (softc->sysctl_tree == NULL) {
                printf("%s: unable to allocate sysctl tree\n", __func__);
                destroy_dev(softc->dev);
                free(control_softc, M_DEVBUF);
                control_softc = NULL;
                return (ENOMEM);
        }

        SYSCTL_ADD_INT(&softc->sysctl_ctx,
                       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
                       "inquiry_pq_no_lun", CTLFLAG_RW,
                       &softc->inquiry_pq_no_lun, 0,
                       "Report no lun possible for invalid LUNs");

        mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
        mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
        softc->open_count = 0;

        /*
         * Default to actually sending a SYNCHRONIZE CACHE command down to
         * the drive.
         */
        softc->flags = CTL_FLAG_REAL_SYNC;

        /*
         * In Copan's HA scheme, the "master" and "slave" roles are
         * figured out through the slot the controller is in.  Although it
         * is an active/active system, someone has to be in charge.
         */
#ifdef NEEDTOPORT
        scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

        if (sc_id == 0) {
                softc->flags |= CTL_FLAG_MASTER_SHELF;
                persis_offset = 0;
        } else
                persis_offset = CTL_MAX_INITIATORS;

        /*
         * XXX KDM need to figure out where we want to get our target ID
         * and WWID.  Is it different on each port?
         */
        softc->target.id = 0;
        softc->target.wwid[0] = 0x12345678;
        softc->target.wwid[1] = 0x87654321;
        STAILQ_INIT(&softc->lun_list);
        STAILQ_INIT(&softc->pending_lun_queue);
        STAILQ_INIT(&softc->fe_list);
        STAILQ_INIT(&softc->port_list);
        STAILQ_INIT(&softc->be_list);
        STAILQ_INIT(&softc->io_pools);
        ctl_tpc_init(softc);

        if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
                            &internal_pool) != 0) {
                printf("ctl: can't allocate %d entry internal pool, "
                       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
                return (ENOMEM);
        }

        if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
                            CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
                printf("ctl: can't allocate %d entry emergency pool, "
                       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
                ctl_pool_free(internal_pool);
                return (ENOMEM);
        }

        if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
                            &other_pool) != 0) {
                printf("ctl: can't allocate %d entry other SC pool, "
                       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
                ctl_pool_free(internal_pool);
                ctl_pool_free(emergency_pool);
                return (ENOMEM);
        }

        softc->internal_pool = internal_pool;
        softc->emergency_pool = emergency_pool;
        softc->othersc_pool = other_pool;

        if (worker_threads <= 0)
                worker_threads = max(1, mp_ncpus / 4);
        if (worker_threads > CTL_MAX_THREADS)
                worker_threads = CTL_MAX_THREADS;

        for (i = 0; i < worker_threads; i++) {
                struct ctl_thread *thr = &softc->threads[i];

                mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
                thr->ctl_softc = softc;
                STAILQ_INIT(&thr->incoming_queue);
                STAILQ_INIT(&thr->rtr_queue);
                STAILQ_INIT(&thr->done_queue);
                STAILQ_INIT(&thr->isc_queue);

                error = kproc_kthread_add(ctl_work_thread, thr,
                    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
                if (error != 0) {
                        printf("error creating CTL work thread!\n");
                        ctl_pool_free(internal_pool);
                        ctl_pool_free(emergency_pool);
                        ctl_pool_free(other_pool);
                        return (error);
                }
        }
        error = kproc_kthread_add(ctl_lun_thread, softc,
            &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
        if (error != 0) {
                printf("error creating CTL lun thread!\n");
                ctl_pool_free(internal_pool);
                ctl_pool_free(emergency_pool);
                ctl_pool_free(other_pool);
                return (error);
        }
        if (bootverbose)
                printf("ctl: CAM Target Layer loaded\n");

        /*
         * Initialize the ioctl front end.
         */
        ctl_frontend_register(&ioctl_frontend);
        port = &softc->ioctl_info.port;
        port->frontend = &ioctl_frontend;
        sprintf(softc->ioctl_info.port_name, "ioctl");
        port->port_type = CTL_PORT_IOCTL;
        port->num_requested_ctl_io = 100;
        port->port_name = softc->ioctl_info.port_name;
        port->port_online = ctl_ioctl_online;
        port->port_offline = ctl_ioctl_offline;
        port->onoff_arg = &softc->ioctl_info;
        port->lun_enable = ctl_ioctl_lun_enable;
        port->lun_disable = ctl_ioctl_lun_disable;
        port->targ_lun_arg = &softc->ioctl_info;
        port->fe_datamove = ctl_ioctl_datamove;
        port->fe_done = ctl_ioctl_done;
        port->max_targets = 15;
        port->max_target_id = 15;

        if (ctl_port_register(&softc->ioctl_info.port,
                          (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
                printf("ctl: ioctl front end registration failed, will "
                       "continue anyway\n");
        }

#ifdef CTL_IO_DELAY
        if (sizeof(struct callout) > CTL_TIMER_BYTES) {
                printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
                       sizeof(struct callout), CTL_TIMER_BYTES);
                return (EINVAL);
        }
#endif /* CTL_IO_DELAY */

        return (0);
}

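/*
 * Tear everything down in roughly the reverse order of ctl_init():
 * deregister the ioctl port and frontend, free all LUNs and I/O pools,
 * and destroy the locks, device node and sysctl tree.
 */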
void
ctl_shutdown(void)
{
        struct ctl_softc *softc;
        struct ctl_lun *lun, *next_lun;
        struct ctl_io_pool *pool;

        softc = (struct ctl_softc *)control_softc;

        if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
                printf("ctl: ioctl front end deregistration failed\n");

        mtx_lock(&softc->ctl_lock);

        /*
         * Free up each LUN.
         */
        for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
                next_lun = STAILQ_NEXT(lun, links);
                ctl_free_lun(lun);
        }

        mtx_unlock(&softc->ctl_lock);

        ctl_frontend_deregister(&ioctl_frontend);

        /*
         * This will rip the rug out from under any FETDs or anyone else
         * that has a pool allocated.  Since we increment our module
         * refcount any time someone outside the main CTL module allocates
         * a pool, we shouldn't have any problems here.  The user won't be
         * able to unload the CTL module until client modules have
         * successfully unloaded.
         */
        while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
                ctl_pool_free(pool);

#if 0
        ctl_shutdown_thread(softc->work_thread);
        mtx_destroy(&softc->queue_lock);
#endif

        ctl_tpc_shutdown(softc);
        mtx_destroy(&softc->pool_lock);
        mtx_destroy(&softc->ctl_lock);

        destroy_dev(softc->dev);

        sysctl_ctx_free(&softc->sysctl_ctx);

        free(control_softc, M_DEVBUF);
        control_softc = NULL;

        if (bootverbose)
                printf("ctl: CAM Target Layer unloaded\n");
}

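/*
 * Module event handler; note that once loaded, the module cannot be
 * unloaded (MOD_UNLOAD always returns EBUSY).
 */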
static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

        switch (what) {
        case MOD_LOAD:
                return (ctl_init());
        case MOD_UNLOAD:
                return (EBUSY);
        default:
                return (EOPNOTSUPP);
        }
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        return (0);
}

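/*
 * Bring online every registered port whose type matches port_type,
 * synchronizing with the peer SC first when running in HA mode.
 */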
1225 int
1226 ctl_port_enable(ctl_port_type port_type)
1227 {
1228         struct ctl_softc *softc;
1229         struct ctl_port *port;
1230
1231         if (ctl_is_single == 0) {
1232                 union ctl_ha_msg msg_info;
1233                 int isc_retval;
1234
1235 #if 0
1236                 printf("%s: HA mode, synchronizing frontend enable\n",
1237                         __func__);
1238 #endif
1239                 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
1240                 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1241                         sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
1242                         printf("Sync msg send error retval %d\n", isc_retval);
1243                 }
1244                 if (!rcv_sync_msg) {
1245                         isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
1246                                 sizeof(msg_info), 1);
1247                 }
1248 #if 0
1249                 printf("CTL:Frontend Enable\n");
1250         } else {
1251                 printf("%s: single mode, skipping frontend synchronization\n",
1252                         __func__);
1253 #endif
1254         }
1255
1256         softc = control_softc;
1257
1258         STAILQ_FOREACH(port, &softc->port_list, links) {
1259                 if (port_type & port->port_type)
1260                 {
1261 #if 0
1262                         printf("port %d\n", port->targ_port);
1263 #endif
1264                         ctl_port_online(port);
1265                 }
1266         }
1267
1268         return (0);
1269 }
1270
1271 int
1272 ctl_port_disable(ctl_port_type port_type)
1273 {
1274         struct ctl_softc *softc;
1275         struct ctl_port *port;
1276
1277         softc = control_softc;
1278
1279         STAILQ_FOREACH(port, &softc->port_list, links) {
1280                 if (port_type & port->port_type)
1281                         ctl_port_offline(port);
1282         }
1283
1284         return (0);
1285 }
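
/*
 * Usage sketch (illustrative only, kept under #if 0): port_type is a bit
 * mask, so a frontend can bring a whole class of ports online and offline
 * with one call each, e.g. for Fibre Channel:
 */
#if 0
        ctl_port_enable(CTL_PORT_FC);
        /* ... ports of that type are now processing I/O ... */
        ctl_port_disable(CTL_PORT_FC);
#endif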
1286
1287 /*
1288  * Returns 0 for success, 1 for failure.
1289  * Currently the only failure mode is if there aren't enough entries
1290  * allocated.  So, in case of a failure, look at num_entries_dropped,
1291  * reallocate and try again.
1292  */
1293 int
1294 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
1295               int *num_entries_filled, int *num_entries_dropped,
1296               ctl_port_type port_type, int no_virtual)
1297 {
1298         struct ctl_softc *softc;
1299         struct ctl_port *port;
1300         int entries_dropped, entries_filled;
1301         int retval;
1302         int i;
1303
1304         softc = control_softc;
1305
1306         retval = 0;
1307         entries_filled = 0;
1308         entries_dropped = 0;
1309
1310         i = 0;
1311         mtx_lock(&softc->ctl_lock);
1312         STAILQ_FOREACH(port, &softc->port_list, links) {
1313                 struct ctl_port_entry *entry;
1314
1315                 if ((port->port_type & port_type) == 0)
1316                         continue;
1317
1318                 if ((no_virtual != 0)
1319                  && (port->virtual_port != 0))
1320                         continue;
1321
1322                 if (entries_filled >= num_entries_alloced) {
1323                         entries_dropped++;
1324                         continue;
1325                 }
1326                 entry = &entries[i];
1327
1328                 entry->port_type = port->port_type;
1329                 strlcpy(entry->port_name, port->port_name,
1330                         sizeof(entry->port_name));
1331                 entry->physical_port = port->physical_port;
1332                 entry->virtual_port = port->virtual_port;
1333                 entry->wwnn = port->wwnn;
1334                 entry->wwpn = port->wwpn;
1335
1336                 i++;
1337                 entries_filled++;
1338         }
1339
1340         mtx_unlock(&softc->ctl_lock);
1341
1342         if (entries_dropped > 0)
1343                 retval = 1;
1344
1345         *num_entries_dropped = entries_dropped;
1346         *num_entries_filled = entries_filled;
1347
1348         return (retval);
1349 }
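
/*
 * Usage sketch (illustrative only, kept under #if 0): the
 * reallocate-and-retry pattern suggested in the comment above
 * ctl_port_list().  M_TEMP and the initial size of 8 are arbitrary
 * choices for the example:
 */
#if 0
        struct ctl_port_entry *entries;
        int alloced, filled, dropped;

        alloced = 8;
        entries = malloc(alloced * sizeof(*entries), M_TEMP, M_WAITOK);
        while (ctl_port_list(entries, alloced, &filled, &dropped,
            CTL_PORT_ALL, /*no_virtual*/ 0) != 0) {
                /* Not enough room; grow by the number of dropped entries. */
                free(entries, M_TEMP);
                alloced += dropped;
                entries = malloc(alloced * sizeof(*entries), M_TEMP,
                    M_WAITOK);
        }
        /* entries[0 .. filled - 1] are now valid. */
        free(entries, M_TEMP);
#endif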
1350
1351 static void
1352 ctl_ioctl_online(void *arg)
1353 {
1354         struct ctl_ioctl_info *ioctl_info;
1355
1356         ioctl_info = (struct ctl_ioctl_info *)arg;
1357
1358         ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
1359 }
1360
1361 static void
1362 ctl_ioctl_offline(void *arg)
1363 {
1364         struct ctl_ioctl_info *ioctl_info;
1365
1366         ioctl_info = (struct ctl_ioctl_info *)arg;
1367
1368         ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
1369 }
1370
1371 /*
1372  * Remove an initiator by port number and initiator ID.
1373  * Returns 0 for success, -1 for failure.
1374  */
1375 int
1376 ctl_remove_initiator(struct ctl_port *port, int iid)
1377 {
1378         struct ctl_softc *softc = control_softc;
1379
1380         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
1381
1382         if (iid >= CTL_MAX_INIT_PER_PORT) {
1383                 printf("%s: initiator ID %u >= maximum %u!\n",
1384                        __func__, iid, CTL_MAX_INIT_PER_PORT);
1385                 return (-1);
1386         }
1387
1388         mtx_lock(&softc->ctl_lock);
1389         port->wwpn_iid[iid].in_use--;
1390         port->wwpn_iid[iid].last_use = time_uptime;
1391         mtx_unlock(&softc->ctl_lock);
1392
1393         return (0);
1394 }
1395
1396 /*
1397  * Add an initiator to the initiator map.
1398  * Returns iid for success, < 0 for failure.
1399  */
1400 int
1401 ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
1402 {
1403         struct ctl_softc *softc = control_softc;
1404         time_t best_time;
1405         int i, best;
1406
1407         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
1408
1409         if (iid >= CTL_MAX_INIT_PER_PORT) {
1410                 printf("%s: WWPN %#jx initiator ID %u >= maximum %u!\n",
1411                        __func__, (uintmax_t)wwpn, iid, CTL_MAX_INIT_PER_PORT);
1412                 free(name, M_CTL);
1413                 return (-1);
1414         }
1415
1416         mtx_lock(&softc->ctl_lock);
1417
1418         if (iid < 0 && (wwpn != 0 || name != NULL)) {
1419                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1420                         if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
1421                                 iid = i;
1422                                 break;
1423                         }
1424                         if (name != NULL && port->wwpn_iid[i].name != NULL &&
1425                             strcmp(name, port->wwpn_iid[i].name) == 0) {
1426                                 iid = i;
1427                                 break;
1428                         }
1429                 }
1430         }
1431
1432         if (iid < 0) {
1433                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1434                         if (port->wwpn_iid[i].in_use == 0 &&
1435                             port->wwpn_iid[i].wwpn == 0 &&
1436                             port->wwpn_iid[i].name == NULL) {
1437                                 iid = i;
1438                                 break;
1439                         }
1440                 }
1441         }
1442
1443         if (iid < 0) {
1444                 best = -1;
1445                 best_time = INT32_MAX;
1446                 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
1447                         if (port->wwpn_iid[i].in_use == 0) {
1448                                 if (port->wwpn_iid[i].last_use < best_time) {
1449                                         best = i;
1450                                         best_time = port->wwpn_iid[i].last_use;
1451                                 }
1452                         }
1453                 }
1454                 iid = best;
1455         }
1456
1457         if (iid < 0) {
1458                 mtx_unlock(&softc->ctl_lock);
1459                 free(name, M_CTL);
1460                 return (-2);
1461         }
1462
1463         if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
1464                 /*
1465                  * This is not an error yet.
1466                  */
1467                 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
1468 #if 0
1469                         printf("%s: port %d iid %u WWPN %#jx arrived"
1470                             " again\n", __func__, port->targ_port,
1471                             iid, (uintmax_t)wwpn);
1472 #endif
1473                         goto take;
1474                 }
1475                 if (name != NULL && port->wwpn_iid[iid].name != NULL &&
1476                     strcmp(name, port->wwpn_iid[iid].name) == 0) {
1477 #if 0
1478                         printf("%s: port %d iid %u name '%s' arrived"
1479                             " again\n", __func__, port->targ_port,
1480                             iid, name);
1481 #endif
1482                         goto take;
1483                 }
1484
1485                 /*
1486                  * This is an error, but what do we do about it?  The
1487                  * driver is telling us we have a new WWPN for this
1488                  * initiator ID, so we pretty much need to use it.
1489                  */
1490                 printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
1491                     " but WWPN %#jx '%s' is still at that address\n",
1492                     __func__, port->targ_port, iid, (uintmax_t)wwpn, name,
1493                     (uintmax_t)port->wwpn_iid[iid].wwpn,
1494                     port->wwpn_iid[iid].name);
1495
1496                 /*
1497                  * XXX KDM clear have_ca and ua_pending on each LUN for
1498                  * this initiator.
1499                  */
1500         }
1501 take:
1502         free(port->wwpn_iid[iid].name, M_CTL);
1503         port->wwpn_iid[iid].name = name;
1504         port->wwpn_iid[iid].wwpn = wwpn;
1505         port->wwpn_iid[iid].in_use++;
1506         mtx_unlock(&softc->ctl_lock);
1507
1508         return (iid);
1509 }
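
/*
 * Usage sketch (illustrative only, kept under #if 0): how a frontend
 * might pair ctl_add_initiator() with ctl_remove_initiator().  Note that
 * the name buffer is consumed by CTL: it is either stored in the map or
 * freed with M_CTL on failure, so the caller must not free it.
 */
#if 0
        char *name;
        int iid;

        name = strdup("iqn.1994-09.org.freebsd:example", M_CTL);
        iid = ctl_add_initiator(port, /*iid*/ -1, /*wwpn*/ 0, name);
        if (iid < 0)
                return (ENOSPC);        /* CTL already freed the name. */
        /* ... I/O flows for this initiator ... */
        ctl_remove_initiator(port, iid);
#endif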
1510
1511 static int
1512 ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
1513 {
1514         int len;
1515
1516         switch (port->port_type) {
1517         case CTL_PORT_FC:
1518         {
1519                 struct scsi_transportid_fcp *id =
1520                     (struct scsi_transportid_fcp *)buf;
1521                 if (port->wwpn_iid[iid].wwpn == 0)
1522                         return (0);
1523                 memset(id, 0, sizeof(*id));
1524                 id->format_protocol = SCSI_PROTO_FC;
1525                 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
1526                 return (sizeof(*id));
1527         }
1528         case CTL_PORT_ISCSI:
1529         {
1530                 struct scsi_transportid_iscsi_port *id =
1531                     (struct scsi_transportid_iscsi_port *)buf;
1532                 if (port->wwpn_iid[iid].name == NULL)
1533                         return (0);
1534                 memset(id, 0, 256);
1535                 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
1536                     SCSI_PROTO_ISCSI;
1537                 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
1538                 len = roundup2(min(len, 252), 4);
1539                 scsi_ulto2b(len, id->additional_length);
1540                 return (sizeof(*id) + len);
1541         }
1542         case CTL_PORT_SAS:
1543         {
1544                 struct scsi_transportid_sas *id =
1545                     (struct scsi_transportid_sas *)buf;
1546                 if (port->wwpn_iid[iid].wwpn == 0)
1547                         return (0);
1548                 memset(id, 0, sizeof(*id));
1549                 id->format_protocol = SCSI_PROTO_SAS;
1550                 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
1551                 return (sizeof(*id));
1552         }
1553         default:
1554         {
1555                 struct scsi_transportid_spi *id =
1556                     (struct scsi_transportid_spi *)buf;
1557                 memset(id, 0, sizeof(*id));
1558                 id->format_protocol = SCSI_PROTO_SPI;
1559                 scsi_ulto2b(iid, id->scsi_addr);
1560                 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
1561                 return (sizeof(*id));
1562         }
1563         }
1564 }
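
/*
 * Worked example for the iSCSI case above (illustrative only): for a
 * 30-byte initiator name, strlcpy() returns 30, so len becomes 31 and is
 * rounded up to 32; additional_length is set to 32 and the returned
 * TransportID size is sizeof(*id) + 32, keeping the name field padded to
 * the 4-byte multiple that SPC-4 requires.
 */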
1565
1566 static int
1567 ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
1568 {
1569         return (0);
1570 }
1571
1572 static int
1573 ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
1574 {
1575         return (0);
1576 }
1577
1578 /*
1579  * Data movement routine for the CTL ioctl frontend port.
1580  */
1581 static int
1582 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
1583 {
1584         struct ctl_sg_entry *ext_sglist, *kern_sglist;
1585         struct ctl_sg_entry ext_entry, kern_entry;
1586         int ext_sglen, ext_sg_entries, kern_sg_entries;
1587         int ext_sg_start, ext_offset;
1588         int len_to_copy, len_copied;
1589         int kern_watermark, ext_watermark;
1590         int ext_sglist_malloced;
1591         int i, j;
1592
1593         ext_sglist_malloced = 0;
1594         ext_sg_start = 0;
1595         ext_offset = 0;
1596
1597         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
1598
1599         /*
1600          * If this flag is set, fake the data transfer.
1601          */
1602         if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
1603                 ctsio->ext_data_filled = ctsio->ext_data_len;
1604                 goto bailout;
1605         }
1606
1607         /*
1608          * To simplify things here, if we have a single buffer, stick it in
1609          * an S/G entry and just make it a single-entry S/G list.
1610          */
1611         if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
1612                 int len_seen;
1613
1614                 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
1615
1616                 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
1617                                                            M_WAITOK);
1618                 ext_sglist_malloced = 1;
1619                 if (copyin(ctsio->ext_data_ptr, ext_sglist,
1620                                    ext_sglen) != 0) {
1621                         ctl_set_internal_failure(ctsio,
1622                                                  /*sks_valid*/ 0,
1623                                                  /*retry_count*/ 0);
1624                         goto bailout;
1625                 }
1626                 ext_sg_entries = ctsio->ext_sg_entries;
1627                 len_seen = 0;
1628                 for (i = 0; i < ext_sg_entries; i++) {
1629                         if ((len_seen + ext_sglist[i].len) >=
1630                              ctsio->ext_data_filled) {
1631                                 ext_sg_start = i;
1632                                 ext_offset = ctsio->ext_data_filled - len_seen;
1633                                 break;
1634                         }
1635                         len_seen += ext_sglist[i].len;
1636                 }
1637         } else {
1638                 ext_sglist = &ext_entry;
1639                 ext_sglist->addr = ctsio->ext_data_ptr;
1640                 ext_sglist->len = ctsio->ext_data_len;
1641                 ext_sg_entries = 1;
1642                 ext_sg_start = 0;
1643                 ext_offset = ctsio->ext_data_filled;
1644         }
1645
1646         if (ctsio->kern_sg_entries > 0) {
1647                 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
1648                 kern_sg_entries = ctsio->kern_sg_entries;
1649         } else {
1650                 kern_sglist = &kern_entry;
1651                 kern_sglist->addr = ctsio->kern_data_ptr;
1652                 kern_sglist->len = ctsio->kern_data_len;
1653                 kern_sg_entries = 1;
1654         }
1655
1656
1657         kern_watermark = 0;
1658         ext_watermark = ext_offset;
1659         len_copied = 0;
1660         for (i = ext_sg_start, j = 0;
1661              i < ext_sg_entries && j < kern_sg_entries;) {
1662                 uint8_t *ext_ptr, *kern_ptr;
1663
1664                 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
1665                                       kern_sglist[j].len - kern_watermark);
1666
1667                 ext_ptr = (uint8_t *)ext_sglist[i].addr;
1668                 ext_ptr = ext_ptr + ext_watermark;
1669                 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
1670                         /*
1671                          * XXX KDM fix this!
1672                          */
1673                         panic("need to implement bus address support");
1674 #if 0
1675                         kern_ptr = bus_to_virt(kern_sglist[j].addr);
1676 #endif
1677                 } else
1678                         kern_ptr = (uint8_t *)kern_sglist[j].addr;
1679                 kern_ptr = kern_ptr + kern_watermark;
1680
1681                 kern_watermark += len_to_copy;
1682                 ext_watermark += len_to_copy;
1683
1684                 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
1685                      CTL_FLAG_DATA_IN) {
1686                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
1687                                          "bytes to user\n", len_to_copy));
1688                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
1689                                          "to %p\n", kern_ptr, ext_ptr));
1690                         if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
1691                                 ctl_set_internal_failure(ctsio,
1692                                                          /*sks_valid*/ 0,
1693                                                          /*retry_count*/ 0);
1694                                 goto bailout;
1695                         }
1696                 } else {
1697                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
1698                                          "bytes from user\n", len_to_copy));
1699                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
1700                                          "to %p\n", ext_ptr, kern_ptr));
1701                         if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
1702                                 ctl_set_internal_failure(ctsio,
1703                                                          /*sks_valid*/ 0,
1704                                                          /*retry_count*/ 0);
1705                                 goto bailout;
1706                         }
1707                 }
1708
1709                 len_copied += len_to_copy;
1710
1711                 if (ext_sglist[i].len == ext_watermark) {
1712                         i++;
1713                         ext_watermark = 0;
1714                 }
1715
1716                 if (kern_sglist[j].len == kern_watermark) {
1717                         j++;
1718                         kern_watermark = 0;
1719                 }
1720         }
1721
1722         ctsio->ext_data_filled += len_copied;
1723
1724         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
1725                          "kern_sg_entries: %d\n", ext_sg_entries,
1726                          kern_sg_entries));
1727         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
1728                          "kern_data_len = %d\n", ctsio->ext_data_len,
1729                          ctsio->kern_data_len));
1730
1731
1732         /* XXX KDM set residual?? */
1733 bailout:
1734
1735         if (ext_sglist_malloced != 0)
1736                 free(ext_sglist, M_CTL);
1737
1738         return (CTL_RETVAL_COMPLETE);
1739 }
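
/*
 * Sketch of the dual-watermark scatter/gather walk above, reduced to a
 * plain memcpy() between two S/G lists (illustrative only, kept under
 * #if 0; no copyin/copyout, no CTL flags).  At most one watermark resets
 * per iteration unless both segments end on the same byte:
 */
#if 0
        struct ctl_sg_entry *src, *dst; /* two hypothetical S/G lists */
        int i, j, nsrc, ndst;           /* indices and list lengths */
        size_t src_off, dst_off;        /* watermarks within a segment */

        i = j = 0;
        src_off = dst_off = 0;
        while (i < nsrc && j < ndst) {
                size_t n = ctl_min(src[i].len - src_off,
                    dst[j].len - dst_off);

                memcpy((char *)dst[j].addr + dst_off,
                    (char *)src[i].addr + src_off, n);
                src_off += n;
                dst_off += n;
                if (src_off == src[i].len) {
                        i++;
                        src_off = 0;
                }
                if (dst_off == dst[j].len) {
                        j++;
                        dst_off = 0;
                }
        }
#endif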
1740
1741 /*
1742  * Serialize a command that went down the "wrong" side, and so was sent to
1743  * this controller for execution.  The logic is a little different from the
1744  * standard case in ctl_scsiio_precheck().  Errors in this case need to get
1745  * sent back to the other side, but in the success case, we execute the
1746  * command on this side (XFER mode) or tell the other side to execute it
1747  * (SER_ONLY mode).
1748  */
1749 static int
1750 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
1751 {
1752         struct ctl_softc *ctl_softc;
1753         union ctl_ha_msg msg_info;
1754         struct ctl_lun *lun;
1755         int retval = 0;
1756         uint32_t targ_lun;
1757
1758         ctl_softc = control_softc;
1759
1760         targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
1761         lun = ctl_softc->ctl_luns[targ_lun];
1762         if (lun == NULL)
1763         {
1764                 /*
1765                  * Why isn't LUN defined? The other side wouldn't
1766                  * send a cmd if the LUN is undefined.
1767                  */
1768                 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);
1769
1770                 /* "Logical unit not supported" */
1771                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1772                                    lun,
1773                                    /*sense_format*/SSD_TYPE_NONE,
1774                                    /*current_error*/ 1,
1775                                    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1776                                    /*asc*/ 0x25,
1777                                    /*ascq*/ 0x00,
1778                                    SSD_ELEM_NONE);
1779
1780                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1781                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1782                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1783                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1784                 msg_info.hdr.serializing_sc = NULL;
1785                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1786                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1787                                 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1788                 }
1789                 return (1);
1790
1791         }
1792
1793         mtx_lock(&lun->lun_lock);
1794         TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1795
1796         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
1797                 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
1798                  ooa_links))) {
1799         case CTL_ACTION_BLOCK:
1800                 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
1801                 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
1802                                   blocked_links);
1803                 break;
1804         case CTL_ACTION_PASS:
1805         case CTL_ACTION_SKIP:
1806                 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
1807                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
1808                         ctl_enqueue_rtr((union ctl_io *)ctsio);
1809                 } else {
1810
1811                         /* send msg back to other side */
1812                         msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1813                         msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
1814                         msg_info.hdr.msg_type = CTL_MSG_R2R;
1815 #if 0
1816                         printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
1817 #endif
1818                         if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1819                             sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1820                         }
1821                 }
1822                 break;
1823         case CTL_ACTION_OVERLAP:
1824                 /* OVERLAPPED COMMANDS ATTEMPTED */
1825                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1826                                    lun,
1827                                    /*sense_format*/SSD_TYPE_NONE,
1828                                    /*current_error*/ 1,
1829                                    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1830                                    /*asc*/ 0x4E,
1831                                    /*ascq*/ 0x00,
1832                                    SSD_ELEM_NONE);
1833
1834                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1835                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1836                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1837                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1838                 msg_info.hdr.serializing_sc = NULL;
1839                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1840 #if 0
1841                 printf("BAD JUJU:Major Bummer Overlap\n");
1842 #endif
1843                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1844                 retval = 1;
1845                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1846                     sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1847                 }
1848                 break;
1849         case CTL_ACTION_OVERLAP_TAG:
1850                 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
1851                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1852                                    lun,
1853                                    /*sense_format*/SSD_TYPE_NONE,
1854                                    /*current_error*/ 1,
1855                                    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1856                                    /*asc*/ 0x4D,
1857                                    /*ascq*/ ctsio->tag_num & 0xff,
1858                                    SSD_ELEM_NONE);
1859
1860                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1861                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1862                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1863                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1864                 msg_info.hdr.serializing_sc = NULL;
1865                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1866 #if 0
1867                 printf("BAD JUJU:Major Bummer Overlap Tag\n");
1868 #endif
1869                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1870                 retval = 1;
1871                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1872                     sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1873                 }
1874                 break;
1875         case CTL_ACTION_ERROR:
1876         default:
1877                 /* "Internal target failure" */
1878                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1879                                    lun,
1880                                    /*sense_format*/SSD_TYPE_NONE,
1881                                    /*current_error*/ 1,
1882                                    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
1883                                    /*asc*/ 0x44,
1884                                    /*ascq*/ 0x00,
1885                                    SSD_ELEM_NONE);
1886
1887                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1888                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1889                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1890                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1891                 msg_info.hdr.serializing_sc = NULL;
1892                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1893 #if 0
1894                 printf("BAD JUJU:Major Bummer HW Error\n");
1895 #endif
1896                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1897                 retval = 1;
1898                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1899                     sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1900                 }
1901                 break;
1902         }
1903         mtx_unlock(&lun->lun_lock);
1904         return (retval);
1905 }
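
/*
 * Summary of the dispatch above: on PASS/SKIP the command is either
 * queued for execution on this side (XFER mode) or a CTL_MSG_R2R is sent
 * so the originating side runs it (SER_ONLY mode); BLOCK parks it on the
 * blocked queue; every error case builds autosense and returns it to the
 * peer in a CTL_MSG_BAD_JUJU message.
 */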
1906
1907 static int
1908 ctl_ioctl_submit_wait(union ctl_io *io)
1909 {
1910         struct ctl_fe_ioctl_params params;
1911         ctl_fe_ioctl_state last_state;
1912         int done, retval;
1913
1914         retval = 0;
1915
1916         bzero(&params, sizeof(params));
1917
1918         mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
1919         cv_init(&params.sem, "ctlioccv");
1920         params.state = CTL_IOCTL_INPROG;
1921         last_state = params.state;
1922
1923         io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;
1924
1925         CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));
1926
1927         /* This shouldn't happen */
1928         if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
1929                 return (retval);
1930
1931         done = 0;
1932
1933         do {
1934                 mtx_lock(&params.ioctl_mtx);
1935                 /*
1936                  * Check the state here, and don't sleep if the state has
1937          * already changed (i.e. wakeup has already occurred, but we
1938                  * weren't waiting yet).
1939                  */
1940                 if (params.state == last_state) {
1941                         /* XXX KDM cv_wait_sig instead? */
1942                         cv_wait(&params.sem, &params.ioctl_mtx);
1943                 }
1944                 last_state = params.state;
1945
1946                 switch (params.state) {
1947                 case CTL_IOCTL_INPROG:
1948                         /* Why did we wake up? */
1949                         /* XXX KDM error here? */
1950                         mtx_unlock(&params.ioctl_mtx);
1951                         break;
1952                 case CTL_IOCTL_DATAMOVE:
1953                         CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
1954
1955                         /*
1956                          * change last_state back to INPROG to avoid
1957                          * deadlock on subsequent data moves.
1958                          */
1959                         params.state = last_state = CTL_IOCTL_INPROG;
1960
1961                         mtx_unlock(&params.ioctl_mtx);
1962                         ctl_ioctl_do_datamove(&io->scsiio);
1963                         /*
1964                          * Note that in some cases, most notably writes,
1965                          * this will queue the I/O and call us back later.
1966                          * In other cases, generally reads, this routine
1967                          * will immediately call back and wake us up,
1968                          * probably using our own context.
1969                          */
1970                         io->scsiio.be_move_done(io);
1971                         break;
1972                 case CTL_IOCTL_DONE:
1973                         mtx_unlock(&params.ioctl_mtx);
1974                         CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
1975                         done = 1;
1976                         break;
1977                 default:
1978                         mtx_unlock(&params.ioctl_mtx);
1979                         /* XXX KDM error here? */
1980                         break;
1981                 }
1982         } while (done == 0);
1983
1984         mtx_destroy(&params.ioctl_mtx);
1985         cv_destroy(&params.sem);
1986
1987         return (CTL_RETVAL_COMPLETE);
1988 }
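
/*
 * The loop above is the standard "check state under the mutex before
 * sleeping" pattern for avoiding lost wakeups; in miniature (illustrative
 * only, kept under #if 0):
 */
#if 0
        mtx_lock(&m);
        if (state == last_state)        /* wakeup may already have happened */
                cv_wait(&cv, &m);
        last_state = state;
        /* ... dispatch on state, unlocking as appropriate ... */
        mtx_unlock(&m);
#endif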
1989
1990 static void
1991 ctl_ioctl_datamove(union ctl_io *io)
1992 {
1993         struct ctl_fe_ioctl_params *params;
1994
1995         params = (struct ctl_fe_ioctl_params *)
1996                 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1997
1998         mtx_lock(&params->ioctl_mtx);
1999         params->state = CTL_IOCTL_DATAMOVE;
2000         cv_broadcast(&params->sem);
2001         mtx_unlock(&params->ioctl_mtx);
2002 }
2003
2004 static void
2005 ctl_ioctl_done(union ctl_io *io)
2006 {
2007         struct ctl_fe_ioctl_params *params;
2008
2009         params = (struct ctl_fe_ioctl_params *)
2010                 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
2011
2012         mtx_lock(&params->ioctl_mtx);
2013         params->state = CTL_IOCTL_DONE;
2014         cv_broadcast(&params->sem);
2015         mtx_unlock(&params->ioctl_mtx);
2016 }
2017
2018 static void
2019 ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
2020 {
2021         struct ctl_fe_ioctl_startstop_info *sd_info;
2022
2023         sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;
2024
2025         sd_info->hs_info.status = metatask->status;
2026         sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
2027         sd_info->hs_info.luns_complete =
2028                 metatask->taskinfo.startstop.luns_complete;
2029         sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;
2030
2031         cv_broadcast(&sd_info->sem);
2032 }
2033
2034 static void
2035 ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
2036 {
2037         struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;
2038
2039         fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;
2040
2041         mtx_lock(fe_bbr_info->lock);
2042         fe_bbr_info->bbr_info->status = metatask->status;
2043         fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
2044         fe_bbr_info->wakeup_done = 1;
2045         mtx_unlock(fe_bbr_info->lock);
2046
2047         cv_broadcast(&fe_bbr_info->sem);
2048 }
2049
2050 /*
2051  * Returns 0 for success, errno for failure.
2052  */
2053 static int
2054 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
2055                    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
2056 {
2057         union ctl_io *io;
2058         int retval;
2059
2060         retval = 0;
2061
2062         mtx_lock(&lun->lun_lock);
2063         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
2064              (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2065              ooa_links)) {
2066                 struct ctl_ooa_entry *entry;
2067
2068                 /*
2069                  * If we've got more than we can fit, just count the
2070                  * remaining entries.
2071                  */
2072                 if (*cur_fill_num >= ooa_hdr->alloc_num)
2073                         continue;
2074
2075                 entry = &kern_entries[*cur_fill_num];
2076
2077                 entry->tag_num = io->scsiio.tag_num;
2078                 entry->lun_num = lun->lun;
2079 #ifdef CTL_TIME_IO
2080                 entry->start_bt = io->io_hdr.start_bt;
2081 #endif
2082                 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
2083                 entry->cdb_len = io->scsiio.cdb_len;
2084                 if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
2085                         entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
2086
2087                 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
2088                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
2089
2090                 if (io->io_hdr.flags & CTL_FLAG_ABORT)
2091                         entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
2092
2093                 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
2094                         entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
2095
2096                 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
2097                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
2098         }
2099         mtx_unlock(&lun->lun_lock);
2100
2101         return (retval);
2102 }
2103
2104 static void *
2105 ctl_copyin_alloc(void *user_addr, int len, char *error_str,
2106                  size_t error_str_len)
2107 {
2108         void *kptr;
2109
2110         kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
2111
2112         if (copyin(user_addr, kptr, len) != 0) {
2113                 snprintf(error_str, error_str_len, "Error copying %d bytes "
2114                          "from user address %p to kernel address %p", len,
2115                          user_addr, kptr);
2116                 free(kptr, M_CTL);
2117                 return (NULL);
2118         }
2119
2120         return (kptr);
2121 }
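
/*
 * Usage sketch (illustrative only, kept under #if 0; 'ubuf' and 'len'
 * are placeholders): the caller owns the returned buffer and must free
 * it with M_CTL.
 */
#if 0
        char errbuf[128];
        void *kbuf;

        kbuf = ctl_copyin_alloc(ubuf, len, errbuf, sizeof(errbuf));
        if (kbuf == NULL)
                return (EFAULT);        /* errbuf describes the failure. */
        /* ... use kbuf ... */
        free(kbuf, M_CTL);
#endif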
2122
2123 static void
2124 ctl_free_args(int num_args, struct ctl_be_arg *args)
2125 {
2126         int i;
2127
2128         if (args == NULL)
2129                 return;
2130
2131         for (i = 0; i < num_args; i++) {
2132                 free(args[i].kname, M_CTL);
2133                 free(args[i].kvalue, M_CTL);
2134         }
2135
2136         free(args, M_CTL);
2137 }
2138
2139 static struct ctl_be_arg *
2140 ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
2141                 char *error_str, size_t error_str_len)
2142 {
2143         struct ctl_be_arg *args;
2144         int i;
2145
2146         args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
2147                                 error_str, error_str_len);
2148
2149         if (args == NULL)
2150                 goto bailout;
2151
2152         for (i = 0; i < num_args; i++) {
2153                 args[i].kname = NULL;
2154                 args[i].kvalue = NULL;
2155         }
2156
2157         for (i = 0; i < num_args; i++) {
2158                 uint8_t *tmpptr;
2159
2160                 args[i].kname = ctl_copyin_alloc(args[i].name,
2161                         args[i].namelen, error_str, error_str_len);
2162                 if (args[i].kname == NULL)
2163                         goto bailout;
2164
2165                 if (args[i].kname[args[i].namelen - 1] != '\0') {
2166                         snprintf(error_str, error_str_len, "Argument %d "
2167                                  "name is not NUL-terminated", i);
2168                         goto bailout;
2169                 }
2170
2171                 if (args[i].flags & CTL_BEARG_RD) {
2172                         tmpptr = ctl_copyin_alloc(args[i].value,
2173                                 args[i].vallen, error_str, error_str_len);
2174                         if (tmpptr == NULL)
2175                                 goto bailout;
2176                         if ((args[i].flags & CTL_BEARG_ASCII)
2177                          && (tmpptr[args[i].vallen - 1] != '\0')) {
2178                                 snprintf(error_str, error_str_len, "Argument "
2179                                     "%d value is not NUL-terminated", i);
2180                                 goto bailout;
2181                         }
2182                         args[i].kvalue = tmpptr;
2183                 } else {
2184                         args[i].kvalue = malloc(args[i].vallen,
2185                             M_CTL, M_WAITOK | M_ZERO);
2186                 }
2187         }
2188
2189         return (args);
2190 bailout:
2191
2192         ctl_free_args(num_args, args);
2193
2194         return (NULL);
2195 }
2196
2197 static void
2198 ctl_copyout_args(int num_args, struct ctl_be_arg *args)
2199 {
2200         int i;
2201
2202         for (i = 0; i < num_args; i++) {
2203                 if (args[i].flags & CTL_BEARG_WR)
2204                         copyout(args[i].kvalue, args[i].value, args[i].vallen);
2205         }
2206 }
2207
2208 /*
2209  * Escape characters that are illegal or not recommended in XML.
2210  */
2211 int
2212 ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
2213 {
2214         int retval;
2215
2216         retval = 0;
2217
2218         for (; *str; str++) {
2219                 switch (*str) {
2220                 case '&':
2221                         retval = sbuf_printf(sb, "&amp;");
2222                         break;
2223                 case '>':
2224                         retval = sbuf_printf(sb, "&gt;");
2225                         break;
2226                 case '<':
2227                         retval = sbuf_printf(sb, "&lt;");
2228                         break;
2229                 default:
2230                         retval = sbuf_putc(sb, *str);
2231                         break;
2232                 }
2233
2234                 if (retval != 0)
2235                         break;
2236
2237         }
2238
2239         return (retval);
2240 }
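
/*
 * Usage sketch (illustrative only, kept under #if 0; 'devid_str' is a
 * placeholder for any externally supplied string): escaping a value
 * before emitting it into an XML device list.
 */
#if 0
        sbuf_printf(sb, "\t<serial_number>");
        ctl_sbuf_printf_esc(sb, devid_str);     /* "<" becomes "&lt;", etc. */
        sbuf_printf(sb, "</serial_number>\n");
#endif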
2241
2242 static int
2243 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2244           struct thread *td)
2245 {
2246         struct ctl_softc *softc;
2247         int retval;
2248
2249         softc = control_softc;
2250
2251         retval = 0;
2252
2253         switch (cmd) {
2254         case CTL_IO: {
2255                 union ctl_io *io;
2256                 void *pool_tmp;
2257
2258                 /*
2259                  * If we haven't been "enabled", don't allow any SCSI I/O
2260                  * to this FETD.
2261                  */
2262                 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
2263                         retval = EPERM;
2264                         break;
2265                 }
2266
2267                 io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);
2268                 if (io == NULL) {
2269                         printf("ctl_ioctl: can't allocate ctl_io!\n");
2270                         retval = ENOSPC;
2271                         break;
2272                 }
2273
2274                 /*
2275                  * Need to save the pool reference so it doesn't get
2276                  * spammed by the user's ctl_io.
2277                  */
2278                 pool_tmp = io->io_hdr.pool;
2279
2280                 memcpy(io, (void *)addr, sizeof(*io));
2281
2282                 io->io_hdr.pool = pool_tmp;
2283                 /*
2284                  * No status yet, so make sure the status is set properly.
2285                  */
2286                 io->io_hdr.status = CTL_STATUS_NONE;
2287
2288                 /*
2289                  * The user sets the initiator ID, target and LUN IDs.
2290                  */
2291                 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port;
2292                 io->io_hdr.flags |= CTL_FLAG_USER_REQ;
2293                 if ((io->io_hdr.io_type == CTL_IO_SCSI)
2294                  && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
2295                         io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
2296
2297                 retval = ctl_ioctl_submit_wait(io);
2298
2299                 if (retval != 0) {
2300                         ctl_free_io(io);
2301                         break;
2302                 }
2303
2304                 memcpy((void *)addr, io, sizeof(*io));
2305
2306                 /* return this to our pool */
2307                 ctl_free_io(io);
2308
2309                 break;
2310         }
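                /*
                 * Note on the memcpy() dance above: the user's ctl_io
                 * overwrites the entire kernel structure, so the pool
                 * back-pointer is the one field saved and restored;
                 * everything else is deliberately taken from userland.
                 */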
2311         case CTL_ENABLE_PORT:
2312         case CTL_DISABLE_PORT:
2313         case CTL_SET_PORT_WWNS: {
2314                 struct ctl_port *port;
2315                 struct ctl_port_entry *entry;
2316
2317                 entry = (struct ctl_port_entry *)addr;
2318
2319                 mtx_lock(&softc->ctl_lock);
2320                 STAILQ_FOREACH(port, &softc->port_list, links) {
2321                         int action, done;
2322
2323                         action = 0;
2324                         done = 0;
2325
2326                         if ((entry->port_type == CTL_PORT_NONE)
2327                          && (entry->targ_port == port->targ_port)) {
2328                                 /*
2329                                  * If the user only wants to enable or
2330                                  * disable or set WWNs on a specific port,
2331                                  * do the operation and we're done.
2332                                  */
2333                                 action = 1;
2334                                 done = 1;
2335                         } else if (entry->port_type & port->port_type) {
2336                                 /*
2337                                  * Compare the user's type mask with the
2338                                  * particular frontend type to see if we
2339                                  * have a match.
2340                                  */
2341                                 action = 1;
2342                                 done = 0;
2343
2344                                 /*
2345                                  * Make sure the user isn't trying to set
2346                                  * WWNs on multiple ports at the same time.
2347                                  */
2348                                 if (cmd == CTL_SET_PORT_WWNS) {
2349                                         printf("%s: Can't set WWNs on "
2350                                                "multiple ports\n", __func__);
2351                                         retval = EINVAL;
2352                                         break;
2353                                 }
2354                         }
2355                         if (action != 0) {
2356                                 /*
2357                                  * XXX KDM we have to drop the lock here,
2358                                  * because the online/offline operations
2359                                  * can potentially block.  We need to
2360                                  * reference count the frontends so they
2361                  * can't go away.
2362                                  */
2363                                 mtx_unlock(&softc->ctl_lock);
2364
2365                                 if (cmd == CTL_ENABLE_PORT) {
2366                                         struct ctl_lun *lun;
2367
2368                                         STAILQ_FOREACH(lun, &softc->lun_list,
2369                                                        links) {
2370                                                 port->lun_enable(port->targ_lun_arg,
2371                                                     lun->target,
2372                                                     lun->lun);
2373                                         }
2374
2375                                         ctl_port_online(port);
2376                                 } else if (cmd == CTL_DISABLE_PORT) {
2377                                         struct ctl_lun *lun;
2378
2379                                         ctl_port_offline(port);
2380
2381                                         STAILQ_FOREACH(lun, &softc->lun_list,
2382                                                        links) {
2383                                                 port->lun_disable(
2384                                                     port->targ_lun_arg,
2385                                                     lun->target,
2386                                                     lun->lun);
2387                                         }
2388                                 }
2389
2390                                 mtx_lock(&softc->ctl_lock);
2391
2392                                 if (cmd == CTL_SET_PORT_WWNS)
2393                                         ctl_port_set_wwns(port,
2394                                             (entry->flags & CTL_PORT_WWNN_VALID) ?
2395                                             1 : 0, entry->wwnn,
2396                                             (entry->flags & CTL_PORT_WWPN_VALID) ?
2397                                             1 : 0, entry->wwpn);
2398                         }
2399                         if (done != 0)
2400                                 break;
2401                 }
2402                 mtx_unlock(&softc->ctl_lock);
2403                 break;
2404         }
2405         case CTL_GET_PORT_LIST: {
2406                 struct ctl_port *port;
2407                 struct ctl_port_list *list;
2408                 int i;
2409
2410                 list = (struct ctl_port_list *)addr;
2411
2412                 if (list->alloc_len != (list->alloc_num *
2413                     sizeof(struct ctl_port_entry))) {
2414                         printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
2415                                "alloc_num %u * sizeof(struct ctl_port_entry) "
2416                                "%zu\n", __func__, list->alloc_len,
2417                                list->alloc_num, sizeof(struct ctl_port_entry));
2418                         retval = EINVAL;
2419                         break;
2420                 }
2421                 list->fill_len = 0;
2422                 list->fill_num = 0;
2423                 list->dropped_num = 0;
2424                 i = 0;
2425                 mtx_lock(&softc->ctl_lock);
2426                 STAILQ_FOREACH(port, &softc->port_list, links) {
2427                         struct ctl_port_entry entry, *list_entry;
2428
2429                         if (list->fill_num >= list->alloc_num) {
2430                                 list->dropped_num++;
2431                                 continue;
2432                         }
2433
2434                         entry.port_type = port->port_type;
2435                         strlcpy(entry.port_name, port->port_name,
2436                                 sizeof(entry.port_name));
2437                         entry.targ_port = port->targ_port;
2438                         entry.physical_port = port->physical_port;
2439                         entry.virtual_port = port->virtual_port;
2440                         entry.wwnn = port->wwnn;
2441                         entry.wwpn = port->wwpn;
2442                         if (port->status & CTL_PORT_STATUS_ONLINE)
2443                                 entry.online = 1;
2444                         else
2445                                 entry.online = 0;
2446
2447                         list_entry = &list->entries[i];
2448
2449                         retval = copyout(&entry, list_entry, sizeof(entry));
2450                         if (retval != 0) {
2451                                 printf("%s: CTL_GET_PORT_LIST: copyout "
2452                                        "returned %d\n", __func__, retval);
2453                                 break;
2454                         }
2455                         i++;
2456                         list->fill_num++;
2457                         list->fill_len += sizeof(entry);
2458                 }
2459                 mtx_unlock(&softc->ctl_lock);
2460
2461                 /*
2462                  * If this is non-zero, we had a copyout fault, so there's
2463                  * probably no point in attempting to set the status inside
2464                  * the structure.
2465                  */
2466                 if (retval != 0)
2467                         break;
2468
2469                 if (list->dropped_num > 0)
2470                         list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
2471                 else
2472                         list->status = CTL_PORT_LIST_OK;
2473                 break;
2474         }
2475         case CTL_DUMP_OOA: {
2476                 struct ctl_lun *lun;
2477                 union ctl_io *io;
2478                 char printbuf[128];
2479                 struct sbuf sb;
2480
2481                 mtx_lock(&softc->ctl_lock);
2482                 printf("Dumping OOA queues:\n");
2483                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2484                         mtx_lock(&lun->lun_lock);
2485                         for (io = (union ctl_io *)TAILQ_FIRST(
2486                              &lun->ooa_queue); io != NULL;
2487                              io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2488                              ooa_links)) {
2489                                 sbuf_new(&sb, printbuf, sizeof(printbuf),
2490                                          SBUF_FIXEDLEN);
2491                                 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
2492                                             (intmax_t)lun->lun,
2493                                             io->scsiio.tag_num,
2494                                             (io->io_hdr.flags &
2495                                             CTL_FLAG_BLOCKED) ? " BLOCKED" : "",
2496                                             (io->io_hdr.flags &
2497                                             CTL_FLAG_DMA_INPROG) ? " DMA" : "",
2498                                             (io->io_hdr.flags &
2499                                             CTL_FLAG_ABORT) ? " ABORT" : "",
2500                                             (io->io_hdr.flags &
2501                                         CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
2502                                 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
2503                                 sbuf_finish(&sb);
2504                                 printf("%s\n", sbuf_data(&sb));
2505                         }
2506                         mtx_unlock(&lun->lun_lock);
2507                 }
2508                 printf("OOA queues dump done\n");
2509                 mtx_unlock(&softc->ctl_lock);
2510                 break;
2511         }
2512         case CTL_GET_OOA: {
2513                 struct ctl_lun *lun;
2514                 struct ctl_ooa *ooa_hdr;
2515                 struct ctl_ooa_entry *entries;
2516                 uint32_t cur_fill_num;
2517
2518                 ooa_hdr = (struct ctl_ooa *)addr;
2519
2520                 if ((ooa_hdr->alloc_len == 0)
2521                  || (ooa_hdr->alloc_num == 0)) {
2522                         printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2523                                "must be non-zero\n", __func__,
2524                                ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2525                         retval = EINVAL;
2526                         break;
2527                 }
2528
2529                 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2530                     sizeof(struct ctl_ooa_entry))) {
2531                         printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2532                                "num %u * sizeof(struct ctl_ooa_entry) %zu\n",
2533                                __func__, ooa_hdr->alloc_len,
2534                                ooa_hdr->alloc_num, sizeof(struct ctl_ooa_entry));
2535                         retval = EINVAL;
2536                         break;
2537                 }
2538
2539                 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2540                 if (entries == NULL) {
2541                         printf("%s: could not allocate %d bytes for OOA "
2542                                "dump\n", __func__, ooa_hdr->alloc_len);
2543                         retval = ENOMEM;
2544                         break;
2545                 }
2546
2547                 mtx_lock(&softc->ctl_lock);
2548                 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
2549                  && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
2550                   || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
2551                         mtx_unlock(&softc->ctl_lock);
2552                         free(entries, M_CTL);
2553                         printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2554                                __func__, (uintmax_t)ooa_hdr->lun_num);
2555                         retval = EINVAL;
2556                         break;
2557                 }
2558
2559                 cur_fill_num = 0;
2560
2561                 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2562                         STAILQ_FOREACH(lun, &softc->lun_list, links) {
2563                                 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2564                                         ooa_hdr, entries);
2565                                 if (retval != 0)
2566                                         break;
2567                         }
2568                         if (retval != 0) {
2569                                 mtx_unlock(&softc->ctl_lock);
2570                                 free(entries, M_CTL);
2571                                 break;
2572                         }
2573                 } else {
2574                         lun = softc->ctl_luns[ooa_hdr->lun_num];
2575
2576                         retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
2577                                                     entries);
2578                 }
2579                 mtx_unlock(&softc->ctl_lock);
2580
2581                 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2582                 ooa_hdr->fill_len = ooa_hdr->fill_num *
2583                         sizeof(struct ctl_ooa_entry);
2584                 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2585                 if (retval != 0) {
2586                         printf("%s: error copying out %u bytes for OOA dump\n",
2587                                __func__, ooa_hdr->fill_len);
2588                 }
2589
2590                 getbintime(&ooa_hdr->cur_bt);
2591
2592                 if (cur_fill_num > ooa_hdr->alloc_num) {
2593                         ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
2594                         ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2595                 } else {
2596                         ooa_hdr->dropped_num = 0;
2597                         ooa_hdr->status = CTL_OOA_OK;
2598                 }
2599
2600                 free(entries, M_CTL);
2601                 break;
2602         }
2603         case CTL_CHECK_OOA: {
2604                 union ctl_io *io;
2605                 struct ctl_lun *lun;
2606                 struct ctl_ooa_info *ooa_info;
2607
2608
2609                 ooa_info = (struct ctl_ooa_info *)addr;
2610
2611                 if (ooa_info->lun_id >= CTL_MAX_LUNS) {
2612                         ooa_info->status = CTL_OOA_INVALID_LUN;
2613                         break;
2614                 }
2615                 mtx_lock(&softc->ctl_lock);
2616                 lun = softc->ctl_luns[ooa_info->lun_id];
2617                 if (lun == NULL) {
2618                         mtx_unlock(&softc->ctl_lock);
2619                         ooa_info->status = CTL_OOA_INVALID_LUN;
2620                         break;
2621                 }
2622                 mtx_lock(&lun->lun_lock);
2623                 mtx_unlock(&softc->ctl_lock);
2624                 ooa_info->num_entries = 0;
2625                 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
2626                      io != NULL; io = (union ctl_io *)TAILQ_NEXT(
2627                      &io->io_hdr, ooa_links)) {
2628                         ooa_info->num_entries++;
2629                 }
2630                 mtx_unlock(&lun->lun_lock);
2631
2632                 ooa_info->status = CTL_OOA_SUCCESS;
2633
2634                 break;
2635         }
2636         case CTL_HARD_START:
2637         case CTL_HARD_STOP: {
2638                 struct ctl_fe_ioctl_startstop_info ss_info;
2639                 struct cfi_metatask *metatask;
2640                 struct mtx hs_mtx;
2641
2642                 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF);
2643
2644                 cv_init(&ss_info.sem, "hard start/stop cv");
2645
2646                 metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2647                 if (metatask == NULL) {
2648                         retval = ENOMEM;
2649                         cv_destroy(&ss_info.sem);
2650                         mtx_destroy(&hs_mtx);
2651                         break;
2652                 }
2652
2653                 if (cmd == CTL_HARD_START)
2654                         metatask->tasktype = CFI_TASK_STARTUP;
2655                 else
2656                         metatask->tasktype = CFI_TASK_SHUTDOWN;
2657
2658                 metatask->callback = ctl_ioctl_hard_startstop_callback;
2659                 metatask->callback_arg = &ss_info;
2660
2661                 cfi_action(metatask);
2662
2663                 /* Wait for the callback */
2664                 mtx_lock(&hs_mtx);
2665                 cv_wait_sig(&ss_info.sem, &hs_mtx);
2666                 mtx_unlock(&hs_mtx);
2667
2668                 /*
2669                  * All information has been copied from the metatask by the
2670                  * time cv_broadcast() is called, so we free the metatask here.
2671                  */
2672                 cfi_free_metatask(metatask);
2673
2674                 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info));
2675
2676                 cv_destroy(&ss_info.sem);
2677                 mtx_destroy(&hs_mtx);
2677                 break;
2678         }
2679         case CTL_BBRREAD: {
2680                 struct ctl_bbrread_info *bbr_info;
2681                 struct ctl_fe_ioctl_bbrread_info fe_bbr_info;
2682                 struct mtx bbr_mtx;
2683                 struct cfi_metatask *metatask;
2684
2685                 bbr_info = (struct ctl_bbrread_info *)addr;
2686
2687                 bzero(&fe_bbr_info, sizeof(fe_bbr_info));
2688
2689                 bzero(&bbr_mtx, sizeof(bbr_mtx));
2690                 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF);
2691
2692                 fe_bbr_info.bbr_info = bbr_info;
2693                 fe_bbr_info.lock = &bbr_mtx;
2694
2695                 cv_init(&fe_bbr_info.sem, "BBR read cv");
2696                 metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2697
2698                 if (metatask == NULL) {
2699                         mtx_destroy(&bbr_mtx);
2700                         cv_destroy(&fe_bbr_info.sem);
2701                         retval = ENOMEM;
2702                         break;
2703                 }
2704                 metatask->tasktype = CFI_TASK_BBRREAD;
2705                 metatask->callback = ctl_ioctl_bbrread_callback;
2706                 metatask->callback_arg = &fe_bbr_info;
2707                 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num;
2708                 metatask->taskinfo.bbrread.lba = bbr_info->lba;
2709                 metatask->taskinfo.bbrread.len = bbr_info->len;
2710
2711                 cfi_action(metatask);
2712
2713                 mtx_lock(&bbr_mtx);
2714                 while (fe_bbr_info.wakeup_done == 0)
2715                         cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx);
2716                 mtx_unlock(&bbr_mtx);
2717
2718                 bbr_info->status = metatask->status;
2719                 bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
2720                 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status;
2721                 memcpy(&bbr_info->sense_data,
2722                        &metatask->taskinfo.bbrread.sense_data,
2723                        ctl_min(sizeof(bbr_info->sense_data),
2724                                sizeof(metatask->taskinfo.bbrread.sense_data)));
2725
2726                 cfi_free_metatask(metatask);
2727
2728                 mtx_destroy(&bbr_mtx);
2729                 cv_destroy(&fe_bbr_info.sem);
2730
2731                 break;
2732         }
2733         case CTL_DELAY_IO: {
2734                 struct ctl_io_delay_info *delay_info;
2735 #ifdef CTL_IO_DELAY
2736                 struct ctl_lun *lun;
2737 #endif /* CTL_IO_DELAY */
2738
2739                 delay_info = (struct ctl_io_delay_info *)addr;
2740
2741 #ifdef CTL_IO_DELAY
2742                 mtx_lock(&softc->ctl_lock);
2743
2744                 if ((delay_info->lun_id >= CTL_MAX_LUNS)
2745                  || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2746                         delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2747                 } else {
2748                         lun = softc->ctl_luns[delay_info->lun_id];
2749                         mtx_lock(&lun->lun_lock);
2750
2751                         delay_info->status = CTL_DELAY_STATUS_OK;
2752
2753                         switch (delay_info->delay_type) {
2754                         case CTL_DELAY_TYPE_CONT:
2755                                 break;
2756                         case CTL_DELAY_TYPE_ONESHOT:
2757                                 break;
2758                         default:
2759                                 delay_info->status =
2760                                         CTL_DELAY_STATUS_INVALID_TYPE;
2761                                 break;
2762                         }
2763
2764                         switch (delay_info->delay_loc) {
2765                         case CTL_DELAY_LOC_DATAMOVE:
2766                                 lun->delay_info.datamove_type =
2767                                         delay_info->delay_type;
2768                                 lun->delay_info.datamove_delay =
2769                                         delay_info->delay_secs;
2770                                 break;
2771                         case CTL_DELAY_LOC_DONE:
2772                                 lun->delay_info.done_type =
2773                                         delay_info->delay_type;
2774                                 lun->delay_info.done_delay =
2775                                         delay_info->delay_secs;
2776                                 break;
2777                         default:
2778                                 delay_info->status =
2779                                         CTL_DELAY_STATUS_INVALID_LOC;
2780                                 break;
2781                         }
2782                         mtx_unlock(&lun->lun_lock);
2783                 }
2784
2785                 mtx_unlock(&softc->ctl_lock);
2786 #else
2787                 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2788 #endif /* CTL_IO_DELAY */
2789                 break;
2790         }
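                /*
                 * Hypothetical userland sketch (compiled out): arming a
                 * one-shot five second delay at "done" (status) time on
                 * LUN 0 via the type/location knobs handled above.  "fd"
                 * is an assumed open CTL device descriptor.
                 */
#if 0
                struct ctl_io_delay_info delay_info;

                bzero(&delay_info, sizeof(delay_info));
                delay_info.lun_id = 0;
                delay_info.delay_type = CTL_DELAY_TYPE_ONESHOT;
                delay_info.delay_loc = CTL_DELAY_LOC_DONE;
                delay_info.delay_secs = 5;
                if (ioctl(fd, CTL_DELAY_IO, &delay_info) == -1)
                        err(1, "CTL_DELAY_IO");
                if (delay_info.status == CTL_DELAY_STATUS_NOT_IMPLEMENTED)
                        warnx("kernel built without CTL_IO_DELAY");
#endif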
2791         case CTL_REALSYNC_SET: {
2792                 int *syncstate;
2793
2794                 syncstate = (int *)addr;
2795
2796                 mtx_lock(&softc->ctl_lock);
2797                 switch (*syncstate) {
2798                 case 0:
2799                         softc->flags &= ~CTL_FLAG_REAL_SYNC;
2800                         break;
2801                 case 1:
2802                         softc->flags |= CTL_FLAG_REAL_SYNC;
2803                         break;
2804                 default:
2805                         retval = EINVAL;
2806                         break;
2807                 }
2808                 mtx_unlock(&softc->ctl_lock);
2809                 break;
2810         }
2811         case CTL_REALSYNC_GET: {
2812                 int *syncstate;
2813
2814                 syncstate = (int*)addr;
2815
2816                 mtx_lock(&softc->ctl_lock);
2817                 if (softc->flags & CTL_FLAG_REAL_SYNC)
2818                         *syncstate = 1;
2819                 else
2820                         *syncstate = 0;
2821                 mtx_unlock(&softc->ctl_lock);
2822
2823                 break;
2824         }
2825         case CTL_SETSYNC:
2826         case CTL_GETSYNC: {
2827                 struct ctl_sync_info *sync_info;
2828                 struct ctl_lun *lun;
2829
2830                 sync_info = (struct ctl_sync_info *)addr;
2831
2832                 mtx_lock(&softc->ctl_lock);
2833                 if ((sync_info->lun_id >= CTL_MAX_LUNS)
2834                  || ((lun = softc->ctl_luns[sync_info->lun_id]) == NULL)) {
2835                         mtx_unlock(&softc->ctl_lock);
2836                         sync_info->status = CTL_GS_SYNC_NO_LUN;
2837                         break;
2838                 }
2838                 /*
2839                  * Get or set the sync interval.  We're not bounds checking
2840                  * in the set case, hopefully the user won't do something
2841                  * silly.
2842                  */
2843                 mtx_lock(&lun->lun_lock);
2844                 mtx_unlock(&softc->ctl_lock);
2845                 if (cmd == CTL_GETSYNC)
2846                         sync_info->sync_interval = lun->sync_interval;
2847                 else
2848                         lun->sync_interval = sync_info->sync_interval;
2849                 mtx_unlock(&lun->lun_lock);
2850
2851                 sync_info->status = CTL_GS_SYNC_OK;
2852
2853                 break;
2854         }
2855         case CTL_GETSTATS: {
2856                 struct ctl_stats *stats;
2857                 struct ctl_lun *lun;
2858                 int i;
2859
2860                 stats = (struct ctl_stats *)addr;
2861
2862                 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2863                      stats->alloc_len) {
2864                         stats->status = CTL_SS_NEED_MORE_SPACE;
2865                         stats->num_luns = softc->num_luns;
2866                         break;
2867                 }
2868                 /*
2869                  * XXX KDM no locking here.  If the LUN list changes,
2870                  * things can blow up.
2871                  */
2872                 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
2873                      i++, lun = STAILQ_NEXT(lun, links)) {
2874                         retval = copyout(&lun->stats, &stats->lun_stats[i],
2875                                          sizeof(lun->stats));
2876                         if (retval != 0)
2877                                 break;
2878                 }
2879                 if (retval != 0)
2880                         break;
2881                 stats->num_luns = softc->num_luns;
2880                 stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2881                                  softc->num_luns;
2882                 stats->status = CTL_SS_OK;
2883 #ifdef CTL_TIME_IO
2884                 stats->flags = CTL_STATS_FLAG_TIME_VALID;
2885 #else
2886                 stats->flags = CTL_STATS_FLAG_NONE;
2887 #endif
2888                 getnanouptime(&stats->timestamp);
2889                 break;
2890         }
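                /*
                 * Hypothetical userland sketch (compiled out): fetching
                 * per-LUN statistics, growing the buffer when the kernel
                 * asks for more space.  "fd" is an assumed open CTL
                 * device descriptor; lun_stats is a user pointer, as the
                 * copyout above implies.
                 */
#if 0
                struct ctl_stats stats;
                uint32_t nluns = 16;

                for (;;) {
                        bzero(&stats, sizeof(stats));
                        stats.alloc_len = nluns *
                            sizeof(struct ctl_lun_io_stats);
                        stats.lun_stats = malloc(stats.alloc_len);
                        if (ioctl(fd, CTL_GETSTATS, &stats) == -1)
                                err(1, "CTL_GETSTATS");
                        if (stats.status != CTL_SS_NEED_MORE_SPACE)
                                break;
                        nluns = stats.num_luns;
                        free(stats.lun_stats);
                }
#endif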
2891         case CTL_ERROR_INJECT: {
2892                 struct ctl_error_desc *err_desc, *new_err_desc;
2893                 struct ctl_lun *lun;
2894
2895                 err_desc = (struct ctl_error_desc *)addr;
2896
2897                 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2898                                       M_WAITOK | M_ZERO);
2899                 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2900
2901                 mtx_lock(&softc->ctl_lock);
2902                 if ((err_desc->lun_id >= CTL_MAX_LUNS)
2903                  || ((lun = softc->ctl_luns[err_desc->lun_id]) == NULL)) {
2904                         mtx_unlock(&softc->ctl_lock);
2905                         free(new_err_desc, M_CTL);
2906                         printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2907                                __func__, (uintmax_t)err_desc->lun_id);
2908                         retval = EINVAL;
2909                         break;
2910                 }
2911                 mtx_lock(&lun->lun_lock);
2912                 mtx_unlock(&softc->ctl_lock);
2913
2914                 /*
2915                  * We could do some checking here to verify the validity
2916                  * of the request, but given the complexity of error
2917                  * injection requests, the checking logic would be fairly
2918                  * complex.
2919                  *
2920                  * For now, if the request is invalid, it just won't get
2921                  * executed and might get deleted.
2922                  */
2923                 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2924
2925                 /*
2926                  * XXX KDM check to make sure the serial number is unique,
2927                  * in case we somehow manage to wrap.  That shouldn't
2928                  * happen for a very long time, but it's the right thing to
2929                  * do.
2930                  */
2931                 new_err_desc->serial = lun->error_serial;
2932                 err_desc->serial = lun->error_serial;
2933                 lun->error_serial++;
2934
2935                 mtx_unlock(&lun->lun_lock);
2936                 break;
2937         }
2938         case CTL_ERROR_INJECT_DELETE: {
2939                 struct ctl_error_desc *delete_desc, *desc, *desc2;
2940                 struct ctl_lun *lun;
2941                 int delete_done;
2942
2943                 delete_desc = (struct ctl_error_desc *)addr;
2944                 delete_done = 0;
2945
2946                 mtx_lock(&softc->ctl_lock);
2947                 if ((delete_desc->lun_id >= CTL_MAX_LUNS)
2948                  || ((lun = softc->ctl_luns[delete_desc->lun_id]) == NULL)) {
2949                         mtx_unlock(&softc->ctl_lock);
2950                         printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2951                                __func__, (uintmax_t)delete_desc->lun_id);
2952                         retval = EINVAL;
2953                         break;
2954                 }
2955                 mtx_lock(&lun->lun_lock);
2956                 mtx_unlock(&softc->ctl_lock);
2957                 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2958                         if (desc->serial != delete_desc->serial)
2959                                 continue;
2960
2961                         STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2962                                       links);
2963                         free(desc, M_CTL);
2964                         delete_done = 1;
2965                 }
2966                 mtx_unlock(&lun->lun_lock);
2967                 if (delete_done == 0) {
2968                         printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2969                                "error serial %ju on LUN %u\n", __func__,
2970                                (uintmax_t)delete_desc->serial,
2971                                delete_desc->lun_id);
2971                         retval = EINVAL;
2972                         break;
2973                 }
2974                 break;
2975         }
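                /*
                 * Hypothetical userland sketch (compiled out): arming an
                 * error injection descriptor, then deleting it again via
                 * the serial number the CTL_ERROR_INJECT handler above
                 * hands back.  Only fields referenced in this file are
                 * filled in; "fd" is an assumed open CTL descriptor.
                 */
#if 0
                struct ctl_error_desc err_desc;

                bzero(&err_desc, sizeof(err_desc));
                err_desc.lun_id = 0;
                /* ... fill in the desired error type and pattern ... */
                if (ioctl(fd, CTL_ERROR_INJECT, &err_desc) == -1)
                        err(1, "CTL_ERROR_INJECT");
                /* err_desc.serial now identifies the injected error. */
                if (ioctl(fd, CTL_ERROR_INJECT_DELETE, &err_desc) == -1)
                        err(1, "CTL_ERROR_INJECT_DELETE");
#endif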
2976         case CTL_DUMP_STRUCTS: {
2977                 int i, j, k, idx;
2978                 struct ctl_port *port;
2979                 struct ctl_frontend *fe;
2980
2981                 mtx_lock(&softc->ctl_lock);
2982                 printf("CTL Persistent Reservation information start:\n");
2983                 for (i = 0; i < CTL_MAX_LUNS; i++) {
2984                         struct ctl_lun *lun;
2985
2986                         lun = softc->ctl_luns[i];
2987
2988                         if ((lun == NULL)
2989                          || ((lun->flags & CTL_LUN_DISABLED) != 0))
2990                                 continue;
2991
2992                         for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
2993                                 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++) {
2994                                         idx = j * CTL_MAX_INIT_PER_PORT + k;
2995                                         if (lun->per_res[idx].registered == 0)
2996                                                 continue;
2997                                         printf("  LUN %d port %d iid %d key "
2998                                                "%#jx\n", i, j, k,
2999                                                (uintmax_t)scsi_8btou64(
3000                                                lun->per_res[idx].res_key.key));
3001                                 }
3002                         }
3003                 }
3004                 printf("CTL Persistent Reservation information end\n");
3005                 printf("CTL Ports:\n");
3006                 STAILQ_FOREACH(port, &softc->port_list, links) {
3007                         printf("  Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
3008                                "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
3009                                port->frontend->name, port->port_type,
3010                                port->physical_port, port->virtual_port,
3011                                (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
3012                         for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3013                                 if (port->wwpn_iid[j].in_use == 0 &&
3014                                     port->wwpn_iid[j].wwpn == 0 &&
3015                                     port->wwpn_iid[j].name == NULL)
3016                                         continue;
3017
3018                                 printf("    iid %u use %d WWPN %#jx '%s'\n",
3019                                     j, port->wwpn_iid[j].in_use,
3020                                     (uintmax_t)port->wwpn_iid[j].wwpn,
3021                                     port->wwpn_iid[j].name);
3022                         }
3023                 }
3024                 printf("CTL Port information end\n");
3025                 mtx_unlock(&softc->ctl_lock);
3026                 /*
3027                  * XXX KDM calling this without a lock.  We'd likely want
3028                  * to drop the lock before calling the frontend's dump
3029                  * routine anyway.
3030                  */
3031                 printf("CTL Frontends:\n");
3032                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
3033                         printf("  Frontend '%s'\n", fe->name);
3034                         if (fe->fe_dump != NULL)
3035                                 fe->fe_dump();
3036                 }
3037                 printf("CTL Frontend information end\n");
3038                 break;
3039         }
3040         case CTL_LUN_REQ: {
3041                 struct ctl_lun_req *lun_req;
3042                 struct ctl_backend_driver *backend;
3043
3044                 lun_req = (struct ctl_lun_req *)addr;
3045
3046                 backend = ctl_backend_find(lun_req->backend);
3047                 if (backend == NULL) {
3048                         lun_req->status = CTL_LUN_ERROR;
3049                         snprintf(lun_req->error_str,
3050                                  sizeof(lun_req->error_str),
3051                                  "Backend \"%s\" not found.",
3052                                  lun_req->backend);
3053                         break;
3054                 }
3055                 if (lun_req->num_be_args > 0) {
3056                         lun_req->kern_be_args = ctl_copyin_args(
3057                                 lun_req->num_be_args,
3058                                 lun_req->be_args,
3059                                 lun_req->error_str,
3060                                 sizeof(lun_req->error_str));
3061                         if (lun_req->kern_be_args == NULL) {
3062                                 lun_req->status = CTL_LUN_ERROR;
3063                                 break;
3064                         }
3065                 }
3066
3067                 retval = backend->ioctl(dev, cmd, addr, flag, td);
3068
3069                 if (lun_req->num_be_args > 0) {
3070                         ctl_copyout_args(lun_req->num_be_args,
3071                                       lun_req->kern_be_args);
3072                         ctl_free_args(lun_req->num_be_args,
3073                                       lun_req->kern_be_args);
3074                 }
3075                 break;
3076         }
3077         case CTL_LUN_LIST: {
3078                 struct sbuf *sb;
3079                 struct ctl_lun *lun;
3080                 struct ctl_lun_list *list;
3081                 struct ctl_option *opt;
3082
3083                 list = (struct ctl_lun_list *)addr;
3084
3085                 /*
3086                  * Allocate a fixed length sbuf here, based on the length
3087                  * of the user's buffer.  We could allocate an auto-extending
3088                  * buffer, and then tell the user how much larger our
3089                  * amount of data is than his buffer, but that presents
3090                  * some problems:
3091                  *
3092                  * 1.  The sbuf(9) routines use a blocking malloc, and so
3093                  *     we can't hold a lock while calling them with an
3094                  *     auto-extending buffer.
3095                  *
3096                  * 2.  There is not currently a LUN reference counting
3097                  *     mechanism, outside of outstanding transactions on
3098                  *     the LUN's OOA queue.  So a LUN could go away on us
3099                  *     while we're getting the LUN number, backend-specific
3100                  *     information, etc.  Thus, given the way things
3101                  *     currently work, we need to hold the CTL lock while
3102                  *     grabbing LUN information.
3103                  *
3104                  * So, from the user's standpoint, the best thing to do is
3105                  * allocate what he thinks is a reasonable buffer length,
3106                  * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
3107                  * double the buffer length and try again.  (And repeat
3108                  * that until he succeeds.)
3109                  * that until he succeeds.)  */
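                /*
                 * A hypothetical userland sketch (compiled out) of that
                 * doubling strategy; "fd" is an assumed open CTL device
                 * descriptor.
                 */
#if 0
                struct ctl_lun_list list;
                uint32_t len = 4096;
                char *buf;

                for (;;) {
                        buf = malloc(len);
                        bzero(&list, sizeof(list));
                        list.alloc_len = len;
                        list.lun_xml = buf;
                        if (ioctl(fd, CTL_LUN_LIST, &list) == -1)
                                err(1, "CTL_LUN_LIST");
                        if (list.status != CTL_LUN_LIST_NEED_MORE_SPACE)
                                break;
                        free(buf);
                        len *= 2;
                }
#endif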
3110                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3111                 if (sb == NULL) {
3112                         list->status = CTL_LUN_LIST_ERROR;
3113                         snprintf(list->error_str, sizeof(list->error_str),
3114                                  "Unable to allocate %d bytes for LUN list",
3115                                  list->alloc_len);
3116                         break;
3117                 }
3118
3119                 sbuf_printf(sb, "<ctllunlist>\n");
3120
3121                 mtx_lock(&softc->ctl_lock);
3122                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3123                         mtx_lock(&lun->lun_lock);
3124                         retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3125                                              (uintmax_t)lun->lun);
3126
3127                         /*
3128                          * Bail out as soon as we see that we've overfilled
3129                          * the buffer.
3130                          */
3131                         if (retval != 0)
3132                                 break;
3133
3134                         retval = sbuf_printf(sb, "\t<backend_type>%s"
3135                                              "</backend_type>\n",
3136                                              (lun->backend == NULL) ?  "none" :
3137                                              lun->backend->name);
3138
3139                         if (retval != 0)
3140                                 break;
3141
3142                         retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
3143                                              lun->be_lun->lun_type);
3144
3145                         if (retval != 0)
3146                                 break;
3147
3148                         if (lun->backend == NULL) {
3149                                 retval = sbuf_printf(sb, "</lun>\n");
3150                                 if (retval != 0)
3151                                         break;
3152                                 continue;
3153                         }
3154
3155                         retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
3156                                              (lun->be_lun->maxlba > 0) ?
3157                                              lun->be_lun->maxlba + 1 : 0);
3158
3159                         if (retval != 0)
3160                                 break;
3161
3162                         retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
3163                                              lun->be_lun->blocksize);
3164
3165                         if (retval != 0)
3166                                 break;
3167
3168                         retval = sbuf_printf(sb, "\t<serial_number>");
3169
3170                         if (retval != 0)
3171                                 break;
3172
3173                         retval = ctl_sbuf_printf_esc(sb,
3174                                                      lun->be_lun->serial_num);
3175
3176                         if (retval != 0)
3177                                 break;
3178
3179                         retval = sbuf_printf(sb, "</serial_number>\n");
3180
3181                         if (retval != 0)
3182                                 break;
3183
3184                         retval = sbuf_printf(sb, "\t<device_id>");
3185
3186                         if (retval != 0)
3187                                 break;
3188
3189                         retval = ctl_sbuf_printf_esc(sb,
3190                                                      lun->be_lun->device_id);
3190
3191                         if (retval != 0)
3192                                 break;
3193
3194                         retval = sbuf_printf(sb, "</device_id>\n");
3195
3196                         if (retval != 0)
3197                                 break;
3198
3199                         if (lun->backend->lun_info != NULL) {
3200                                 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
3201                                 if (retval != 0)
3202                                         break;
3203                         }
3204                         STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
3205                                 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3206                                     opt->name, opt->value, opt->name);
3207                                 if (retval != 0)
3208                                         break;
3209                         }
3210
3211                         retval = sbuf_printf(sb, "</lun>\n");
3212
3213                         if (retval != 0)
3214                                 break;
3215                         mtx_unlock(&lun->lun_lock);
3216                 }
3217                 if (lun != NULL)
3218                         mtx_unlock(&lun->lun_lock);
3219                 mtx_unlock(&softc->ctl_lock);
3220
3221                 if ((retval != 0)
3222                  || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3223                         retval = 0;
3224                         sbuf_delete(sb);
3225                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3226                         snprintf(list->error_str, sizeof(list->error_str),
3227                                  "Out of space, %d bytes is too small",
3228                                  list->alloc_len);
3229                         break;
3230                 }
3231
3232                 sbuf_finish(sb);
3233
3234                 retval = copyout(sbuf_data(sb), list->lun_xml,
3235                                  sbuf_len(sb) + 1);
3236
3237                 list->fill_len = sbuf_len(sb) + 1;
3238                 list->status = CTL_LUN_LIST_OK;
3239                 sbuf_delete(sb);
3240                 break;
3241         }
3242         case CTL_ISCSI: {
3243                 struct ctl_iscsi *ci;
3244                 struct ctl_frontend *fe;
3245
3246                 ci = (struct ctl_iscsi *)addr;
3247
3248                 fe = ctl_frontend_find("iscsi");
3249                 if (fe == NULL) {
3250                         ci->status = CTL_ISCSI_ERROR;
3251                         snprintf(ci->error_str, sizeof(ci->error_str),
3252                             "Frontend \"iscsi\" not found.");
3253                         break;
3254                 }
3255
3256                 retval = fe->ioctl(dev, cmd, addr, flag, td);
3257                 break;
3258         }
3259         case CTL_PORT_REQ: {
3260                 struct ctl_req *req;
3261                 struct ctl_frontend *fe;
3262
3263                 req = (struct ctl_req *)addr;
3264
3265                 fe = ctl_frontend_find(req->driver);
3266                 if (fe == NULL) {
3267                         req->status = CTL_LUN_ERROR;
3268                         snprintf(req->error_str, sizeof(req->error_str),
3269                             "Frontend \"%s\" not found.", req->driver);
3270                         break;
3271                 }
3272                 if (req->num_args > 0) {
3273                         req->kern_args = ctl_copyin_args(req->num_args,
3274                             req->args, req->error_str, sizeof(req->error_str));
3275                         if (req->kern_args == NULL) {
3276                                 req->status = CTL_LUN_ERROR;
3277                                 break;
3278                         }
3279                 }
3280
3281                 retval = fe->ioctl(dev, cmd, addr, flag, td);
3282
3283                 if (req->num_args > 0) {
3284                         ctl_copyout_args(req->num_args, req->kern_args);
3285                         ctl_free_args(req->num_args, req->kern_args);
3286                 }
3287                 break;
3288         }
3289         case CTL_PORT_LIST: {
3290                 struct sbuf *sb;
3291                 struct ctl_port *port;
3292                 struct ctl_lun_list *list;
3293                 struct ctl_option *opt;
3294
3295                 list = (struct ctl_lun_list *)addr;
3296
3297                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3298                 if (sb == NULL) {
3299                         list->status = CTL_LUN_LIST_ERROR;
3300                         snprintf(list->error_str, sizeof(list->error_str),
3301                                  "Unable to allocate %d bytes for port list",
3302                                  list->alloc_len);
3303                         break;
3304                 }
3305
3306                 sbuf_printf(sb, "<ctlportlist>\n");
3307
3308                 mtx_lock(&softc->ctl_lock);
3309                 STAILQ_FOREACH(port, &softc->port_list, links) {
3310                         retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
3311                                              (uintmax_t)port->targ_port);
3312
3313                         /*
3314                          * Bail out as soon as we see that we've overfilled
3315                          * the buffer.
3316                          */
3317                         if (retval != 0)
3318                                 break;
3319
3320                         retval = sbuf_printf(sb, "\t<frontend_type>%s"
3321                             "</frontend_type>\n", port->frontend->name);
3322                         if (retval != 0)
3323                                 break;
3324
3325                         retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
3326                                              port->port_type);
3327                         if (retval != 0)
3328                                 break;
3329
3330                         retval = sbuf_printf(sb, "\t<online>%s</online>\n",
3331                             (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
3332                         if (retval != 0)
3333                                 break;
3334
3335                         retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
3336                             port->port_name);
3337                         if (retval != 0)
3338                                 break;
3339
3340                         retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
3341                             port->physical_port);
3342                         if (retval != 0)
3343                                 break;
3344
3345                         retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
3346                             port->virtual_port);
3347                         if (retval != 0)
3348                                 break;
3349
3350                         retval = sbuf_printf(sb, "\t<wwnn>%#jx</wwnn>\n",
3351                             (uintmax_t)port->wwnn);
3352                         if (retval != 0)
3353                                 break;
3354
3355                         retval = sbuf_printf(sb, "\t<wwpn>%#jx</wwpn>\n",
3356                             (uintmax_t)port->wwpn);
3357                         if (retval != 0)
3358                                 break;
3359
3360                         if (port->port_info != NULL) {
3361                                 retval = port->port_info(port->onoff_arg, sb);
3362                                 if (retval != 0)
3363                                         break;
3364                         }
3365                         STAILQ_FOREACH(opt, &port->options, links) {
3366                                 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3367                                     opt->name, opt->value, opt->name);
3368                                 if (retval != 0)
3369                                         break;
3370                         }
3371
3372                         retval = sbuf_printf(sb, "</targ_port>\n");
3373                         if (retval != 0)
3374                                 break;
3375                 }
3376                 mtx_unlock(&softc->ctl_lock);
3377
3378                 if ((retval != 0)
3379                  || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
3380                         retval = 0;
3381                         sbuf_delete(sb);
3382                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3383                         snprintf(list->error_str, sizeof(list->error_str),
3384                                  "Out of space, %d bytes is too small",
3385                                  list->alloc_len);
3386                         break;
3387                 }
3388
3389                 sbuf_finish(sb);
3390
3391                 retval = copyout(sbuf_data(sb), list->lun_xml,
3392                                  sbuf_len(sb) + 1);
3393
3394                 list->fill_len = sbuf_len(sb) + 1;
3395                 list->status = CTL_LUN_LIST_OK;
3396                 sbuf_delete(sb);
3397                 break;
3398         }
3399         default: {
3400                 /* XXX KDM should we fix this? */
3401 #if 0
3402                 struct ctl_backend_driver *backend;
3403                 unsigned int type;
3404                 int found;
3405
3406                 found = 0;
3407
3408                 /*
3409                  * We encode the backend type as the ioctl type for backend
3410                  * ioctls.  So parse it out here, and then search for a
3411                  * backend of this type.
3412                  */
3413                 type = _IOC_TYPE(cmd);
3414
3415                 STAILQ_FOREACH(backend, &softc->be_list, links) {
3416                         if (backend->type == type) {
3417                                 found = 1;
3418                                 break;
3419                         }
3420                 }
3421                 if (found == 0) {
3422                         printf("ctl: unknown ioctl command %#lx or backend "
3423                                "%d\n", cmd, type);
3424                         retval = EINVAL;
3425                         break;
3426                 }
3427                 retval = backend->ioctl(dev, cmd, addr, flag, td);
3428 #endif
3429                 retval = ENOTTY;
3430                 break;
3431         }
3432         }
3433         return (retval);
3434 }
3435
3436 uint32_t
3437 ctl_get_initindex(struct ctl_nexus *nexus)
3438 {
3439         if (nexus->targ_port < CTL_MAX_PORTS)
3440                 return (nexus->initid.id +
3441                         (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3442         else
3443                 return (nexus->initid.id +
3444                        ((nexus->targ_port - CTL_MAX_PORTS) *
3445                         CTL_MAX_INIT_PER_PORT));
3446 }
3447
3448 uint32_t
3449 ctl_get_resindex(struct ctl_nexus *nexus)
3450 {
3451         return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3452 }
3453
3454 uint32_t
3455 ctl_port_idx(int port_num)
3456 {
3457                 return (port_num);
3458         else
3459                 return (port_num - CTL_MAX_PORTS);
3460                 return(port_num - CTL_MAX_PORTS);
3461 }
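/*
 * Hypothetical sketch (compiled out): port numbers at or above
 * CTL_MAX_PORTS appear to represent the HA peer's view of the
 * corresponding local port, so both collapse to the same index.
 */
#if 0
        KASSERT(ctl_port_idx(5) == ctl_port_idx(5 + CTL_MAX_PORTS),
            ("shadow port must map to the same index"));
#endif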
3462
3463 static uint32_t
3464 ctl_map_lun(int port_num, uint32_t lun_id)
3465 {
3466         struct ctl_port *port;
3467
3468         port = control_softc->ctl_ports[ctl_port_idx(port_num)];
3469         if (port == NULL)
3470                 return (UINT32_MAX);
3471         if (port->lun_map == NULL)
3472                 return (lun_id);
3473         return (port->lun_map(port->targ_lun_arg, lun_id));
3474 }
3475
3476 static uint32_t
3477 ctl_map_lun_back(int port_num, uint32_t lun_id)
3478 {
3479         struct ctl_port *port;
3480         uint32_t i;
3481
3482         port = control_softc->ctl_ports[ctl_port_idx(port_num)];
3483         if (port == NULL)
3484                 return (UINT32_MAX);
3485         if (port->lun_map == NULL)
3484                 return (lun_id);
3485         for (i = 0; i < CTL_MAX_LUNS; i++) {
3486                 if (port->lun_map(port->targ_lun_arg, i) == lun_id)
3487                         return (i);
3488         }
3489         return (UINT32_MAX);
3490 }
3491
3492 /*
3493  * Note:  This only works for bitmask sizes that are at least 32 bits, and
3494  * that are a power of 2.
3495  */
3496 int
3497 ctl_ffz(uint32_t *mask, uint32_t size)
3498 {
3499         uint32_t num_chunks, num_pieces;
3500         int i, j;
3501
3502         num_chunks = (size >> 5);
3503         if (num_chunks == 0)
3504                 num_chunks++;
3505         num_pieces = ctl_min((sizeof(uint32_t) * 8), size);
3506
3507         for (i = 0; i < num_chunks; i++) {
3508                 for (j = 0; j < num_pieces; j++) {
3509                         if ((mask[i] & (1 << j)) == 0)
3510                                 return ((i << 5) + j);
3511                 }
3512         }
3513
3514         return (-1);
3515 }
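/*
 * Hypothetical usage sketch (compiled out): a 64-bit allocation bitmap
 * built from two 32-bit chunks.  With bits 0-2 already set, bit 3 is
 * the first zero ctl_ffz() reports.
 */
#if 0
        uint32_t mask[2] = { 0x00000007, 0x00000000 };

        KASSERT(ctl_ffz(mask, 64) == 3, ("first zero bit should be 3"));
#endif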
3516
3517 int
3518 ctl_set_mask(uint32_t *mask, uint32_t bit)
3519 {
3520         uint32_t chunk, piece;
3521
3522         chunk = bit >> 5;
3523         piece = bit % (sizeof(uint32_t) * 8);
3524
3525         if ((mask[chunk] & (1 << piece)) != 0)
3526                 return (-1);
3527         else
3528                 mask[chunk] |= (1 << piece);
3529
3530         return (0);
3531 }
3532
3533 int
3534 ctl_clear_mask(uint32_t *mask, uint32_t bit)
3535 {
3536         uint32_t chunk, piece;
3537
3538         chunk = bit >> 5;
3539         piece = bit % (sizeof(uint32_t) * 8);
3540
3541         if ((mask[chunk] & (1 << piece)) == 0)
3542                 return (-1);
3543         else
3544                 mask[chunk] &= ~(1 << piece);
3545
3546         return (0);
3547 }
3548
3549 int
3550 ctl_is_set(uint32_t *mask, uint32_t bit)
3551 {
3552         uint32_t chunk, piece;
3553
3554         chunk = bit >> 5;
3555         piece = bit % (sizeof(uint32_t) * 8);
3556
3557         if ((mask[chunk] & (1 << piece)) == 0)
3558                 return (0);
3559         else
3560                 return (1);
3561 }
3562
3563 #ifdef unused
3564 /*
3565  * The bus, target and lun are optional, they can be filled in later.
3566  * can_wait is used to determine whether we can wait on the malloc or not.
3567  */
3568 union ctl_io*
3569 ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target,
3570               uint32_t targ_lun, int can_wait)
3571 {
3572         union ctl_io *io;
3573
3574         if (can_wait)
3575                 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK);
3576         else
3577                 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
3578
3579         if (io != NULL) {
3580                 io->io_hdr.io_type = io_type;
3581                 io->io_hdr.targ_port = targ_port;
3582                 /*
3583                  * XXX KDM this needs to change/go away.  We need to move
3584                  * to a preallocated pool of ctl_scsiio structures.
3585                  */
3586                 io->io_hdr.nexus.targ_target.id = targ_target;
3587                 io->io_hdr.nexus.targ_lun = targ_lun;
3588         }
3589
3590         return (io);
3591 }
3592
3593 void
3594 ctl_kfree_io(union ctl_io *io)
3595 {
3596         free(io, M_CTL);
3597 }
3598 #endif /* unused */
3599
3600 /*
3601  * ctl_softc, pool_type, total_ctl_io are passed in.
3602  * npool is passed out.
3603  */
3604 int
3605 ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
3606                 uint32_t total_ctl_io, struct ctl_io_pool **npool)
3607 {
3608         uint32_t i;
3609         union ctl_io *cur_io, *next_io;
3610         struct ctl_io_pool *pool;
3611         int retval;
3612
3613         retval = 0;
3614
3615         pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3616                                             M_NOWAIT | M_ZERO);
3617         if (pool == NULL) {
3618                 retval = ENOMEM;
3619                 goto bailout;
3620         }
3621
3622         pool->type = pool_type;
3623         pool->ctl_softc = ctl_softc;
3624
3625         mtx_lock(&ctl_softc->pool_lock);
3626         pool->id = ctl_softc->cur_pool_id++;
3627         mtx_unlock(&ctl_softc->pool_lock);
3628
3629         pool->flags = CTL_POOL_FLAG_NONE;
3630         pool->refcount = 1;             /* Reference for validity. */
3631         STAILQ_INIT(&pool->free_queue);
3632
3633         /*
3634          * XXX KDM other options here:
3635          * - allocate a page at a time
3636          * - allocate one big chunk of memory.
3637          * Page allocation might work well, but would take a little more
3638          * tracking.
3639          */
3640         for (i = 0; i < total_ctl_io; i++) {
3641                 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO,
3642                                                 M_NOWAIT);
3643                 if (cur_io == NULL) {
3644                         retval = ENOMEM;
3645                         break;
3646                 }
3647                 cur_io->io_hdr.pool = pool;
3648                 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
3649                 pool->total_ctl_io++;
3650                 pool->free_ctl_io++;
3651         }
3652
3653         if (retval != 0) {
3654                 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3655                      cur_io != NULL; cur_io = next_io) {
3656                         next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
3657                                                               links);
3658                         STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
3659                                       ctl_io_hdr, links);
3660                         free(cur_io, M_CTLIO);
3661                 }
3662
3663                 free(pool, M_CTL);
3664                 goto bailout;
3665         }
3666         mtx_lock(&ctl_softc->pool_lock);
3667         ctl_softc->num_pools++;
3668         STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
3669         /*
3670          * Increment our usage count if this is an external consumer, so we
3671          * can't get unloaded until the external consumer (most likely a
3672          * FETD) unloads and frees his pool.
3673          *
3674          * XXX KDM will this increment the caller's module use count, or
3675          * mine?
3676          */
3677 #if 0
3678         if ((pool_type != CTL_POOL_EMERGENCY)
3679          && (pool_type != CTL_POOL_INTERNAL)
3680          && (pool_type != CTL_POOL_4OTHERSC))
3681                 MOD_INC_USE_COUNT;
3682 #endif
3683
3684         mtx_unlock(&ctl_softc->pool_lock);
3685
3686         *npool = pool;
3687
3688 bailout:
3689
3690         return (retval);
3691 }
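/*
 * Hypothetical call sketch (compiled out): a frontend creating a private
 * pool at attach time and tearing it down on detach.  The pool type and
 * size here are illustrative only.
 */
#if 0
        struct ctl_io_pool *pool;

        if (ctl_pool_create(ctl_softc, CTL_POOL_FETD, 1024, &pool) != 0)
                return (ENOMEM);
        /* ... allocate from the pool with ctl_alloc_io() ... */
        ctl_pool_free(pool);
#endif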
3692
3693 static int
3694 ctl_pool_acquire(struct ctl_io_pool *pool)
3695 {
3696
3697         mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
3698
3699         if (pool->flags & CTL_POOL_FLAG_INVALID)
3700                 return (EINVAL);
3701
3702         pool->refcount++;
3703
3704         return (0);
3705 }
3706
3707 static void
3708 ctl_pool_release(struct ctl_io_pool *pool)
3709 {
3710         struct ctl_softc *ctl_softc = pool->ctl_softc;
3711         union ctl_io *io;
3712
3713         mtx_assert(&ctl_softc->pool_lock, MA_OWNED);
3714
3715         if (--pool->refcount != 0)
3716                 return;
3717
3718         while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
3719                 STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
3720                               links);
3721                 free(io, M_CTLIO);
3722         }
3723
3724         STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
3725         ctl_softc->num_pools--;
3726
3727         /*
3728          * XXX KDM will this decrement the caller's usage count or mine?
3729          */
3730 #if 0
3731         if ((pool->type != CTL_POOL_EMERGENCY)
3732          && (pool->type != CTL_POOL_INTERNAL)
3733          && (pool->type != CTL_POOL_4OTHERSC))
3734                 MOD_DEC_USE_COUNT;
3735 #endif
3736
3737         free(pool, M_CTL);
3738 }
3739
3740 void
3741 ctl_pool_free(struct ctl_io_pool *pool)
3742 {
3743         struct ctl_softc *ctl_softc;
3744
3745         if (pool == NULL)
3746                 return;
3747
3748         ctl_softc = pool->ctl_softc;
3749         mtx_lock(&ctl_softc->pool_lock);
3750         pool->flags |= CTL_POOL_FLAG_INVALID;
3751         ctl_pool_release(pool);
3752         mtx_unlock(&ctl_softc->pool_lock);
3753 }
3754
3755 /*
3756  * This routine does not block (except for spinlocks of course).
3757  * It tries to allocate a ctl_io union from the caller's pool as quickly as
3758  * possible.
3759  */
3760 union ctl_io *
3761 ctl_alloc_io(void *pool_ref)
3762 {
3763         union ctl_io *io;
3764         struct ctl_softc *ctl_softc;
3765         struct ctl_io_pool *pool, *npool;
3766         struct ctl_io_pool *emergency_pool;
3767
3768         pool = (struct ctl_io_pool *)pool_ref;
3769
3770         if (pool == NULL) {
3771                 printf("%s: pool is NULL\n", __func__);
3772                 return (NULL);
3773         }
3774
3775         emergency_pool = NULL;
3776
3777         ctl_softc = pool->ctl_softc;
3778
3779         mtx_lock(&ctl_softc->pool_lock);
3780         /*
3781          * First, try to get the io structure from the user's pool.
3782          */
3783         if (ctl_pool_acquire(pool) == 0) {
3784                 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3785                 if (io != NULL) {
3786                         STAILQ_REMOVE_HEAD(&pool->free_queue, links);
3787                         pool->total_allocated++;
3788                         pool->free_ctl_io--;
3789                         mtx_unlock(&ctl_softc->pool_lock);
3790                         return (io);
3791                 } else
3792                         ctl_pool_release(pool);
3793         }
3794         /*
3795          * If he doesn't have any io structures left, search for an
3796          * emergency pool and grab one from there.
3797          */
3798         STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
3799                 if (npool->type != CTL_POOL_EMERGENCY)
3800                         continue;
3801
3802                 if (ctl_pool_acquire(npool) != 0)
3803                         continue;
3804
3805                 emergency_pool = npool;
3806
3807                 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
3808                 if (io != NULL) {
3809                         STAILQ_REMOVE_HEAD(&npool->free_queue, links);
3810                         npool->total_allocated++;
3811                         npool->free_ctl_io--;
3812                         mtx_unlock(&ctl_softc->pool_lock);
3813                         return (io);
3814                 } else
3815                         ctl_pool_release(npool);
3816         }
3817
3818         /* Drop the spinlock before we malloc */
3819         mtx_unlock(&ctl_softc->pool_lock);
3820
3821         /*
3822          * The emergency pool (if it exists) didn't have one, so try an
3823          * atomic (i.e. nonblocking) malloc and see if we get lucky.
3824          */
3825         io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT);
3826         if (io != NULL) {
3827                 /*
3828                  * If the emergency pool exists but is empty, add this
3829                  * ctl_io to its list when it gets freed.
3830                  */
3831                 if (emergency_pool != NULL) {
3832                         mtx_lock(&ctl_softc->pool_lock);
3833                         if (ctl_pool_acquire(emergency_pool) == 0) {
3834                                 io->io_hdr.pool = emergency_pool;
3835                                 emergency_pool->total_ctl_io++;
3836                                 /*
3837                                  * Need to bump this, otherwise
3838                                  * total_allocated and total_freed won't
3839                                  * match when we no longer have anything
3840                                  * outstanding.
3841                                  */
3842                                 emergency_pool->total_allocated++;
3843                         }
3844                         mtx_unlock(&ctl_softc->pool_lock);
3845                 } else
3846                         io->io_hdr.pool = NULL;
3847         }
3848
3849         return (io);
3850 }
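/*
 * Hypothetical usage sketch (compiled out): the path a FETD would take,
 * assuming the conventional port ctl_pool_ref pool reference.
 */
#if 0
        union ctl_io *io;

        io = ctl_alloc_io(port->ctl_pool_ref);
        if (io == NULL)
                return (ENOMEM);
        ctl_zero_io(io);
        /* ... fill in the nexus and CDB, then queue the io ... */
#endif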
3851
3852 void
3853 ctl_free_io(union ctl_io *io)
3854 {
3855         if (io == NULL)
3856                 return;
3857
3858         /*
3859          * If this ctl_io has a pool, return it to that pool.
3860          */
3861         if (io->io_hdr.pool != NULL) {
3862                 struct ctl_io_pool *pool;
3863
3864                 pool = (struct ctl_io_pool *)io->io_hdr.pool;
3865                 mtx_lock(&pool->ctl_softc->pool_lock);
3866                 io->io_hdr.io_type = 0xff;
3867                 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
3868                 pool->total_freed++;
3869                 pool->free_ctl_io++;
3870                 ctl_pool_release(pool);
3871                 mtx_unlock(&pool->ctl_softc->pool_lock);
3872         } else {
3873                 /*
3874                  * Otherwise, just free it.  We probably malloced it and
3875                  * the emergency pool wasn't available.
3876                  */
3877                 free(io, M_CTLIO);
3878         }
3879
3880 }
3881
3882 void
3883 ctl_zero_io(union ctl_io *io)
3884 {
3885         void *pool_ref;
3886
3887         if (io == NULL)
3888                 return;
3889
3890         /*
3891          * May need to preserve linked list pointers at some point too.
3892          */
3893         pool_ref = io->io_hdr.pool;
3894
3895         memset(io, 0, sizeof(*io));
3896
3897         io->io_hdr.pool = pool_ref;
3898 }
3899
3900 /*
3901  * This routine is currently used for internal copies of ctl_ios that need
3902  * to persist for some reason after we've already returned status to the
3903  * FETD.  (Thus the flag set.)
3904  *
3905  * XXX XXX
3906  * Note that this makes a blind copy of all fields in the ctl_io, except
3907  * for the pool reference.  This includes any memory that has been
3908  * allocated!  That memory will no longer be valid after done has been
3909  * called, so this would be VERY DANGEROUS for commands that actually do
3910  * any reads or writes.  Right now (11/7/2005), this is only used for immediate
3911  * start and stop commands, which don't transfer any data, so this is not a
3912  * problem.  If it is used for anything else, the caller would also need to
3913  * allocate data buffer space and this routine would need to be modified to
3914  * copy the data buffer(s) as well.
3915  */
3916 void
3917 ctl_copy_io(union ctl_io *src, union ctl_io *dest)
3918 {
3919         void *pool_ref;
3920
3921         if ((src == NULL)
3922          || (dest == NULL))
3923                 return;
3924
3925         /*
3926          * May need to preserve linked list pointers at some point too.
3927          */
3928         pool_ref = dest->io_hdr.pool;
3929
3930         memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest)));
3931
3932         dest->io_hdr.pool = pool_ref;
3933         /*
3934          * We need to know that this is an internal copy, and doesn't need
3935          * to get passed back to the FETD that allocated it.
3936          */
3937         dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
3938 }
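/*
 * Hypothetical sketch (compiled out): keeping a private copy of an
 * immediate START STOP UNIT request past status return, which is the
 * usage the warning above describes.
 */
#if 0
        union ctl_io *copy_io;

        copy_io = ctl_alloc_io(io->io_hdr.pool);
        if (copy_io != NULL) {
                ctl_copy_io(io, copy_io);
                /* ... finish work on copy_io after status is returned ... */
        }
#endif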
3939
3940 #ifdef NEEDTOPORT
3941 static void
3942 ctl_update_power_subpage(struct copan_power_subpage *page)
3943 {
3944         int num_luns, num_partitions, config_type;
3945         struct ctl_softc *softc;
3946         cs_BOOL_t aor_present, shelf_50pct_power;
3947         cs_raidset_personality_t rs_type;
3948         int max_active_luns;
3949
3950         softc = control_softc;
3951
3952         /* subtract out the processor LUN */
3953         num_luns = softc->num_luns - 1;
3954         /*
3955          * Default to 7 LUNs active, which was the only number we allowed
3956          * in the past.
3957          */
3958         max_active_luns = 7;
3959
3960         num_partitions = config_GetRsPartitionInfo();
3961         config_type = config_GetConfigType();
3962         shelf_50pct_power = config_GetShelfPowerMode();
3963         aor_present = config_IsAorRsPresent();
3964
3965         rs_type = ddb_GetRsRaidType(1);
3966         if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
3967          && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
3968                 EPRINT(0, "Unsupported RS type %d!", rs_type);
3969         }
3970
3972         page->total_luns = num_luns;
3973
3974         switch (config_type) {
3975         case 40:
3976                 /*
3977                  * In a 40 drive configuration, it doesn't matter what DC
3978                  * cards we have, whether we have AOR enabled or not,
3979                  * partitioning or not, or what type of RAIDset we have.
3980                  * In that scenario, we can power up every LUN we present
3981                  * to the user.
3982                  */
3983                 max_active_luns = num_luns;
3984
3985                 break;
3986         case 64:
3987                 if (shelf_50pct_power == CS_FALSE) {
3988                         /* 25% power */
3989                         if (aor_present == CS_TRUE) {
3990                                 if (rs_type ==
3991                                      CS_RAIDSET_PERSONALITY_RAID5) {
3992                                         max_active_luns = 7;
3993                                 } else if (rs_type ==
3994                                          CS_RAIDSET_PERSONALITY_RAID1){
3995                                         max_active_luns = 14;
3996                                 } else {
3997                                         /* XXX KDM now what?? */
3998                                 }
3999                         } else {
4000                                 if (rs_type ==
4001                                      CS_RAIDSET_PERSONALITY_RAID5) {
4002                                         max_active_luns = 8;
4003                                 } else if (rs_type ==
4004                                          CS_RAIDSET_PERSONALITY_RAID1){
4005                                         max_active_luns = 16;
4006                                 } else {
4007                                         /* XXX KDM now what?? */
4008                                 }
4009                         }
4010                 } else {
4011                         /* 50% power */
4012                         /*
4013                          * With 50% power in a 64 drive configuration, we
4014                          * can power all LUNs we present.
4015                          */
4016                         max_active_luns = num_luns;
4017                 }
4018                 break;
4019         case 112:
4020                 if (shelf_50pct_power == CS_FALSE) {
4021                         /* 25% power */
4022                         if (aor_present == CS_TRUE) {
4023                                 if (rs_type ==
4024                                      CS_RAIDSET_PERSONALITY_RAID5) {
4025                                         max_active_luns = 7;
4026                                 } else if (rs_type ==
4027                                          CS_RAIDSET_PERSONALITY_RAID1){
4028                                         max_active_luns = 14;
4029                                 } else {
4030                                         /* XXX KDM now what?? */
4031                                 }
4032                         } else {
4033                                 if (rs_type ==
4034                                      CS_RAIDSET_PERSONALITY_RAID5) {
4035                                         max_active_luns = 8;
4036                                 } else if (rs_type ==
4037                                          CS_RAIDSET_PERSONALITY_RAID1){
4038                                         max_active_luns = 16;
4039                                 } else {
4040                                         /* XXX KDM now what?? */
4041                                 }
4042                         }
4043                 } else {
4044                         /* 50% power */
4045                         if (aor_present == CS_TRUE) {
4046                                 if (rs_type ==
4047                                      CS_RAIDSET_PERSONALITY_RAID5) {
4048                                         max_active_luns = 14;
4049                                 } else if (rs_type ==
4050                                          CS_RAIDSET_PERSONALITY_RAID1){
4051                                         /*
4052                                          * We're assuming here that disk
4053                                          * caching is enabled, and so we're
4054                                          * able to power up half of each
4055                                          * LUN, and cache all writes.
4056                                          */
4057                                         max_active_luns = num_luns;
4058                                 } else {
4059                                         /* XXX KDM now what?? */
4060                                 }
4061                         } else {
4062                                 if (rs_type ==
4063                                      CS_RAIDSET_PERSONALITY_RAID5) {
4064                                         max_active_luns = 15;
4065                                 } else if (rs_type ==
4066                                          CS_RAIDSET_PERSONALITY_RAID1){
4067                                         max_active_luns = 30;
4068                                 } else {
4069                                         /* XXX KDM now what?? */
4070                                 }
4071                         }
4072                 }
4073                 break;
4074         default:
4075                 /*
4076                  * In this case, we have an unknown configuration, so we
4077                  * just use the default from above.
4078                  */
4079                 break;
4080         }
4081
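        /*
         * For reference, the mapping implemented by the switch above
         * (columns give max_active_luns; "all" means num_luns):
         *
         *      drives  power   AOR     RAID-5  RAID-1
         *      40      any     any     all     all
         *      64      25%     yes     7       14
         *      64      25%     no      8       16
         *      64      50%     any     all     all
         *      112     25%     yes     7       14
         *      112     25%     no      8       16
         *      112     50%     yes     14      all (w/ disk write cache)
         *      112     50%     no      15      30
         *
         * Unknown configurations keep the default of 7.
         */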
4082         page->max_active_luns = max_active_luns;
4083 #if 0
4084         printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
4085                page->total_luns, page->max_active_luns);
4086 #endif
4087 }
4088 #endif /* NEEDTOPORT */
4089
4090 /*
4091  * This routine could be used in the future to load default and/or saved
4092  * mode page parameters for a particular LUN.
4093  */
4094 static int
4095 ctl_init_page_index(struct ctl_lun *lun)
4096 {
4097         int i;
4098         struct ctl_page_index *page_index;
4099         struct ctl_softc *softc;
4100         const char *value;
4101
4102         memcpy(&lun->mode_pages.index, page_index_template,
4103                sizeof(page_index_template));
4104
4105         softc = lun->ctl_softc;
4106
4107         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
4108
4109                 page_index = &lun->mode_pages.index[i];
4110                 /*
4111                  * If this is a disk-only mode page, there's no point in
4112                  * setting it up.  For some pages, we have to have some
4113                  * basic information about the disk in order to calculate the
4114                  * mode page data.
4115                  */
4116                 if ((lun->be_lun->lun_type != T_DIRECT)
4117                  && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
4118                         continue;
4119
4120                 switch (page_index->page_code & SMPH_PC_MASK) {
4121                 case SMS_FORMAT_DEVICE_PAGE: {
4122                         struct scsi_format_page *format_page;
4123
4124                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4125                                 panic("invalid subpage value %d", page_index->subpage);
4126
4127                         /*
4128                          * Sectors per track are set above.  Bytes per
4129                          * sector need to be set here on a per-LUN basis.
4130                          */
4131                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
4132                                &format_page_default,
4133                                sizeof(format_page_default));
4134                         memcpy(&lun->mode_pages.format_page[
4135                                CTL_PAGE_CHANGEABLE], &format_page_changeable,
4136                                sizeof(format_page_changeable));
4137                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
4138                                &format_page_default,
4139                                sizeof(format_page_default));
4140                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
4141                                &format_page_default,
4142                                sizeof(format_page_default));
4143
4144                         format_page = &lun->mode_pages.format_page[
4145                                 CTL_PAGE_CURRENT];
4146                         scsi_ulto2b(lun->be_lun->blocksize,
4147                                     format_page->bytes_per_sector);
4148
4149                         format_page = &lun->mode_pages.format_page[
4150                                 CTL_PAGE_DEFAULT];
4151                         scsi_ulto2b(lun->be_lun->blocksize,
4152                                     format_page->bytes_per_sector);
4153
4154                         format_page = &lun->mode_pages.format_page[
4155                                 CTL_PAGE_SAVED];
4156                         scsi_ulto2b(lun->be_lun->blocksize,
4157                                     format_page->bytes_per_sector);
4158
4159                         page_index->page_data =
4160                                 (uint8_t *)lun->mode_pages.format_page;
4161                         break;
4162                 }
4163                 case SMS_RIGID_DISK_PAGE: {
4164                         struct scsi_rigid_disk_page *rigid_disk_page;
4165                         uint32_t sectors_per_cylinder;
4166                         uint64_t cylinders;
4167 #ifndef __XSCALE__
4168                         int shift;
4169 #endif /* !__XSCALE__ */
4170
4171                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4172                                 panic("invalid subpage value %d",
4173                                       page_index->subpage);
4174
4175                         /*
4176                          * Rotation rate and sectors per track are set
4177                          * above.  We calculate the cylinders here based on
4178                          * capacity.  Due to the number of heads and
4179                          * sectors per track we're using, smaller arrays
4180                          * may turn out to have 0 cylinders.  Linux and
4181                          * FreeBSD don't pay attention to these mode pages
4182                          * to figure out capacity, but Solaris does.  It
4183                          * seems to deal with 0 cylinders just fine, and
4184                          * works out a fake geometry based on the capacity.
4185                          */
4186                         memcpy(&lun->mode_pages.rigid_disk_page[
4187                                CTL_PAGE_CURRENT], &rigid_disk_page_default,
4188                                sizeof(rigid_disk_page_default));
4189                         memcpy(&lun->mode_pages.rigid_disk_page[
4190                CTL_PAGE_CHANGEABLE], &rigid_disk_page_changeable,
4191                                sizeof(rigid_disk_page_changeable));
4192                         memcpy(&lun->mode_pages.rigid_disk_page[
4193                                CTL_PAGE_DEFAULT], &rigid_disk_page_default,
4194                                sizeof(rigid_disk_page_default));
4195                         memcpy(&lun->mode_pages.rigid_disk_page[
4196                                CTL_PAGE_SAVED], &rigid_disk_page_default,
4197                                sizeof(rigid_disk_page_default));
4198
4199                         sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
4200                                 CTL_DEFAULT_HEADS;
4201
4202                         /*
4203                          * The divide method here will be more accurate,
4204                          * probably, but results in a 64-bit software
4205                          * divide (__udivdi3()) being used in the kernel
4206                          * on i386.  On the XScale, that routine is
4207                          * particularly expensive.
4208                          *
4209                          * The shift method for cylinder calculation is
4210                          * accurate if sectors_per_cylinder is a power of
4211                          * 2.  Otherwise it might be slightly off -- you
4212                          * might have a bit of a truncation problem.
4213                          */
4214 #ifdef  __XSCALE__
4215                         cylinders = (lun->be_lun->maxlba + 1) /
4216                                 sectors_per_cylinder;
4217 #else
4218                         for (shift = 31; shift > 0; shift--) {
4219                                 if (sectors_per_cylinder & (1 << shift))
4220                                         break;
4221                         }
4222                         cylinders = (lun->be_lun->maxlba + 1) >> shift;
4223 #endif
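
                        /*
                         * Worked example (a sketch, assuming the stock
                         * CTL_DEFAULT_SECTORS_PER_TRACK of 256 and
                         * CTL_DEFAULT_HEADS of 128 -- check the macros
                         * before relying on the numbers):
                         * sectors_per_cylinder = 256 * 128 = 32768 = 2^15,
                         * so shift = 15 and the shift is exact.  For a
                         * non-power-of-2 value the result would come out
                         * slightly high, since we shift by the highest
                         * set bit only.
                         */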
4224
4225                         /*
4226                          * We've basically got 3 bytes, or 24 bits for the
4227                          * cylinder size in the mode page.  If we're over,
4228                          * just round down to 2^24.
4229                          */
4230                         if (cylinders > 0xffffff)
4231                                 cylinders = 0xffffff;
4232
4233                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4234                                 CTL_PAGE_CURRENT];
4235                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4236
4237                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4238                                 CTL_PAGE_DEFAULT];
4239                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4240
4241                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4242                                 CTL_PAGE_SAVED];
4243                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4244
4245                         page_index->page_data =
4246                                 (uint8_t *)lun->mode_pages.rigid_disk_page;
4247                         break;
4248                 }
4249                 case SMS_CACHING_PAGE: {
4250                         struct scsi_caching_page *caching_page;
4251
4252                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4253                                 panic("invalid subpage value %d",
4254                                       page_index->subpage);
4255                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4256                                &caching_page_default,
4257                                sizeof(caching_page_default));
4258                         memcpy(&lun->mode_pages.caching_page[
4259                                CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4260                                sizeof(caching_page_changeable));
4261                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4262                                &caching_page_default,
4263                                sizeof(caching_page_default));
4264                         caching_page = &lun->mode_pages.caching_page[
4265                             CTL_PAGE_SAVED];
4266                         value = ctl_get_opt(&lun->be_lun->options, "writecache");
4267                         if (value != NULL && strcmp(value, "off") == 0)
4268                                 caching_page->flags1 &= ~SCP_WCE;
4269                         value = ctl_get_opt(&lun->be_lun->options, "readcache");
4270                         if (value != NULL && strcmp(value, "off") == 0)
4271                                 caching_page->flags1 |= SCP_RCD;
4272                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4273                                &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4274                                sizeof(caching_page_default));
4275                         page_index->page_data =
4276                                 (uint8_t *)lun->mode_pages.caching_page;
4277                         break;
4278                 }
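                /*
                 * For reference: the "writecache"/"readcache" options
                 * consulted above come from the backend's option list.
                 * A hypothetical creation-time example (the option names
                 * are real, the invocation is illustrative only):
                 *
                 *      ctladm create -b block -o file=/dev/md0 \
                 *          -o writecache=off -o readcache=off
                 */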
4279                 case SMS_CONTROL_MODE_PAGE: {
4280                         struct scsi_control_page *control_page;
4281
4282                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4283                                 panic("invalid subpage value %d",
4284                                       page_index->subpage);
4285
4286                         memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
4287                                &control_page_default,
4288                                sizeof(control_page_default));
4289                         memcpy(&lun->mode_pages.control_page[
4290                                CTL_PAGE_CHANGEABLE], &control_page_changeable,
4291                                sizeof(control_page_changeable));
4292                         memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
4293                                &control_page_default,
4294                                sizeof(control_page_default));
4295                         control_page = &lun->mode_pages.control_page[
4296                             CTL_PAGE_SAVED];
4297                         value = ctl_get_opt(&lun->be_lun->options, "reordering");
4298                         if (value != NULL && strcmp(value, "unrestricted") == 0) {
4299                                 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK;
4300                                 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED;
4301                         }
4302                         memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
4303                                &lun->mode_pages.control_page[CTL_PAGE_SAVED],
4304                                sizeof(control_page_default));
4305                         page_index->page_data =
4306                                 (uint8_t *)lun->mode_pages.control_page;
4307                         break;
4308
4309                 }
4310                 case SMS_VENDOR_SPECIFIC_PAGE:{
4311                         switch (page_index->subpage) {
4312                         case PWR_SUBPAGE_CODE: {
4313                                 struct copan_power_subpage *current_page,
4314                                                            *saved_page;
4315
4316                                 memcpy(&lun->mode_pages.power_subpage[
4317                                        CTL_PAGE_CURRENT],
4318                                        &power_page_default,
4319                                        sizeof(power_page_default));
4320                                 memcpy(&lun->mode_pages.power_subpage[
4321                                        CTL_PAGE_CHANGEABLE],
4322                                        &power_page_changeable,
4323                                        sizeof(power_page_changeable));
4324                                 memcpy(&lun->mode_pages.power_subpage[
4325                                        CTL_PAGE_DEFAULT],
4326                                        &power_page_default,
4327                                        sizeof(power_page_default));
4328                                 memcpy(&lun->mode_pages.power_subpage[
4329                                        CTL_PAGE_SAVED],
4330                                        &power_page_default,
4331                                        sizeof(power_page_default));
4332                                 page_index->page_data =
4333                                     (uint8_t *)lun->mode_pages.power_subpage;
4334
4335                                 current_page = (struct copan_power_subpage *)
4336                                         (page_index->page_data +
4337                                          (page_index->page_len *
4338                                           CTL_PAGE_CURRENT));
4339                                 saved_page = (struct copan_power_subpage *)
4340                                         (page_index->page_data +
4341                                          (page_index->page_len *
4342                                           CTL_PAGE_SAVED));
4343                                 break;
4344                         }
4345                         case APS_SUBPAGE_CODE: {
4346                                 struct copan_aps_subpage *current_page,
4347                                                          *saved_page;
4348
4349                                 /* This gets set multiple times, but it
4350                                  * should always be the same.  It's only
4351                                  * done during init, so who cares. */
4352                                 index_to_aps_page = i;
4353
4354                                 memcpy(&lun->mode_pages.aps_subpage[
4355                                        CTL_PAGE_CURRENT],
4356                                        &aps_page_default,
4357                                        sizeof(aps_page_default));
4358                                 memcpy(&lun->mode_pages.aps_subpage[
4359                                        CTL_PAGE_CHANGEABLE],
4360                                        &aps_page_changeable,
4361                                        sizeof(aps_page_changeable));
4362                                 memcpy(&lun->mode_pages.aps_subpage[
4363                                        CTL_PAGE_DEFAULT],
4364                                        &aps_page_default,
4365                                        sizeof(aps_page_default));
4366                                 memcpy(&lun->mode_pages.aps_subpage[
4367                                        CTL_PAGE_SAVED],
4368                                        &aps_page_default,
4369                                        sizeof(aps_page_default));
4370                                 page_index->page_data =
4371                                         (uint8_t *)lun->mode_pages.aps_subpage;
4372
4373                                 current_page = (struct copan_aps_subpage *)
4374                                         (page_index->page_data +
4375                                          (page_index->page_len *
4376                                           CTL_PAGE_CURRENT));
4377                                 saved_page = (struct copan_aps_subpage *)
4378                                         (page_index->page_data +
4379                                          (page_index->page_len *
4380                                           CTL_PAGE_SAVED));
4381                                 break;
4382                         }
4383                         case DBGCNF_SUBPAGE_CODE: {
4384                                 struct copan_debugconf_subpage *current_page,
4385                                                                *saved_page;
4386
4387                                 memcpy(&lun->mode_pages.debugconf_subpage[
4388                                        CTL_PAGE_CURRENT],
4389                                        &debugconf_page_default,
4390                                        sizeof(debugconf_page_default));
4391                                 memcpy(&lun->mode_pages.debugconf_subpage[
4392                                        CTL_PAGE_CHANGEABLE],
4393                                        &debugconf_page_changeable,
4394                                        sizeof(debugconf_page_changeable));
4395                                 memcpy(&lun->mode_pages.debugconf_subpage[
4396                                        CTL_PAGE_DEFAULT],
4397                                        &debugconf_page_default,
4398                                        sizeof(debugconf_page_default));
4399                                 memcpy(&lun->mode_pages.debugconf_subpage[
4400                                        CTL_PAGE_SAVED],
4401                                        &debugconf_page_default,
4402                                        sizeof(debugconf_page_default));
4403                                 page_index->page_data =
4404                                         (uint8_t *)lun->mode_pages.debugconf_subpage;
4405
4406                                 current_page = (struct copan_debugconf_subpage *)
4407                                         (page_index->page_data +
4408                                          (page_index->page_len *
4409                                           CTL_PAGE_CURRENT));
4410                                 saved_page = (struct copan_debugconf_subpage *)
4411                                         (page_index->page_data +
4412                                          (page_index->page_len *
4413                                           CTL_PAGE_SAVED));
4414                                 break;
4415                         }
4416                         default:
4417                                 panic("invalid subpage value %d",
4418                                       page_index->subpage);
4419                                 break;
4420                         }
4421                         break;
4422                 }
4423                 default:
4424                         panic("invalid page value %d",
4425                               page_index->page_code & SMPH_PC_MASK);
4426                         break;
4427         }
4428         }
4429
4430         return (CTL_RETVAL_COMPLETE);
4431 }
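
#if 0
/*
 * Illustrative sketch, not compiled in: the four instances of each mode
 * page (current, changeable, default, saved) are stored back to back, so
 * instance N of a page lives at page_data + page_len * N.  This is the
 * addressing pattern used for the subpage pointers above.  The helper
 * name is hypothetical.
 */
static void *
ctl_page_instance(struct ctl_page_index *page_index, int instance)
{

        return (page_index->page_data + (page_index->page_len * instance));
}
#endif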
4432
4433 /*
4434  * LUN allocation.
4435  *
4436  * Requirements:
4437  * - caller allocates and zeros LUN storage, or passes in a NULL LUN
4438  *   if it wants us to allocate the LUN; the caller must then be able to sleep
4439  * - ctl_softc is always set
4440  * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4441  *
4442  * Returns 0 for success, non-zero (errno) for failure.
4443  */
4444 static int
4445 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4446               struct ctl_be_lun *const be_lun, struct ctl_id target_id)
4447 {
4448         struct ctl_lun *nlun, *lun;
4449         struct ctl_port *port;
4450         struct scsi_vpd_id_descriptor *desc;
4451         struct scsi_vpd_id_t10 *t10id;
4452         const char *eui, *naa, *scsiname, *vendor;
4453         int lun_number, i, lun_malloced;
4454         int devidlen, idlen1, idlen2 = 0, len;
4455
4456         if (be_lun == NULL)
4457                 return (EINVAL);
4458
4459         /*
4460          * We currently only support Direct Access or Processor LUN types.
4461          */
4462         switch (be_lun->lun_type) {
4463         case T_DIRECT:
4464                 break;
4465         case T_PROCESSOR:
4466                 break;
4467         case T_SEQUENTIAL:
4468         case T_CHANGER:
4469         default:
4470                 be_lun->lun_config_status(be_lun->be_lun,
4471                                           CTL_LUN_CONFIG_FAILURE);
4472                 return (EINVAL);
4473         }
4474         if (ctl_lun == NULL) {
4475                 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4476                 lun_malloced = 1;
4477         } else {
4478                 lun_malloced = 0;
4479                 lun = ctl_lun;
4480         }
4481
4482         memset(lun, 0, sizeof(*lun));
4483         if (lun_malloced)
4484                 lun->flags = CTL_LUN_MALLOCED;
4485
4486         /* Generate LUN ID. */
4487         devidlen = max(CTL_DEVID_MIN_LEN,
4488             strnlen(be_lun->device_id, CTL_DEVID_LEN));
4489         idlen1 = sizeof(*t10id) + devidlen;
4490         len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
4491         scsiname = ctl_get_opt(&be_lun->options, "scsiname");
4492         if (scsiname != NULL) {
4493                 idlen2 = roundup2(strlen(scsiname) + 1, 4);
4494                 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
4495         }
4496         eui = ctl_get_opt(&be_lun->options, "eui");
4497         if (eui != NULL) {
4498                 len += sizeof(struct scsi_vpd_id_descriptor) + 8;
4499         }
4500         naa = ctl_get_opt(&be_lun->options, "naa");
4501         if (naa != NULL) {
4502                 len += sizeof(struct scsi_vpd_id_descriptor) + 8;
4503         }
4504         lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
4505             M_CTL, M_WAITOK | M_ZERO);
4506         lun->lun_devid->len = len;
4507         desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
4508         desc->proto_codeset = SVPD_ID_CODESET_ASCII;
4509         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
4510         desc->length = idlen1;
4511         t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
4512         memset(t10id->vendor, ' ', sizeof(t10id->vendor));
4513         if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
4514                 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
4515         } else {
4516                 strncpy((char *)t10id->vendor, vendor,
4517                     min(sizeof(t10id->vendor), strlen(vendor)));
4518         }
4519         strncpy((char *)t10id->vendor_spec_id,
4520             (char *)be_lun->device_id, devidlen);
4521         if (scsiname != NULL) {
4522                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4523                     desc->length);
4524                 desc->proto_codeset = SVPD_ID_CODESET_UTF8;
4525                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4526                     SVPD_ID_TYPE_SCSI_NAME;
4527                 desc->length = idlen2;
4528                 strlcpy(desc->identifier, scsiname, idlen2);
4529         }
4530         if (eui != NULL) {
4531                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4532                     desc->length);
4533                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4534                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4535                     SVPD_ID_TYPE_EUI64;
4536                 desc->length = 8;
4537                 scsi_u64to8b(strtouq(eui, NULL, 0), desc->identifier);
4538         }
4539         if (naa != NULL) {
4540                 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4541                     desc->length);
4542                 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4543                 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4544                     SVPD_ID_TYPE_NAA;
4545                 desc->length = 8;
4546                 scsi_u64to8b(strtouq(naa, NULL, 0), desc->identifier);
4547         }
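        /*
         * At this point lun_devid holds the descriptor list we will return
         * for the device identification VPD page (0x83).  For example,
         * with "scsiname", "eui" and "naa" all set it contains, in order:
         *
         *      T10 vendor ID (ASCII):   vendor + device_id
         *      SCSI name     (UTF-8):   the "scsiname" option string
         *      EUI-64        (binary):  8 bytes parsed from "eui"
         *      NAA           (binary):  8 bytes parsed from "naa"
         */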
4548
4549         mtx_lock(&ctl_softc->ctl_lock);
4550         /*
4551          * See if the caller requested a particular LUN number.  If so, see
4552          * if it is available.  Otherwise, allocate the first available LUN.
4553          */
4554         if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4555                 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4556                  || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4557                         mtx_unlock(&ctl_softc->ctl_lock);
4558                         if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4559                                 printf("ctl: requested LUN ID %d is higher "
4560                                        "than CTL_MAX_LUNS - 1 (%d)\n",
4561                                        be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4562                         } else {
4563                                 /*
4564                                  * XXX KDM return an error, or just assign
4565                                  * another LUN ID in this case??
4566                                  */
4567                                 printf("ctl: requested LUN ID %d is already "
4568                                        "in use\n", be_lun->req_lun_id);
4569                         }
4570                         if (lun->flags & CTL_LUN_MALLOCED)
4571                                 free(lun, M_CTL);
4572                         be_lun->lun_config_status(be_lun->be_lun,
4573                                                   CTL_LUN_CONFIG_FAILURE);
4574                         return (ENOSPC);
4575                 }
4576                 lun_number = be_lun->req_lun_id;
4577         } else {
4578                 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
4579                 if (lun_number == -1) {
4580                         mtx_unlock(&ctl_softc->ctl_lock);
4581                         printf("ctl: can't allocate LUN on target %ju, out of "
4582                                "LUNs\n", (uintmax_t)target_id.id);
4583                         if (lun->flags & CTL_LUN_MALLOCED)
4584                                 free(lun, M_CTL);
4585                         be_lun->lun_config_status(be_lun->be_lun,
4586                                                   CTL_LUN_CONFIG_FAILURE);
4587                         return (ENOSPC);
4588                 }
4589         }
4590         ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4591
4592         mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
4593         lun->target = target_id;
4594         lun->lun = lun_number;
4595         lun->be_lun = be_lun;
4596         /*
4597          * The processor LUN is always enabled.  Disk LUNs come on line
4598          * disabled, and must be enabled by the backend.
4599          */
4600         lun->flags |= CTL_LUN_DISABLED;
4601         lun->backend = be_lun->be;
4602         be_lun->ctl_lun = lun;
4603         be_lun->lun_id = lun_number;
4604         atomic_add_int(&be_lun->be->num_luns, 1);
4605         if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
4606                 lun->flags |= CTL_LUN_STOPPED;
4607
4608         if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
4609                 lun->flags |= CTL_LUN_INOPERABLE;
4610
4611         if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4612                 lun->flags |= CTL_LUN_PRIMARY_SC;
4613
4614         lun->ctl_softc = ctl_softc;
4615         TAILQ_INIT(&lun->ooa_queue);
4616         TAILQ_INIT(&lun->blocked_queue);
4617         STAILQ_INIT(&lun->error_list);
4618         ctl_tpc_lun_init(lun);
4619
4620         /*
4621          * Initialize the mode page index.
4622          */
4623         ctl_init_page_index(lun);
4624
4625         /*
4626          * Set the poweron UA for all initiators on this LUN only.
4627          */
4628         for (i = 0; i < CTL_MAX_INITIATORS; i++)
4629                 lun->pending_ua[i] = CTL_UA_POWERON;
4630
4631         /*
4632          * Now, before we insert this lun on the lun list, set the lun
4633          * inventory changed UA for all other luns.
4634          */
4635         STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4636                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
4637                         nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
4638                 }
4639         }
4640
4641         STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4642
4643         ctl_softc->ctl_luns[lun_number] = lun;
4644
4645         ctl_softc->num_luns++;
4646
4647         /* Setup statistics gathering */
4648         lun->stats.device_type = be_lun->lun_type;
4649         lun->stats.lun_number = lun_number;
4650         if (lun->stats.device_type == T_DIRECT)
4651                 lun->stats.blocksize = be_lun->blocksize;
4652         else
4653                 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4654         for (i = 0;i < CTL_MAX_PORTS;i++)
4655                 lun->stats.ports[i].targ_port = i;
4656
4657         mtx_unlock(&ctl_softc->ctl_lock);
4658
4659         lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4660
4661         /*
4662          * Run through each registered FETD and bring it online if it isn't
4663          * already.  Enable the target ID if it hasn't been enabled, and
4664          * enable this particular LUN.
4665          */
4666         STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
4667                 int retval;
4668
4669                 retval = port->lun_enable(port->targ_lun_arg, target_id, lun_number);
4670                 if (retval != 0) {
4671                         printf("ctl_alloc_lun: FETD %s port %d returned error "
4672                                "%d for lun_enable on target %ju lun %d\n",
4673                                port->port_name, port->targ_port, retval,
4674                                (uintmax_t)target_id.id, lun_number);
4675                 } else
4676                         port->status |= CTL_PORT_STATUS_LUN_ONLINE;
4677         }
4678         return (0);
4679 }
4680
4681 /*
4682  * Delete a LUN.
4683  * Assumptions:
4684  * - LUN has already been marked invalid and any pending I/O has been taken
4685  *   care of.
4686  */
4687 static int
4688 ctl_free_lun(struct ctl_lun *lun)
4689 {
4690         struct ctl_softc *softc;
4691 #if 0
4692         struct ctl_port *port;
4693 #endif
4694         struct ctl_lun *nlun;
4695         int i;
4696
4697         softc = lun->ctl_softc;
4698
4699         mtx_assert(&softc->ctl_lock, MA_OWNED);
4700
4701         STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4702
4703         ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4704
4705         softc->ctl_luns[lun->lun] = NULL;
4706
4707         if (!TAILQ_EMPTY(&lun->ooa_queue))
4708                 panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
4709
4710         softc->num_luns--;
4711
4712         /*
4713          * XXX KDM this scheme only works for a single target/multiple LUN
4714          * setup.  It needs to be revamped for a multiple target scheme.
4715          *
4716          * XXX KDM this results in port->lun_disable() getting called twice,
4717          * once when ctl_disable_lun() is called, and a second time here.
4718          * We really need to re-think the LUN disable semantics.  There
4719          * should probably be several steps/levels to LUN removal:
4720          *  - disable
4721          *  - invalidate
4722          *  - free
4723          *
4724          * Right now we only have a disable method when communicating to
4725          * the front end ports, at least for individual LUNs.
4726          */
4727 #if 0
4728         STAILQ_FOREACH(port, &softc->port_list, links) {
4729                 int retval;
4730
4731                 retval = port->lun_disable(port->targ_lun_arg, lun->target,
4732                                          lun->lun);
4733                 if (retval != 0) {
4734                         printf("ctl_free_lun: FETD %s port %d returned error "
4735                                "%d for lun_disable on target %ju lun %jd\n",
4736                                port->port_name, port->targ_port, retval,
4737                                (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4738                 }
4739
4740                 if (STAILQ_FIRST(&softc->lun_list) == NULL) {
4741                         port->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
4742
4743                         retval = port->targ_disable(port->targ_lun_arg,lun->target);
4744                         if (retval != 0) {
4745                                 printf("ctl_free_lun: FETD %s port %d "
4746                                        "returned error %d for targ_disable on "
4747                                        "target %ju\n", port->port_name,
4748                                        port->targ_port, retval,
4749                                        (uintmax_t)lun->target.id);
4750                         } else
4751                                 port->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
4752
4753                         if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
4754                                 continue;
4755
4756 #if 0
4757                         port->port_offline(port->onoff_arg);
4758                         port->status &= ~CTL_PORT_STATUS_ONLINE;
4759 #endif
4760                 }
4761         }
4762 #endif
4763
4764         /*
4765          * Tell the backend to free resources, if this LUN has a backend.
4766          */
4767         atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4768         lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4769
4770         ctl_tpc_lun_shutdown(lun);
4771         mtx_destroy(&lun->lun_lock);
4772         free(lun->lun_devid, M_CTL);
4773         if (lun->flags & CTL_LUN_MALLOCED)
4774                 free(lun, M_CTL);
4775
4776         STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4777                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
4778                         nlun->pending_ua[i] |= CTL_UA_LUN_CHANGE;
4779                 }
4780         }
4781
4782         return (0);
4783 }
4784
4785 static void
4786 ctl_create_lun(struct ctl_be_lun *be_lun)
4787 {
4788         struct ctl_softc *ctl_softc;
4789
4790         ctl_softc = control_softc;
4791
4792         /*
4793          * ctl_alloc_lun() should handle all potential failure cases.
4794          */
4795         ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target);
4796 }
4797
4798 int
4799 ctl_add_lun(struct ctl_be_lun *be_lun)
4800 {
4801         struct ctl_softc *ctl_softc = control_softc;
4802
4803         mtx_lock(&ctl_softc->ctl_lock);
4804         STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
4805         mtx_unlock(&ctl_softc->ctl_lock);
4806         wakeup(&ctl_softc->pending_lun_queue);
4807
4808         return (0);
4809 }
4810
4811 int
4812 ctl_enable_lun(struct ctl_be_lun *be_lun)
4813 {
4814         struct ctl_softc *ctl_softc;
4815         struct ctl_port *port, *nport;
4816         struct ctl_lun *lun;
4817         int retval;
4818
4819         ctl_softc = control_softc;
4820
4821         lun = (struct ctl_lun *)be_lun->ctl_lun;
4822
4823         mtx_lock(&ctl_softc->ctl_lock);
4824         mtx_lock(&lun->lun_lock);
4825         if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4826                 /*
4827                  * eh?  Why did we get called if the LUN is already
4828                  * enabled?
4829                  */
4830                 mtx_unlock(&lun->lun_lock);
4831                 mtx_unlock(&ctl_softc->ctl_lock);
4832                 return (0);
4833         }
4834         lun->flags &= ~CTL_LUN_DISABLED;
4835         mtx_unlock(&lun->lun_lock);
4836
4837         for (port = STAILQ_FIRST(&ctl_softc->port_list); port != NULL; port = nport) {
4838                 nport = STAILQ_NEXT(port, links);
4839
4840                 /*
4841                  * Drop the lock while we call the FETD's enable routine.
4842                  * This can lead to a callback into CTL (at least in the
4843          * case of the internal initiator frontend).
4844                  */
4845                 mtx_unlock(&ctl_softc->ctl_lock);
4846                 retval = port->lun_enable(port->targ_lun_arg, lun->target, lun->lun);
4847                 mtx_lock(&ctl_softc->ctl_lock);
4848                 if (retval != 0) {
4849                         printf("%s: FETD %s port %d returned error "
4850                                "%d for lun_enable on target %ju lun %jd\n",
4851                                __func__, port->port_name, port->targ_port, retval,
4852                                (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4853                 }
4854 #if 0
4855                 else {
4856                         /* NOTE: TODO: why does lun enable affect port status? */
4857                         port->status |= CTL_PORT_STATUS_LUN_ONLINE;
4858                 }
4859 #endif
4860         }
4861
4862         mtx_unlock(&ctl_softc->ctl_lock);
4863
4864         return (0);
4865 }
4866
4867 int
4868 ctl_disable_lun(struct ctl_be_lun *be_lun)
4869 {
4870         struct ctl_softc *ctl_softc;
4871         struct ctl_port *port;
4872         struct ctl_lun *lun;
4873         int retval;
4874
4875         ctl_softc = control_softc;
4876
4877         lun = (struct ctl_lun *)be_lun->ctl_lun;
4878
4879         mtx_lock(&ctl_softc->ctl_lock);
4880         mtx_lock(&lun->lun_lock);
4881         if (lun->flags & CTL_LUN_DISABLED) {
4882                 mtx_unlock(&lun->lun_lock);
4883                 mtx_unlock(&ctl_softc->ctl_lock);
4884                 return (0);
4885         }
4886         lun->flags |= CTL_LUN_DISABLED;
4887         mtx_unlock(&lun->lun_lock);
4888
4889         STAILQ_FOREACH(port, &ctl_softc->port_list, links) {
4890                 mtx_unlock(&ctl_softc->ctl_lock);
4891                 /*
4892                  * Drop the lock before we call the frontend's disable
4893                  * routine, to avoid lock order reversals.
4894                  *
4895                  * XXX KDM what happens if the frontend list changes while
4896                  * we're traversing it?  It's unlikely, but should be handled.
4897                  */
4898                 retval = port->lun_disable(port->targ_lun_arg, lun->target,
4899                                          lun->lun);
4900                 mtx_lock(&ctl_softc->ctl_lock);
4901                 if (retval != 0) {
4902                         printf("ctl_disable_lun: FETD %s port %d returned error "
4903                                "%d for lun_disable on target %ju lun %jd\n",
4904                                port->port_name, port->targ_port, retval,
4905                                (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4906                 }
4907         }
4908
4909         mtx_unlock(&ctl_softc->ctl_lock);
4910
4911         return (0);
4912 }
4913
4914 int
4915 ctl_start_lun(struct ctl_be_lun *be_lun)
4916 {
4917         struct ctl_softc *ctl_softc;
4918         struct ctl_lun *lun;
4919
4920         ctl_softc = control_softc;
4921
4922         lun = (struct ctl_lun *)be_lun->ctl_lun;
4923
4924         mtx_lock(&lun->lun_lock);
4925         lun->flags &= ~CTL_LUN_STOPPED;
4926         mtx_unlock(&lun->lun_lock);
4927
4928         return (0);
4929 }
4930
4931 int
4932 ctl_stop_lun(struct ctl_be_lun *be_lun)
4933 {
4934         struct ctl_softc *ctl_softc;
4935         struct ctl_lun *lun;
4936
4937         ctl_softc = control_softc;
4938
4939         lun = (struct ctl_lun *)be_lun->ctl_lun;
4940
4941         mtx_lock(&lun->lun_lock);
4942         lun->flags |= CTL_LUN_STOPPED;
4943         mtx_unlock(&lun->lun_lock);
4944
4945         return (0);
4946 }
4947
4948 int
4949 ctl_lun_offline(struct ctl_be_lun *be_lun)
4950 {
4951         struct ctl_softc *ctl_softc;
4952         struct ctl_lun *lun;
4953
4954         ctl_softc = control_softc;
4955
4956         lun = (struct ctl_lun *)be_lun->ctl_lun;
4957
4958         mtx_lock(&lun->lun_lock);
4959         lun->flags |= CTL_LUN_OFFLINE;
4960         mtx_unlock(&lun->lun_lock);
4961
4962         return (0);
4963 }
4964
4965 int
4966 ctl_lun_online(struct ctl_be_lun *be_lun)
4967 {
4968         struct ctl_softc *ctl_softc;
4969         struct ctl_lun *lun;
4970
4971         ctl_softc = control_softc;
4972
4973         lun = (struct ctl_lun *)be_lun->ctl_lun;
4974
4975         mtx_lock(&lun->lun_lock);
4976         lun->flags &= ~CTL_LUN_OFFLINE;
4977         mtx_unlock(&lun->lun_lock);
4978
4979         return (0);
4980 }
4981
4982 int
4983 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4984 {
4985         struct ctl_softc *ctl_softc;
4986         struct ctl_lun *lun;
4987
4988         ctl_softc = control_softc;
4989
4990         lun = (struct ctl_lun *)be_lun->ctl_lun;
4991
4992         mtx_lock(&lun->lun_lock);
4993
4994         /*
4995          * The LUN needs to be disabled before it can be marked invalid.
4996          */
4997         if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4998                 mtx_unlock(&lun->lun_lock);
4999                 return (-1);
5000         }
5001         /*
5002          * Mark the LUN invalid.
5003          */
5004         lun->flags |= CTL_LUN_INVALID;
5005
5006         /*
5007          * If there is nothing in the OOA queue, go ahead and free the LUN.
5008          * If we have something in the OOA queue, we'll free it when the
5009          * last I/O completes.
5010          */
5011         if (TAILQ_EMPTY(&lun->ooa_queue)) {
5012                 mtx_unlock(&lun->lun_lock);
5013                 mtx_lock(&ctl_softc->ctl_lock);
5014                 ctl_free_lun(lun);
5015                 mtx_unlock(&ctl_softc->ctl_lock);
5016         } else
5017                 mtx_unlock(&lun->lun_lock);
5018
5019         return (0);
5020 }
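
/*
 * Taken together with ctl_disable_lun() above, a backend therefore tears
 * down a LUN roughly as follows (a sketch; error handling omitted):
 *
 *      ctl_disable_lun(be_lun);
 *      ctl_invalidate_lun(be_lun);
 *      ... then wait for the lun_shutdown() callback issued from
 *      ctl_free_lun() once any outstanding I/O has drained ...
 */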
5021
5022 int
5023 ctl_lun_inoperable(struct ctl_be_lun *be_lun)
5024 {
5025         struct ctl_softc *ctl_softc;
5026         struct ctl_lun *lun;
5027
5028         ctl_softc = control_softc;
5029         lun = (struct ctl_lun *)be_lun->ctl_lun;
5030
5031         mtx_lock(&lun->lun_lock);
5032         lun->flags |= CTL_LUN_INOPERABLE;
5033         mtx_unlock(&lun->lun_lock);
5034
5035         return (0);
5036 }
5037
5038 int
5039 ctl_lun_operable(struct ctl_be_lun *be_lun)
5040 {
5041         struct ctl_softc *ctl_softc;
5042         struct ctl_lun *lun;
5043
5044         ctl_softc = control_softc;
5045         lun = (struct ctl_lun *)be_lun->ctl_lun;
5046
5047         mtx_lock(&lun->lun_lock);
5048         lun->flags &= ~CTL_LUN_INOPERABLE;
5049         mtx_unlock(&lun->lun_lock);
5050
5051         return (0);
5052 }
5053
5054 int
5055 ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
5056                    int lock)
5057 {
5058         struct ctl_softc *softc;
5059         struct ctl_lun *lun;
5060         struct copan_aps_subpage *current_sp;
5061         struct ctl_page_index *page_index;
5062         int i;
5063
5064         softc = control_softc;
5065
5066         mtx_lock(&softc->ctl_lock);
5067
5068         lun = (struct ctl_lun *)be_lun->ctl_lun;
5069         mtx_lock(&lun->lun_lock);
5070
5071         page_index = NULL;
5072         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
5073                 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
5074                      APS_PAGE_CODE)
5075                         continue;
5076
5077                 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
5078                         continue;
5079                 page_index = &lun->mode_pages.index[i];
5080         }
5081
5082         if (page_index == NULL) {
5083                 mtx_unlock(&lun->lun_lock);
5084                 mtx_unlock(&softc->ctl_lock);
5085                 printf("%s: APS subpage not found for lun %ju!\n", __func__,
5086                        (uintmax_t)lun->lun);
5087                 return (1);
5088         }
5089 #if 0
5090         if ((softc->aps_locked_lun != 0)
5091          && (softc->aps_locked_lun != lun->lun)) {
5092                 printf("%s: attempt to lock LUN %llu when %llu is already "
5093                        "locked\n", __func__, (unsigned long long)lun->lun,
                       (unsigned long long)softc->aps_locked_lun);
5094                 mtx_unlock(&lun->lun_lock);
5095                 mtx_unlock(&softc->ctl_lock);
5096                 return (1);
5097         }
5098 #endif
5099
5100         current_sp = (struct copan_aps_subpage *)(page_index->page_data +
5101                 (page_index->page_len * CTL_PAGE_CURRENT));
5102
5103         if (lock != 0) {
5104                 current_sp->lock_active = APS_LOCK_ACTIVE;
5105                 softc->aps_locked_lun = lun->lun;
5106         } else {
5107                 current_sp->lock_active = 0;
5108                 softc->aps_locked_lun = 0;
5109         }
5110
5111
5112         /*
5113          * If we're in HA mode, try to send the lock message to the other
5114          * side.
5115          */
5116         if (ctl_is_single == 0) {
5117                 int isc_retval;
5118                 union ctl_ha_msg lock_msg;
5119
5120                 lock_msg.hdr.nexus = *nexus;
5121                 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
5122                 if (lock != 0)
5123                         lock_msg.aps.lock_flag = 1;
5124                 else
5125                         lock_msg.aps.lock_flag = 0;
5126                 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
5127                                          sizeof(lock_msg), 0);
5128                 if (isc_retval > CTL_HA_STATUS_SUCCESS) {
5129                         printf("%s: APS (lock=%d) error returned from "
5130                                "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
5131                         mtx_unlock(&lun->lun_lock);
5132                         mtx_unlock(&softc->ctl_lock);
5133                         return (1);
5134                 }
5135         }
5136
5137         mtx_unlock(&lun->lun_lock);
5138         mtx_unlock(&softc->ctl_lock);
5139
5140         return (0);
5141 }
5142
5143 void
5144 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
5145 {
5146         struct ctl_lun *lun;
5147         struct ctl_softc *softc;
5148         int i;
5149
5150         softc = control_softc;
5151
5152         lun = (struct ctl_lun *)be_lun->ctl_lun;
5153
5154         mtx_lock(&lun->lun_lock);
5155
5156         for (i = 0; i < CTL_MAX_INITIATORS; i++) 
5157                 lun->pending_ua[i] |= CTL_UA_CAPACITY_CHANGED;
5158
5159         mtx_unlock(&lun->lun_lock);
5160 }
5161
5162 /*
5163  * Backend "memory move is complete" callback for requests that never
5164  * make it down to, say, RAIDCore's configuration code.
5165  */
5166 int
5167 ctl_config_move_done(union ctl_io *io)
5168 {
5169         int retval;
5170
5171         retval = CTL_RETVAL_COMPLETE;
5172
5173
5174         CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
5175         /*
5176          * XXX KDM this shouldn't happen, but what if it does?
5177          */
5178         if (io->io_hdr.io_type != CTL_IO_SCSI)
5179                 panic("I/O type isn't CTL_IO_SCSI!");
5180
5181         if ((io->io_hdr.port_status == 0)
5182          && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
5183          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
5184                 io->io_hdr.status = CTL_SUCCESS;
5185         else if ((io->io_hdr.port_status != 0)
5186               && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
5187               && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){
5188                 /*
5189                  * For hardware error sense keys, the sense key
5190                  * specific value is defined to be a retry count,
5191                  * but we use it to pass back an internal FETD
5192                  * error code.  XXX KDM  Hopefully the FETD is only
5193                  * using 16 bits for an error code, since that's
5194                  * all the space we have in the sks field.
5195                  */
5196                 ctl_set_internal_failure(&io->scsiio,
5197                                          /*sks_valid*/ 1,
5198                                          /*retry_count*/
5199                                          io->io_hdr.port_status);
5200                 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5201                         free(io->scsiio.kern_data_ptr, M_CTL);
5202                 ctl_done(io);
5203                 goto bailout;
5204         }
5205
5206         if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
5207          || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
5208          || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
5209                 /*
5210                  * XXX KDM just assuming a single pointer here, and not a
5211                  * S/G list.  If we start using S/G lists for config data,
5212                  * we'll need to know how to clean them up here as well.
5213                  */
5214                 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5215                         free(io->scsiio.kern_data_ptr, M_CTL);
5216                 /* Hopefully the user has already set the status... */
5217                 ctl_done(io);
5218         } else {
5219                 /*
5220                  * XXX KDM now we need to continue data movement.  Some
5221                  * options:
5222                  * - call ctl_scsiio() again?  We don't do this for data
5223                  *   writes, because for those at least we know ahead of
5224                  *   time where the write will go and how long it is.  For
5225                  *   config writes, though, that information is largely
5226                  *   contained within the write itself, thus we need to
5227                  *   parse out the data again.
5228                  *
5229                  * - Call some other function once the data is in?
5230                  */
5231
5232                 /*
5233                  * XXX KDM call ctl_scsiio() again for now, and check flag
5234                  * bits to see whether we're allocated or not.
5235                  */
5236                 retval = ctl_scsiio(&io->scsiio);
5237         }
5238 bailout:
5239         return (retval);
5240 }
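
/*
 * Note for FETD authors: as described above, a non-zero port_status is
 * surfaced to the initiator as internal-failure sense data with the
 * port_status value packed into the two sense-key-specific bytes, so an
 * FETD error code (16 bits at most) can be recovered from the returned
 * sense alone.
 */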
5241
5242 /*
5243  * This gets called by a backend driver when it is done with a
5244  * data_submit method.
5245  */
5246 void
5247 ctl_data_submit_done(union ctl_io *io)
5248 {
5249         /*
5250          * If the IO_CONT flag is set, we need to call the supplied
5251          * function to continue processing the I/O, instead of completing
5252          * the I/O just yet.
5253          *
5254          * If there is an error, though, we don't want to keep processing.
5255          * Instead, just send status back to the initiator.
5256          */
5257         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5258             (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5259             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5260              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5261                 io->scsiio.io_cont(io);
5262                 return;
5263         }
5264         ctl_done(io);
5265 }
5266
5267 /*
5268  * This gets called by a backend driver when it is done with a
5269  * configuration write.
5270  */
5271 void
5272 ctl_config_write_done(union ctl_io *io)
5273 {
5274         uint8_t *buf;
5275
5276         /*
5277          * If the IO_CONT flag is set, we need to call the supplied
5278          * function to continue processing the I/O, instead of completing
5279          * the I/O just yet.
5280          *
5281          * If there is an error, though, we don't want to keep processing.
5282          * Instead, just send status back to the initiator.
5283          */
5284         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT)
5285          && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)
5286           || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) {
5287                 io->scsiio.io_cont(io);
5288                 return;
5289         }
5290         /*
5291          * Since a configuration write can be done for commands that actually
5292          * have data allocated, like WRITE BUFFER, and commands that have
5293          * no data, like START STOP UNIT, we need to check here.
5294          */
5295         if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5296                 buf = io->scsiio.kern_data_ptr;
5297         else
5298                 buf = NULL;
5299         ctl_done(io);
5300         if (buf)
5301                 free(buf, M_CTL);
5302 }
5303
5304 /*
5305  * SCSI release command.
5306  */
5307 int
5308 ctl_scsi_release(struct ctl_scsiio *ctsio)
5309 {
5310         int length, longid, thirdparty_id, resv_id;
5311         struct ctl_softc *ctl_softc;
5312         struct ctl_lun *lun;
5313
5314         length = 0;
5315         resv_id = 0;
5316
5317         CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5318
5319         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5320         ctl_softc = control_softc;
5321
5322         switch (ctsio->cdb[0]) {
5323         case RELEASE_10: {
5324                 struct scsi_release_10 *cdb;
5325
5326                 cdb = (struct scsi_release_10 *)ctsio->cdb;
5327
5328                 if (cdb->byte2 & SR10_LONGID)
5329                         longid = 1;
5330                 else
5331                         thirdparty_id = cdb->thirdparty_id;
5332
5333                 resv_id = cdb->resv_id;
5334                 length = scsi_2btoul(cdb->length);
5335                 break;
5336         }
5337         }
5338
5339
5340         /*
5341          * XXX KDM right now, we only support LUN reservation.  We don't
5342          * support 3rd party reservations, or extent reservations, which
5343          * might actually need the parameter list.  If we've gotten this
5344          * far, we've got a LUN reservation.  Anything else got kicked out
5345          * above.  So, according to SPC, ignore the length.
5346          */
5347         length = 0;
5348
5349         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5350          && (length > 0)) {
5351                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5352                 ctsio->kern_data_len = length;
5353                 ctsio->kern_total_len = length;
5354                 ctsio->kern_data_resid = 0;
5355                 ctsio->kern_rel_offset = 0;
5356                 ctsio->kern_sg_entries = 0;
5357                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5358                 ctsio->be_move_done = ctl_config_move_done;
5359                 ctl_datamove((union ctl_io *)ctsio);
5360
5361                 return (CTL_RETVAL_COMPLETE);
5362         }
5363
5364         if (length > 0)
5365                 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5366
5367         mtx_lock(&lun->lun_lock);
5368
5369         /*
5370          * According to SPC, it is not an error for an initiator to attempt
5371          * to release a reservation on a LUN that isn't reserved, or that
5372          * is reserved by another initiator.  The reservation can only be
5373          * released, though, by the initiator who made it or by one of
5374          * several reset type events.
5375          */
5376         if (lun->flags & CTL_LUN_RESERVED) {
5377                 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id)
5378                  && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port)
5379                  && (ctsio->io_hdr.nexus.targ_target.id ==
5380                      lun->rsv_nexus.targ_target.id)) {
5381                         lun->flags &= ~CTL_LUN_RESERVED;
5382                 }
5383         }
5384
5385         mtx_unlock(&lun->lun_lock);
5386
5387         ctsio->scsi_status = SCSI_STATUS_OK;
5388         ctsio->io_hdr.status = CTL_SUCCESS;
5389
5390         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5391                 free(ctsio->kern_data_ptr, M_CTL);
5392                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5393         }
5394
5395         ctl_done((union ctl_io *)ctsio);
5396         return (CTL_RETVAL_COMPLETE);
5397 }
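
/*
 * Note on the allocation dance above, which ctl_scsi_reserve(),
 * ctl_format() and the other config commands that may carry a parameter
 * list all share.  The handler runs twice:
 *
 *      first call:   CTL_FLAG_ALLOCATED clear and length > 0, so it
 *                    mallocs kern_data_ptr, sets be_move_done =
 *                    ctl_config_move_done and starts ctl_datamove()
 *      data arrives: ctl_config_move_done() re-dispatches the command
 *                    through ctl_scsiio()
 *      second call:  CTL_FLAG_ALLOCATED is now set, so the handler
 *                    parses the parameter data and finishes via
 *                    ctl_done()
 *
 * Since RELEASE and RESERVE force the length to zero, only the second
 * half of the pattern actually runs for them today.
 */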
5398
5399 int
5400 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5401 {
5402         int extent, thirdparty, longid;
5403         int resv_id, length;
5404         uint64_t thirdparty_id;
5405         struct ctl_softc *ctl_softc;
5406         struct ctl_lun *lun;
5407
5408         extent = 0;
5409         thirdparty = 0;
5410         longid = 0;
5411         resv_id = 0;
5412         length = 0;
5413         thirdparty_id = 0;
5414
5415         CTL_DEBUG_PRINT(("ctl_scsi_reserve\n"));
5416
5417         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5418         ctl_softc = control_softc;
5419
5420         switch (ctsio->cdb[0]) {
5421         case RESERVE_10: {
5422                 struct scsi_reserve_10 *cdb;
5423
5424                 cdb = (struct scsi_reserve_10 *)ctsio->cdb;
5425
5426                 if (cdb->byte2 & SR10_LONGID)
5427                         longid = 1;
5428                 else
5429                         thirdparty_id = cdb->thirdparty_id;
5430
5431                 resv_id = cdb->resv_id;
5432                 length = scsi_2btoul(cdb->length);
5433                 break;
5434         }
5435         }
5436
5437         /*
5438          * XXX KDM right now, we only support LUN reservation.  We don't
5439          * support 3rd party reservations, or extent reservations, which
5440          * might actually need the parameter list.  If we've gotten this
5441          * far, we've got a LUN reservation.  Anything else got kicked out
5442          * above.  So, according to SPC, ignore the length.
5443          */
5444         length = 0;
5445
5446         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5447          && (length > 0)) {
5448                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5449                 ctsio->kern_data_len = length;
5450                 ctsio->kern_total_len = length;
5451                 ctsio->kern_data_resid = 0;
5452                 ctsio->kern_rel_offset = 0;
5453                 ctsio->kern_sg_entries = 0;
5454                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5455                 ctsio->be_move_done = ctl_config_move_done;
5456                 ctl_datamove((union ctl_io *)ctsio);
5457
5458                 return (CTL_RETVAL_COMPLETE);
5459         }
5460
5461         if (length > 0)
5462                 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5463
5464         mtx_lock(&lun->lun_lock);
5465         if (lun->flags & CTL_LUN_RESERVED) {
5466                 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
5467                  || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
5468                  || (ctsio->io_hdr.nexus.targ_target.id !=
5469                      lun->rsv_nexus.targ_target.id)) {
5470                         ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
5471                         ctsio->io_hdr.status = CTL_SCSI_ERROR;
5472                         goto bailout;
5473                 }
5474         }
5475
5476         lun->flags |= CTL_LUN_RESERVED;
5477         lun->rsv_nexus = ctsio->io_hdr.nexus;
5478
5479         ctsio->scsi_status = SCSI_STATUS_OK;
5480         ctsio->io_hdr.status = CTL_SUCCESS;
5481
5482 bailout:
5483         mtx_unlock(&lun->lun_lock);
5484
5485         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5486                 free(ctsio->kern_data_ptr, M_CTL);
5487                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5488         }
5489
5490         ctl_done((union ctl_io *)ctsio);
5491         return (CTL_RETVAL_COMPLETE);
5492 }
5493
5494 int
5495 ctl_start_stop(struct ctl_scsiio *ctsio)
5496 {
5497         struct scsi_start_stop_unit *cdb;
5498         struct ctl_lun *lun;
5499         struct ctl_softc *ctl_softc;
5500         int retval;
5501
5502         CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5503
5504         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5505         ctl_softc = control_softc;
5506         retval = 0;
5507
5508         cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5509
5510         /*
5511          * XXX KDM
5512          * We don't support the immediate bit on a stop unit.  In order to
5513          * do that, we would need to code up a way to know that a stop is
5514          * pending, and hold off any new commands until it completes, one
5515          * way or another.  Then we could accept or reject those commands
5516          * depending on its status.  We would almost need to do the reverse
5517          * of what we do below for an immediate start -- return the copy of
5518          * the ctl_io to the FETD with status to send to the host (and to
5519          * free the copy!) and then free the original I/O once the stop
5520          * actually completes.  That way, the OOA queue mechanism can work
5521          * to block commands that shouldn't proceed.  Another alternative
5522          * would be to put the copy in the queue in place of the original,
5523          * and return the original back to the caller.  That could be
5524          * slightly safer.
5525          */
5526         if ((cdb->byte2 & SSS_IMMED)
5527          && ((cdb->how & SSS_START) == 0)) {
5528                 ctl_set_invalid_field(ctsio,
5529                                       /*sks_valid*/ 1,
5530                                       /*command*/ 1,
5531                                       /*field*/ 1,
5532                                       /*bit_valid*/ 1,
5533                                       /*bit*/ 0);
5534                 ctl_done((union ctl_io *)ctsio);
5535                 return (CTL_RETVAL_COMPLETE);
5536         }
5537
5538         if ((lun->flags & CTL_LUN_PR_RESERVED)
5539          && ((cdb->how & SSS_START) == 0)) {
5540                 uint32_t residx;
5541
5542                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5543                 if (!lun->per_res[residx].registered
5544                  || (lun->pr_res_idx != residx && lun->res_type < 4)) {
5545
5546                         ctl_set_reservation_conflict(ctsio);
5547                         ctl_done((union ctl_io *)ctsio);
5548                         return (CTL_RETVAL_COMPLETE);
5549                 }
5550         }
5551
5552         /*
5553          * If there is no backend on this device, we can't start or stop
5554          * it.  In theory we shouldn't get any start/stop commands in the
5555          * first place at this level if the LUN doesn't have a backend.
5556          * That should get stopped by the command decode code.
5557          */
5558         if (lun->backend == NULL) {
5559                 ctl_set_invalid_opcode(ctsio);
5560                 ctl_done((union ctl_io *)ctsio);
5561                 return (CTL_RETVAL_COMPLETE);
5562         }
5563
5564         /*
5565          * XXX KDM Copan-specific offline behavior.
5566          * Figure out a reasonable way to port this?
5567          */
5568 #ifdef NEEDTOPORT
5569         mtx_lock(&lun->lun_lock);
5570
5571         if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
5572          && (lun->flags & CTL_LUN_OFFLINE)) {
5573                 /*
5574                  * If the LUN is offline, and the on/offline bit isn't set,
5575                  * reject the start or stop.  Otherwise, let it through.
5576                  */
5577                 mtx_unlock(&lun->lun_lock);
5578                 ctl_set_lun_not_ready(ctsio);
5579                 ctl_done((union ctl_io *)ctsio);
5580         } else {
5581                 mtx_unlock(&lun->lun_lock);
5582 #endif /* NEEDTOPORT */
5583                 /*
5584                  * This could be a start or a stop when we're online,
5585                  * or a stop/offline or start/online.  A start or stop when
5586                  * we're offline is covered in the case above.
5587                  */
5588                 /*
5589                  * In the non-immediate case, we send the request to
5590                  * the backend and return status to the user when
5591                  * it is done.
5592                  *
5593                  * In the immediate case, we allocate a new ctl_io
5594                  * to hold a copy of the request, and send that to
5595                  * the backend.  We then set good status on the
5596                  * user's request and return it immediately.
5597                  */
5598                 if (cdb->byte2 & SSS_IMMED) {
5599                         union ctl_io *new_io;
5600
5601                         new_io = ctl_alloc_io(ctsio->io_hdr.pool);
5602                         if (new_io == NULL) {
5603                                 ctl_set_busy(ctsio);
5604                                 ctl_done((union ctl_io *)ctsio);
5605                         } else {
5606                                 ctl_copy_io((union ctl_io *)ctsio,
5607                                             new_io);
5608                                 retval = lun->backend->config_write(new_io);
5609                                 ctl_set_success(ctsio);
5610                                 ctl_done((union ctl_io *)ctsio);
5611                         }
5612                 } else {
5613                         retval = lun->backend->config_write(
5614                                 (union ctl_io *)ctsio);
5615                 }
5616 #ifdef NEEDTOPORT
5617         }
5618 #endif
5619         return (retval);
5620 }
5621
5622 /*
5623  * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5624  * we don't really do anything with the LBA and length fields if the user
5625  * passes them in.  Instead we'll just flush out the cache for the entire
5626  * LUN.
5627  */
5628 int
5629 ctl_sync_cache(struct ctl_scsiio *ctsio)
5630 {
5631         struct ctl_lun *lun;
5632         struct ctl_softc *ctl_softc;
5633         uint64_t starting_lba;
5634         uint32_t block_count;
5635         int retval;
5636
5637         CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5638
5639         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5640         ctl_softc = control_softc;
5641         retval = 0;
5642
5643         switch (ctsio->cdb[0]) {
5644         case SYNCHRONIZE_CACHE: {
5645                 struct scsi_sync_cache *cdb;
5646                 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5647
5648                 starting_lba = scsi_4btoul(cdb->begin_lba);
5649                 block_count = scsi_2btoul(cdb->lb_count);
5650                 break;
5651         }
5652         case SYNCHRONIZE_CACHE_16: {
5653                 struct scsi_sync_cache_16 *cdb;
5654                 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5655
5656                 starting_lba = scsi_8btou64(cdb->begin_lba);
5657                 block_count = scsi_4btoul(cdb->lb_count);
5658                 break;
5659         }
5660         default:
5661                 ctl_set_invalid_opcode(ctsio);
5662                 ctl_done((union ctl_io *)ctsio);
5663                 goto bailout;
5664                 break; /* NOTREACHED */
5665         }
5666
5667         /*
5668          * We check the LBA and length, but don't do anything with them.
5669          * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
5670          * get flushed.  This check will just help satisfy anyone who wants
5671          * to see an error for an out-of-range LBA.
5672          */
5673         if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5674                 ctl_set_lba_out_of_range(ctsio);
5675                 ctl_done((union ctl_io *)ctsio);
5676                 goto bailout;
5677         }
5678
5679         /*
5680          * If this LUN has no backend, we can't flush the cache anyway.
5681          */
5682         if (lun->backend == NULL) {
5683                 ctl_set_invalid_opcode(ctsio);
5684                 ctl_done((union ctl_io *)ctsio);
5685                 goto bailout;
5686         }
5687
5688         /*
5689          * Check to see whether we're configured to send the SYNCHRONIZE
5690          * CACHE command directly to the back end.
5691          */
5692         mtx_lock(&lun->lun_lock);
5693         if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
5694          && (++(lun->sync_count) >= lun->sync_interval)) {
5695                 lun->sync_count = 0;
5696                 mtx_unlock(&lun->lun_lock);
5697                 retval = lun->backend->config_write((union ctl_io *)ctsio);
5698         } else {
5699                 mtx_unlock(&lun->lun_lock);
5700                 ctl_set_success(ctsio);
5701                 ctl_done((union ctl_io *)ctsio);
5702         }
5703
5704 bailout:
5705
5706         return (retval);
5707 }
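
/*
 * For reference, the SYNCHRONIZE CACHE(10) CDB parsed above (the
 * 16-byte variant differs only in field widths):
 *
 *      byte 0:     opcode (0x35)
 *      byte 1:     flags (IMMED etc., currently ignored here)
 *      bytes 2-5:  starting LBA, big endian (scsi_4btoul())
 *      bytes 7-8:  number of blocks, big endian (scsi_2btoul())
 *
 * e.g. a request to flush 16 blocks starting at LBA 0x1000 arrives as
 * 35 00 00 00 10 00 00 00 10 00.
 */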
5708
5709 int
5710 ctl_format(struct ctl_scsiio *ctsio)
5711 {
5712         struct scsi_format *cdb;
5713         struct ctl_lun *lun;
5714         struct ctl_softc *ctl_softc;
5715         int length, defect_list_len;
5716
5717         CTL_DEBUG_PRINT(("ctl_format\n"));
5718
5719         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5720         ctl_softc = control_softc;
5721
5722         cdb = (struct scsi_format *)ctsio->cdb;
5723
5724         length = 0;
5725         if (cdb->byte2 & SF_FMTDATA) {
5726                 if (cdb->byte2 & SF_LONGLIST)
5727                         length = sizeof(struct scsi_format_header_long);
5728                 else
5729                         length = sizeof(struct scsi_format_header_short);
5730         }
5731
5732         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5733          && (length > 0)) {
5734                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5735                 ctsio->kern_data_len = length;
5736                 ctsio->kern_total_len = length;
5737                 ctsio->kern_data_resid = 0;
5738                 ctsio->kern_rel_offset = 0;
5739                 ctsio->kern_sg_entries = 0;
5740                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5741                 ctsio->be_move_done = ctl_config_move_done;
5742                 ctl_datamove((union ctl_io *)ctsio);
5743
5744                 return (CTL_RETVAL_COMPLETE);
5745         }
5746
5747         defect_list_len = 0;
5748
5749         if (cdb->byte2 & SF_FMTDATA) {
5750                 if (cdb->byte2 & SF_LONGLIST) {
5751                         struct scsi_format_header_long *header;
5752
5753                         header = (struct scsi_format_header_long *)
5754                                 ctsio->kern_data_ptr;
5755
5756                         defect_list_len = scsi_4btoul(header->defect_list_len);
5757                         if (defect_list_len != 0) {
5758                                 ctl_set_invalid_field(ctsio,
5759                                                       /*sks_valid*/ 1,
5760                                                       /*command*/ 0,
5761                                                       /*field*/ 2,
5762                                                       /*bit_valid*/ 0,
5763                                                       /*bit*/ 0);
5764                                 goto bailout;
5765                         }
5766                 } else {
5767                         struct scsi_format_header_short *header;
5768
5769                         header = (struct scsi_format_header_short *)
5770                                 ctsio->kern_data_ptr;
5771
5772                         defect_list_len = scsi_2btoul(header->defect_list_len);
5773                         if (defect_list_len != 0) {
5774                                 ctl_set_invalid_field(ctsio,
5775                                                       /*sks_valid*/ 1,
5776                                                       /*command*/ 0,
5777                                                       /*field*/ 2,
5778                                                       /*bit_valid*/ 0,
5779                                                       /*bit*/ 0);
5780                                 goto bailout;
5781                         }
5782                 }
5783         }
5784
5785         /*
5786          * The format command will clear out the "Medium format corrupted"
5787          * status if set by the configuration code.  That status is really
5788          * just a way to notify the host that we have lost the media, and
5789          * get them to issue a command that will basically make them think
5790          * they're blowing away the media.
5791          */
5792         mtx_lock(&lun->lun_lock);
5793         lun->flags &= ~CTL_LUN_INOPERABLE;
5794         mtx_unlock(&lun->lun_lock);
5795
5796         ctsio->scsi_status = SCSI_STATUS_OK;
5797         ctsio->io_hdr.status = CTL_SUCCESS;
5798 bailout:
5799
5800         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5801                 free(ctsio->kern_data_ptr, M_CTL);
5802                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5803         }
5804
5805         ctl_done((union ctl_io *)ctsio);
5806         return (CTL_RETVAL_COMPLETE);
5807 }
5808
5809 int
5810 ctl_read_buffer(struct ctl_scsiio *ctsio)
5811 {
5812         struct scsi_read_buffer *cdb;
5813         struct ctl_lun *lun;
5814         int buffer_offset, len;
5815         static uint8_t descr[4];
5816         static uint8_t echo_descr[4] = { 0 };
5817
5818         CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5819
5820         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5821         cdb = (struct scsi_read_buffer *)ctsio->cdb;
5822
5823         if (lun->flags & CTL_LUN_PR_RESERVED) {
5824                 uint32_t residx;
5825
5826                 /*
5827                  * XXX KDM need a lock here.
5828                  */
5829                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5830                 if ((lun->res_type == SPR_TYPE_EX_AC
5831                   && residx != lun->pr_res_idx)
5832                  || ((lun->res_type == SPR_TYPE_EX_AC_RO
5833                    || lun->res_type == SPR_TYPE_EX_AC_AR)
5834                   && !lun->per_res[residx].registered)) {
5835                         ctl_set_reservation_conflict(ctsio);
5836                         ctl_done((union ctl_io *)ctsio);
5837                         return (CTL_RETVAL_COMPLETE);
5838                 }
5839         }
5840
5841         if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
5842             (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
5843             (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
5844                 ctl_set_invalid_field(ctsio,
5845                                       /*sks_valid*/ 1,
5846                                       /*command*/ 1,
5847                                       /*field*/ 1,
5848                                       /*bit_valid*/ 1,
5849                                       /*bit*/ 4);
5850                 ctl_done((union ctl_io *)ctsio);
5851                 return (CTL_RETVAL_COMPLETE);
5852         }
5853
5854         len = scsi_3btoul(cdb->length);
5855         buffer_offset = scsi_3btoul(cdb->offset);
5856
5857         if (buffer_offset + len > sizeof(lun->write_buffer)) {
5858                 ctl_set_invalid_field(ctsio,
5859                                       /*sks_valid*/ 1,
5860                                       /*command*/ 1,
5861                                       /*field*/ 6,
5862                                       /*bit_valid*/ 0,
5863                                       /*bit*/ 0);
5864                 ctl_done((union ctl_io *)ctsio);
5865                 return (CTL_RETVAL_COMPLETE);
5866         }
5867
5868         if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
5869                 descr[0] = 0;
5870                 scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]);
5871                 ctsio->kern_data_ptr = descr;
5872                 len = min(len, sizeof(descr));
5873         } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
5874                 ctsio->kern_data_ptr = echo_descr;
5875                 len = min(len, sizeof(echo_descr));
5876         } else
5877                 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5878         ctsio->kern_data_len = len;
5879         ctsio->kern_total_len = len;
5880         ctsio->kern_data_resid = 0;
5881         ctsio->kern_rel_offset = 0;
5882         ctsio->kern_sg_entries = 0;
5883         ctsio->be_move_done = ctl_config_move_done;
5884         ctl_datamove((union ctl_io *)ctsio);
5885
5886         return (CTL_RETVAL_COMPLETE);
5887 }
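
/*
 * For reference, the descriptor-mode reply built above is four bytes:
 * descr[0] = 0 (the offset boundary) followed by
 * sizeof(lun->write_buffer) as a 3-byte big-endian value via
 * scsi_ulto3b(); the echo-descriptor reply is four zero bytes.  Note
 * that descr[] is static and shared between commands, which is harmless
 * only because every caller stores the same values into it.
 */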
5888
5889 int
5890 ctl_write_buffer(struct ctl_scsiio *ctsio)
5891 {
5892         struct scsi_write_buffer *cdb;
5893         struct ctl_lun *lun;
5894         int buffer_offset, len;
5895
5896         CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5897
5898         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5899         cdb = (struct scsi_write_buffer *)ctsio->cdb;
5900
5901         if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
5902                 ctl_set_invalid_field(ctsio,
5903                                       /*sks_valid*/ 1,
5904                                       /*command*/ 1,
5905                                       /*field*/ 1,
5906                                       /*bit_valid*/ 1,
5907                                       /*bit*/ 4);
5908                 ctl_done((union ctl_io *)ctsio);
5909                 return (CTL_RETVAL_COMPLETE);
5910         }
5911
5912         len = scsi_3btoul(cdb->length);
5913         buffer_offset = scsi_3btoul(cdb->offset);
5914
5915         if (buffer_offset + len > sizeof(lun->write_buffer)) {
5916                 ctl_set_invalid_field(ctsio,
5917                                       /*sks_valid*/ 1,
5918                                       /*command*/ 1,
5919                                       /*field*/ 6,
5920                                       /*bit_valid*/ 0,
5921                                       /*bit*/ 0);
5922                 ctl_done((union ctl_io *)ctsio);
5923                 return (CTL_RETVAL_COMPLETE);
5924         }
5925
5926         /*
5927          * If we've got a kernel request that hasn't been malloced yet,
5928          * malloc it and tell the caller the data buffer is here.
5929          */
5930         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5931                 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5932                 ctsio->kern_data_len = len;
5933                 ctsio->kern_total_len = len;
5934                 ctsio->kern_data_resid = 0;
5935                 ctsio->kern_rel_offset = 0;
5936                 ctsio->kern_sg_entries = 0;
5937                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5938                 ctsio->be_move_done = ctl_config_move_done;
5939                 ctl_datamove((union ctl_io *)ctsio);
5940
5941                 return (CTL_RETVAL_COMPLETE);
5942         }
5943
5944         ctl_done((union ctl_io *)ctsio);
5945
5946         return (CTL_RETVAL_COMPLETE);
5947 }
5948
5949 int
5950 ctl_write_same(struct ctl_scsiio *ctsio)
5951 {
5952         struct ctl_lun *lun;
5953         struct ctl_lba_len_flags *lbalen;
5954         uint64_t lba;
5955         uint32_t num_blocks;
5956         int len, retval;
5957         uint8_t byte2;
5958
5959         retval = CTL_RETVAL_COMPLETE;
5960
5961         CTL_DEBUG_PRINT(("ctl_write_same\n"));
5962
5963         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5964
5965         switch (ctsio->cdb[0]) {
5966         case WRITE_SAME_10: {
5967                 struct scsi_write_same_10 *cdb;
5968
5969                 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5970
5971                 lba = scsi_4btoul(cdb->addr);
5972                 num_blocks = scsi_2btoul(cdb->length);
5973                 byte2 = cdb->byte2;
5974                 break;
5975         }
5976         case WRITE_SAME_16: {
5977                 struct scsi_write_same_16 *cdb;
5978
5979                 cdb = (struct scsi_write_same_16 *)ctsio->cdb;
5980
5981                 lba = scsi_8btou64(cdb->addr);
5982                 num_blocks = scsi_4btoul(cdb->length);
5983                 byte2 = cdb->byte2;
5984                 break;
5985         }
5986         default:
5987                 /*
5988                  * We got a command we don't support.  This shouldn't
5989                  * happen, commands should be filtered out above us.
5990                  */
5991                 ctl_set_invalid_opcode(ctsio);
5992                 ctl_done((union ctl_io *)ctsio);
5993
5994                 return (CTL_RETVAL_COMPLETE);
5995                 break; /* NOTREACHED */
5996         }
5997
5998         /*
5999          * The first check is to make sure we're in bounds, the second
6000          * check is to catch wrap-around problems.  If the lba + num blocks
6001          * is less than the lba, then we've wrapped around and the block
6002          * range is invalid anyway.
6003          */
6004         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
6005          || ((lba + num_blocks) < lba)) {
6006                 ctl_set_lba_out_of_range(ctsio);
6007                 ctl_done((union ctl_io *)ctsio);
6008                 return (CTL_RETVAL_COMPLETE);
6009         }
6010
6011         /* Zero number of blocks means "to the last logical block" */
6012         if (num_blocks == 0) {
6013                 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
6014                         ctl_set_invalid_field(ctsio,
6015                                               /*sks_valid*/ 0,
6016                                               /*command*/ 1,
6017                                               /*field*/ 0,
6018                                               /*bit_valid*/ 0,
6019                                               /*bit*/ 0);
6020                         ctl_done((union ctl_io *)ctsio);
6021                         return (CTL_RETVAL_COMPLETE);
6022                 }
6023                 num_blocks = (lun->be_lun->maxlba + 1) - lba;
6024         }
6025
6026         len = lun->be_lun->blocksize;
6027
6028         /*
6029          * If we've got a kernel request that hasn't been malloced yet,
6030          * malloc it and tell the caller the data buffer is here.
6031          */
6032         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6033                 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
6034                 ctsio->kern_data_len = len;
6035                 ctsio->kern_total_len = len;
6036                 ctsio->kern_data_resid = 0;
6037                 ctsio->kern_rel_offset = 0;
6038                 ctsio->kern_sg_entries = 0;
6039                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6040                 ctsio->be_move_done = ctl_config_move_done;
6041                 ctl_datamove((union ctl_io *)ctsio);
6042
6043                 return (CTL_RETVAL_COMPLETE);
6044         }
6045
6046         lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
6047         lbalen->lba = lba;
6048         lbalen->len = num_blocks;
6049         lbalen->flags = byte2;
6050         retval = lun->backend->config_write((union ctl_io *)ctsio);
6051
6052         return (retval);
6053 }
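
/*
 * Worked example for the num_blocks == 0 case above: on a LUN with
 * maxlba 1999 (2000 blocks), WRITE SAME at lba 1500 with a zero block
 * count becomes num_blocks = (1999 + 1) - 1500 = 500, i.e. "write the
 * pattern from LBA 1500 through the last logical block."  Only one
 * block's worth of data (lun->be_lun->blocksize bytes) is transferred
 * from the initiator; replicating it across the range is left to the
 * backend's config_write method.
 */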
6054
6055 int
6056 ctl_unmap(struct ctl_scsiio *ctsio)
6057 {
6058         struct ctl_lun *lun;
6059         struct scsi_unmap *cdb;
6060         struct ctl_ptr_len_flags *ptrlen;
6061         struct scsi_unmap_header *hdr;
6062         struct scsi_unmap_desc *buf, *end, *endnz, *range;
6063         uint64_t lba;
6064         uint32_t num_blocks;
6065         int len, retval;
6066         uint8_t byte2;
6067
6068         retval = CTL_RETVAL_COMPLETE;
6069
6070         CTL_DEBUG_PRINT(("ctl_unmap\n"));
6071
6072         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6073         cdb = (struct scsi_unmap *)ctsio->cdb;
6074
6075         len = scsi_2btoul(cdb->length);
6076         byte2 = cdb->byte2;
6077
6078         /*
6079          * If we've got a kernel request that hasn't been malloced yet,
6080          * malloc it and tell the caller the data buffer is here.
6081          */
6082         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6083                 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
6084                 ctsio->kern_data_len = len;
6085                 ctsio->kern_total_len = len;
6086                 ctsio->kern_data_resid = 0;
6087                 ctsio->kern_rel_offset = 0;
6088                 ctsio->kern_sg_entries = 0;
6089                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6090                 ctsio->be_move_done = ctl_config_move_done;
6091                 ctl_datamove((union ctl_io *)ctsio);
6092
6093                 return (CTL_RETVAL_COMPLETE);
6094         }
6095
6096         len = ctsio->kern_total_len - ctsio->kern_data_resid;
6097         hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
6098         if (len < sizeof(*hdr) ||
6099             len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
6100             len < (scsi_2btoul(hdr->desc_length) + sizeof(*hdr)) ||
6101             scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
6102                 ctl_set_invalid_field(ctsio,
6103                                       /*sks_valid*/ 0,
6104                                       /*command*/ 0,
6105                                       /*field*/ 0,
6106                                       /*bit_valid*/ 0,
6107                                       /*bit*/ 0);
6108                 ctl_done((union ctl_io *)ctsio);
6109                 return (CTL_RETVAL_COMPLETE);
6110         }
6111         len = scsi_2btoul(hdr->desc_length);
6112         buf = (struct scsi_unmap_desc *)(hdr + 1);
6113         end = buf + len / sizeof(*buf);
6114
6115         endnz = buf;
6116         for (range = buf; range < end; range++) {
6117                 lba = scsi_8btou64(range->lba);
6118                 num_blocks = scsi_4btoul(range->length);
6119                 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
6120                  || ((lba + num_blocks) < lba)) {
6121                         ctl_set_lba_out_of_range(ctsio);
6122                         ctl_done((union ctl_io *)ctsio);
6123                         return (CTL_RETVAL_COMPLETE);
6124                 }
6125                 if (num_blocks != 0)
6126                         endnz = range + 1;
6127         }
6128
6129         /*
6130          * The block backend cannot handle a zero-length last range.
6131          * Filter it out and return if there is nothing left.
6132          */
6133         len = (uint8_t *)endnz - (uint8_t *)buf;
6134         if (len == 0) {
6135                 ctl_set_success(ctsio);
6136                 ctl_done((union ctl_io *)ctsio);
6137                 return (CTL_RETVAL_COMPLETE);
6138         }
6139
6140         mtx_lock(&lun->lun_lock);
6141         ptrlen = (struct ctl_ptr_len_flags *)
6142             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
6143         ptrlen->ptr = (void *)buf;
6144         ptrlen->len = len;
6145         ptrlen->flags = byte2;
6146         ctl_check_blocked(lun);
6147         mtx_unlock(&lun->lun_lock);
6148
6149         retval = lun->backend->config_write((union ctl_io *)ctsio);
6150         return (retval);
6151 }
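
/*
 * For reference, the UNMAP parameter list validated above (per SBC-3):
 *
 *      scsi_unmap_header, 8 bytes:
 *              bytes 0-1: unmap data length (excludes these two bytes)
 *              bytes 2-3: block descriptor data length (16 * ranges)
 *              bytes 4-7: reserved
 *      scsi_unmap_desc, 16 bytes per range:
 *              bytes 0-7:   starting LBA, big endian
 *              bytes 8-11:  number of blocks, big endian
 *              bytes 12-15: reserved
 *
 * so a single-range UNMAP carries 8 + 16 = 24 bytes of parameter data.
 */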
6152
6153 /*
6154  * Note that this function currently doesn't actually do anything inside
6155  * CTL to enforce things if the DQue bit is turned on.
6156  *
6157  * Also note that this function can't be used in the default case, because
6158  * the DQue bit isn't set in the changeable mask for the control mode page
6159  * anyway.  This is just here as an example for how to implement a page
6160  * handler, and a placeholder in case we want to allow the user to turn
6161  * tagged queueing on and off.
6162  *
6163  * The D_SENSE bit handling is functional, however, and will turn
6164  * descriptor sense on and off for a given LUN.
6165  */
6166 int
6167 ctl_control_page_handler(struct ctl_scsiio *ctsio,
6168                          struct ctl_page_index *page_index, uint8_t *page_ptr)
6169 {
6170         struct scsi_control_page *current_cp, *saved_cp, *user_cp;
6171         struct ctl_lun *lun;
6172         struct ctl_softc *softc;
6173         int set_ua;
6174         uint32_t initidx;
6175
6176         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6177         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6178         set_ua = 0;
6179
6180         user_cp = (struct scsi_control_page *)page_ptr;
6181         current_cp = (struct scsi_control_page *)
6182                 (page_index->page_data + (page_index->page_len *
6183                 CTL_PAGE_CURRENT));
6184         saved_cp = (struct scsi_control_page *)
6185                 (page_index->page_data + (page_index->page_len *
6186                 CTL_PAGE_SAVED));
6187
6188         softc = control_softc;
6189
6190         mtx_lock(&lun->lun_lock);
6191         if (((current_cp->rlec & SCP_DSENSE) == 0)
6192          && ((user_cp->rlec & SCP_DSENSE) != 0)) {
6193                 /*
6194                  * Descriptor sense is currently turned off and the user
6195                  * wants to turn it on.
6196                  */
6197                 current_cp->rlec |= SCP_DSENSE;
6198                 saved_cp->rlec |= SCP_DSENSE;
6199                 lun->flags |= CTL_LUN_SENSE_DESC;
6200                 set_ua = 1;
6201         } else if (((current_cp->rlec & SCP_DSENSE) != 0)
6202                 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
6203                 /*
6204                  * Descriptor sense is currently turned on, and the user
6205                  * wants to turn it off.
6206                  */
6207                 current_cp->rlec &= ~SCP_DSENSE;
6208                 saved_cp->rlec &= ~SCP_DSENSE;
6209                 lun->flags &= ~CTL_LUN_SENSE_DESC;
6210                 set_ua = 1;
6211         }
6212         if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
6213             (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
6214                 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
6215                 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
6216                 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
6217                 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
6218                 set_ua = 1;
6219         }
6220         if (set_ua != 0) {
6221                 int i;
6222                 /*
6223                  * Let other initiators know that the mode
6224                  * parameters for this LUN have changed.
6225                  */
6226                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
6227                         if (i == initidx)
6228                                 continue;
6229
6230                         lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
6231                 }
6232         }
6233         mtx_unlock(&lun->lun_lock);
6234
6235         return (0);
6236 }
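
/*
 * Example of a MODE SELECT that flows through the handler above: to
 * enable descriptor sense, the initiator sends a control mode page
 * (page code 0x0a, page length 0x0a) with the SCP_DSENSE bit (0x04)
 * set in the byte the driver calls "rlec".  The handler then ORs
 * SCP_DSENSE into both the current and saved copies of the page and
 * posts CTL_UA_MODE_CHANGE to every other initiator.
 */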
6237
6238 int
6239 ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
6240                      struct ctl_page_index *page_index, uint8_t *page_ptr)
6241 {
6242         struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
6243         struct ctl_lun *lun;
6244         int set_ua;
6245         uint32_t initidx;
6246
6247         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6248         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6249         set_ua = 0;
6250
6251         user_cp = (struct scsi_caching_page *)page_ptr;
6252         current_cp = (struct scsi_caching_page *)
6253                 (page_index->page_data + (page_index->page_len *
6254                 CTL_PAGE_CURRENT));
6255         saved_cp = (struct scsi_caching_page *)
6256                 (page_index->page_data + (page_index->page_len *
6257                 CTL_PAGE_SAVED));
6258
6259         mtx_lock(&lun->lun_lock);
6260         if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
6261             (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
6262                 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
6263                 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
6264                 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
6265                 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
6266                 set_ua = 1;
6267         }
6268         if (set_ua != 0) {
6269                 int i;
6270                 /*
6271                  * Let other initiators know that the mode
6272                  * parameters for this LUN have changed.
6273                  */
6274                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
6275                         if (i == initidx)
6276                                 continue;
6277
6278                         lun->pending_ua[i] |= CTL_UA_MODE_CHANGE;
6279                 }
6280         }
6281         mtx_unlock(&lun->lun_lock);
6282
6283         return (0);
6284 }
6285
6286 int
6287 ctl_power_sp_handler(struct ctl_scsiio *ctsio,
6288                      struct ctl_page_index *page_index, uint8_t *page_ptr)
6289 {
6290         return (0);
6291 }
6292
6293 int
6294 ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
6295                            struct ctl_page_index *page_index, int pc)
6296 {
6297         struct copan_power_subpage *page;
6298
6299         page = (struct copan_power_subpage *)(page_index->page_data +
6300                 (page_index->page_len * pc));
6301
6302         switch (pc) {
6303         case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6304                 /*
6305                  * We don't update the changable bits for this page.
6306                  * We don't update the changeable bits for this page.
6307                 break;
6308         case SMS_PAGE_CTRL_CURRENT >> 6:
6309         case SMS_PAGE_CTRL_DEFAULT >> 6:
6310         case SMS_PAGE_CTRL_SAVED >> 6:
6311 #ifdef NEEDTOPORT
6312                 ctl_update_power_subpage(page);
6313 #endif
6314                 break;
6315         default:
6316 #ifdef NEEDTOPORT
6317                 EPRINT(0, "Invalid PC %d!!", pc);
6318 #endif
6319                 break;
6320         }
6321         return (0);
6322 }
6323
6324
6325 int
6326 ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
6327                    struct ctl_page_index *page_index, uint8_t *page_ptr)
6328 {
6329         struct copan_aps_subpage *user_sp;
6330         struct copan_aps_subpage *current_sp;
6331         union ctl_modepage_info *modepage_info;
6332         struct ctl_softc *softc;
6333         struct ctl_lun *lun;
6334         int retval;
6335
6336         retval = CTL_RETVAL_COMPLETE;
6337         current_sp = (struct copan_aps_subpage *)(page_index->page_data +
6338                      (page_index->page_len * CTL_PAGE_CURRENT));
6339         softc = control_softc;
6340         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6341
6342         user_sp = (struct copan_aps_subpage *)page_ptr;
6343
6344         modepage_info = (union ctl_modepage_info *)
6345                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6346
6347         modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
6348         modepage_info->header.subpage = page_index->subpage;
6349         modepage_info->aps.lock_active = user_sp->lock_active;
6350
6351         mtx_lock(&softc->ctl_lock);
6352
6353         /*
6354          * If there is a request to lock the LUN and another LUN is locked,
6355          * this is an error.  If the requested LUN is already locked, ignore
6356          * the request.  If no LUN is locked, attempt to lock it.
6357          * If there is a request to unlock the LUN and the LUN is currently
6358          * locked, attempt to unlock it.  Otherwise (i.e. if another LUN is
6359          * locked or no LUN is locked), ignore the request.
6360          */
6361         if (user_sp->lock_active & APS_LOCK_ACTIVE) {
6362                 if (softc->aps_locked_lun == lun->lun) {
6363                         /*
6364                          * This LUN is already locked, so we're done.
6365                          */
6366                         retval = CTL_RETVAL_COMPLETE;
6367                 } else if (softc->aps_locked_lun == 0) {
6368                         /*
6369                          * No one has the lock, pass the request to the
6370                          * backend.
6371                          */
6372                         retval = lun->backend->config_write(
6373                                 (union ctl_io *)ctsio);
6374                 } else {
6375                         /*
6376                          * Someone else has the lock, throw out the request.
6377                          */
6378                         ctl_set_already_locked(ctsio);
6379                         free(ctsio->kern_data_ptr, M_CTL);
6380                         ctl_done((union ctl_io *)ctsio);
6381
6382                         /*
6383                          * Set the return value so that ctl_do_mode_select()
6384                          * won't try to complete the command.  We already
6385                          * completed it here.
6386                          */
6387                         retval = CTL_RETVAL_ERROR;
6388                 }
6389         } else if (softc->aps_locked_lun == lun->lun) {
6390                 /*
6391                  * This LUN is locked, so pass the unlock request to the
6392                  * backend.
6393                  */
6394                 retval = lun->backend->config_write((union ctl_io *)ctsio);
6395         }
6396         mtx_unlock(&softc->ctl_lock);
6397
6398         return (retval);
6399 }
6400
6401 int
6402 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
6403                                 struct ctl_page_index *page_index,
6404                                 uint8_t *page_ptr)
6405 {
6406         uint8_t *c;
6407         int i;
6408
6409         c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
6410         ctl_time_io_secs =
6411                 (c[0] << 8) |
6412                 (c[1] << 0) |
6413                 0;
6414         CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
6415         printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
6416         printf("page data:");
6417         for (i = 0; i < 8; i++)
6418                 printf(" %.2x", page_ptr[i]);
6419         printf("\n");
6420         return (0);
6421 }
6422
6423 int
6424 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
6425                                struct ctl_page_index *page_index,
6426                                int pc)
6427 {
6428         struct copan_debugconf_subpage *page;
6429
6430         page = (struct copan_debugconf_subpage *)(page_index->page_data +
6431                 (page_index->page_len * pc));
6432
6433         switch (pc) {
6434         case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6435         case SMS_PAGE_CTRL_DEFAULT >> 6:
6436         case SMS_PAGE_CTRL_SAVED >> 6:
6437                 /*
6438                  * We don't update the changeable or default bits for this page.
6439                  */
6440                 break;
6441         case SMS_PAGE_CTRL_CURRENT >> 6:
6442                 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
6443                 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
6444                 break;
6445         default:
6446 #ifdef NEEDTOPORT
6447                 EPRINT(0, "Invalid PC %d!!", pc);
6448 #endif /* NEEDTOPORT */
6449                 break;
6450         }
6451         return (0);
6452 }
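
/*
 * The pair of handlers above round-trips ctl_time_io_secs as a 16-bit
 * big-endian value, e.g. for ctl_time_io_secs = 300 (0x012c):
 *
 *      sense:  ctl_time_io_secs[0] = 0x01, ctl_time_io_secs[1] = 0x2c
 *      select: (0x01 << 8) | (0x2c << 0) = 300
 */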
6453
6454
6455 static int
6456 ctl_do_mode_select(union ctl_io *io)
6457 {
6458         struct scsi_mode_page_header *page_header;
6459         struct ctl_page_index *page_index;
6460         struct ctl_scsiio *ctsio;
6461         int control_dev, page_len;
6462         int page_len_offset, page_len_size;
6463         union ctl_modepage_info *modepage_info;
6464         struct ctl_lun *lun;
6465         int *len_left, *len_used;
6466         int retval, i;
6467
6468         ctsio = &io->scsiio;
6469         page_index = NULL;
6470         page_len = 0;
6471         retval = CTL_RETVAL_COMPLETE;
6472
6473         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6474
6475         if (lun->be_lun->lun_type != T_DIRECT)
6476                 control_dev = 1;
6477         else
6478                 control_dev = 0;
6479
6480         modepage_info = (union ctl_modepage_info *)
6481                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6482         len_left = &modepage_info->header.len_left;
6483         len_used = &modepage_info->header.len_used;
6484
6485 do_next_page:
6486
6487         page_header = (struct scsi_mode_page_header *)
6488                 (ctsio->kern_data_ptr + *len_used);
6489
6490         if (*len_left == 0) {
6491                 free(ctsio->kern_data_ptr, M_CTL);
6492                 ctl_set_success(ctsio);
6493                 ctl_done((union ctl_io *)ctsio);
6494                 return (CTL_RETVAL_COMPLETE);
6495         } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6496
6497                 free(ctsio->kern_data_ptr, M_CTL);
6498                 ctl_set_param_len_error(ctsio);
6499                 ctl_done((union ctl_io *)ctsio);
6500                 return (CTL_RETVAL_COMPLETE);
6501
6502         } else if ((page_header->page_code & SMPH_SPF)
6503                 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6504
6505                 free(ctsio->kern_data_ptr, M_CTL);
6506                 ctl_set_param_len_error(ctsio);
6507                 ctl_done((union ctl_io *)ctsio);
6508                 return (CTL_RETVAL_COMPLETE);
6509         }
6510
6511
6512         /*
6513          * XXX KDM should we do something with the block descriptor?
6514          */
6515         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6516
6517                 if ((control_dev != 0)
6518                  && (lun->mode_pages.index[i].page_flags &
6519                      CTL_PAGE_FLAG_DISK_ONLY))
6520                         continue;
6521
6522                 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
6523                     (page_header->page_code & SMPH_PC_MASK))
6524                         continue;
6525
6526                 /*
6527                  * If neither page has a subpage code, then we've got a
6528                  * match.
6529                  */
6530                 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
6531                  && ((page_header->page_code & SMPH_SPF) == 0)) {
6532                         page_index = &lun->mode_pages.index[i];
6533                         page_len = page_header->page_length;
6534                         break;
6535                 }
6536
6537                 /*
6538                  * If both pages have subpages, then the subpage numbers
6539                  * have to match.
6540                  */
6541                 if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
6542                   && (page_header->page_code & SMPH_SPF)) {
6543                         struct scsi_mode_page_header_sp *sph;
6544
6545                         sph = (struct scsi_mode_page_header_sp *)page_header;
6546
6547                         if (lun->mode_pages.index[i].subpage ==
6548                             sph->subpage) {
6549                                 page_index = &lun->mode_pages.index[i];
6550                                 page_len = scsi_2btoul(sph->page_length);
6551                                 break;
6552                         }
6553                 }
6554         }
6555
6556         /*
6557          * If we couldn't find the page, or if we don't have a mode select
6558          * handler for it, send back an error to the user.
6559          */
6560         if ((page_index == NULL)
6561          || (page_index->select_handler == NULL)) {
6562                 ctl_set_invalid_field(ctsio,
6563                                       /*sks_valid*/ 1,
6564                                       /*command*/ 0,
6565                                       /*field*/ *len_used,
6566                                       /*bit_valid*/ 0,
6567                                       /*bit*/ 0);
6568                 free(ctsio->kern_data_ptr, M_CTL);
6569                 ctl_done((union ctl_io *)ctsio);
6570                 return (CTL_RETVAL_COMPLETE);
6571         }
6572
6573         if (page_index->page_code & SMPH_SPF) {
6574                 page_len_offset = 2;
6575                 page_len_size = 2;
6576         } else {
6577                 page_len_size = 1;
6578                 page_len_offset = 1;
6579         }
6580
6581         /*
6582          * If the length the initiator gives us isn't the one we specify in
6583          * the mode page header, or if they didn't specify enough data in
6584          * the CDB to avoid truncating this page, kick out the request.
6585          */
6586         if ((page_len != (page_index->page_len - page_len_offset -
6587                           page_len_size))
6588          || (*len_left < page_index->page_len)) {
6589
6590
6591                 ctl_set_invalid_field(ctsio,
6592                                       /*sks_valid*/ 1,
6593                                       /*command*/ 0,
6594                                       /*field*/ *len_used + page_len_offset,
6595                                       /*bit_valid*/ 0,
6596                                       /*bit*/ 0);
6597                 free(ctsio->kern_data_ptr, M_CTL);
6598                 ctl_done((union ctl_io *)ctsio);
6599                 return (CTL_RETVAL_COMPLETE);
6600         }
6601
6602         /*
6603          * Run through the mode page, checking to make sure that the bits
6604          * the user changed are actually legal to change.
6605          */
6606         for (i = 0; i < page_index->page_len; i++) {
6607                 uint8_t *user_byte, *change_mask, *current_byte;
6608                 int bad_bit;
6609                 int j;
6610
6611                 user_byte = (uint8_t *)page_header + i;
6612                 change_mask = page_index->page_data +
6613                               (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6614                 current_byte = page_index->page_data +
6615                                (page_index->page_len * CTL_PAGE_CURRENT) + i;
6616
6617                 /*
6618                  * Check to see whether the user set any bits in this byte
6619          * that they are not allowed to set.
6620                  */
6621                 if ((*user_byte & ~(*change_mask)) ==
6622                     (*current_byte & ~(*change_mask)))
6623                         continue;
6624
6625                 /*
6626                  * Go through bit by bit to determine which one is illegal.
6627                  */
6628                 bad_bit = 0;
6629                 for (j = 7; j >= 0; j--) {
6630                         if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6631                             (((1 << j) & ~(*change_mask)) & *current_byte)) {
6632                                 bad_bit = j;
6633                                 break;
6634                         }
6635                 }
6636                 ctl_set_invalid_field(ctsio,
6637                                       /*sks_valid*/ 1,
6638                                       /*command*/ 0,
6639                                       /*field*/ *len_used + i,
6640                                       /*bit_valid*/ 1,
6641                                       /*bit*/ bad_bit);
6642                 free(ctsio->kern_data_ptr, M_CTL);
6643                 ctl_done((union ctl_io *)ctsio);
6644                 return (CTL_RETVAL_COMPLETE);
6645         }
6646
6647         /*
6648          * Decrement these before we call the page handler, since we may
6649          * end up getting called back one way or another before the handler
6650          * returns to this context.
6651          */
6652         *len_left -= page_index->page_len;
6653         *len_used += page_index->page_len;
6654
6655         retval = page_index->select_handler(ctsio, page_index,
6656                                             (uint8_t *)page_header);
6657
6658         /*
6659          * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6660          * wait until this queued command completes to finish processing
6661          * the mode page.  If it returns anything other than
6662          * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6663          * already set the sense information, freed the data pointer, and
6664          * completed the io for us.
6665          */
6666         if (retval != CTL_RETVAL_COMPLETE)
6667                 goto bailout_no_done;
6668
6669         /*
6670          * If the initiator sent us more than one page, parse the next one.
6671          */
6672         if (*len_left > 0)
6673                 goto do_next_page;
6674
6675         ctl_set_success(ctsio);
6676         free(ctsio->kern_data_ptr, M_CTL);
6677         ctl_done((union ctl_io *)ctsio);
6678
6679 bailout_no_done:
6680
6681         return (CTL_RETVAL_COMPLETE);
6682
6683 }
6684
6685 int
6686 ctl_mode_select(struct ctl_scsiio *ctsio)
6687 {
6688         int param_len, pf, sp;
6689         int header_size, bd_len;
6690         int len_left, len_used;
6691         struct ctl_page_index *page_index;
6692         struct ctl_lun *lun;
6693         int control_dev, page_len;
6694         union ctl_modepage_info *modepage_info;
6695         int retval;
6696
6697         pf = 0;
6698         sp = 0;
6699         page_len = 0;
6700         len_used = 0;
6701         len_left = 0;
6702         retval = 0;
6703         bd_len = 0;
6704         page_index = NULL;
6705
6706         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6707
6708         if (lun->be_lun->lun_type != T_DIRECT)
6709                 control_dev = 1;
6710         else
6711                 control_dev = 0;
6712
6713         switch (ctsio->cdb[0]) {
6714         case MODE_SELECT_6: {
6715                 struct scsi_mode_select_6 *cdb;
6716
6717                 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6718
6719                 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6720                 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6721
6722                 param_len = cdb->length;
6723                 header_size = sizeof(struct scsi_mode_header_6);
6724                 break;
6725         }
6726         case MODE_SELECT_10: {
6727                 struct scsi_mode_select_10 *cdb;
6728
6729                 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6730
6731                 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6732                 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6733
6734                 param_len = scsi_2btoul(cdb->length);
6735                 header_size = sizeof(struct scsi_mode_header_10);
6736                 break;
6737         }
6738         default:
6739                 ctl_set_invalid_opcode(ctsio);
6740                 ctl_done((union ctl_io *)ctsio);
6741                 return (CTL_RETVAL_COMPLETE);
6742                 break; /* NOTREACHED */
6743         }
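
             /*
              * For reference: header_size is 4 bytes for MODE SELECT(6)
              * and 8 bytes for MODE SELECT(10) (the SPC-3 mode parameter
              * headers); any block descriptors and mode pages follow the
              * header in the Data-Out buffer.
              */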
6744
6745         /*
6746          * From SPC-3:
6747          * "A parameter list length of zero indicates that the Data-Out Buffer
6748          * shall be empty. This condition shall not be considered as an error."
6749          */
6750         if (param_len == 0) {
6751                 ctl_set_success(ctsio);
6752                 ctl_done((union ctl_io *)ctsio);
6753                 return (CTL_RETVAL_COMPLETE);
6754         }
6755
6756         /*
6757          * Since we'll hit this the first time through, prior to
6758          * allocation, we don't need to free a data buffer here.
6759          */
6760         if (param_len < header_size) {
6761                 ctl_set_param_len_error(ctsio);
6762                 ctl_done((union ctl_io *)ctsio);
6763                 return (CTL_RETVAL_COMPLETE);
6764         }
6765
6766         /*
6767          * Allocate the data buffer and grab the user's data.  In theory,
6768          * we shouldn't have to sanity check the parameter list length here
6769          * because the maximum size is 64K.  We should be able to malloc
6770          * that much without too many problems.
6771          */
6772         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6773                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6774                 ctsio->kern_data_len = param_len;
6775                 ctsio->kern_total_len = param_len;
6776                 ctsio->kern_data_resid = 0;
6777                 ctsio->kern_rel_offset = 0;
6778                 ctsio->kern_sg_entries = 0;
6779                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6780                 ctsio->be_move_done = ctl_config_move_done;
6781                 ctl_datamove((union ctl_io *)ctsio);
6782
6783                 return (CTL_RETVAL_COMPLETE);
6784         }
6785
6786         switch (ctsio->cdb[0]) {
6787         case MODE_SELECT_6: {
6788                 struct scsi_mode_header_6 *mh6;
6789
6790                 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6791                 bd_len = mh6->blk_desc_len;
6792                 break;
6793         }
6794         case MODE_SELECT_10: {
6795                 struct scsi_mode_header_10 *mh10;
6796
6797                 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6798                 bd_len = scsi_2btoul(mh10->blk_desc_len);
6799                 break;
6800         }
6801         default:
6802                 panic("Invalid CDB type %#x", ctsio->cdb[0]);
6803                 break;
6804         }
6805
6806         if (param_len < (header_size + bd_len)) {
6807                 free(ctsio->kern_data_ptr, M_CTL);
6808                 ctl_set_param_len_error(ctsio);
6809                 ctl_done((union ctl_io *)ctsio);
6810                 return (CTL_RETVAL_COMPLETE);
6811         }
6812
6813         /*
6814          * Set the IO_CONT flag, so that if this I/O gets passed to
6815          * ctl_config_write_done(), it'll get passed back to
6816          * ctl_do_mode_select() for further processing, or completion if
6817          * we're all done.
6818          */
6819         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6820         ctsio->io_cont = ctl_do_mode_select;
6821
6822         modepage_info = (union ctl_modepage_info *)
6823                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6824
6825         memset(modepage_info, 0, sizeof(*modepage_info));
6826
6827         len_left = param_len - header_size - bd_len;
6828         len_used = header_size + bd_len;
6829
6830         modepage_info->header.len_left = len_left;
6831         modepage_info->header.len_used = len_used;
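
             /*
              * Worked example (MODE SELECT(6), no block descriptor):
              * param_len = 16 gives len_used = 4 and len_left = 12,
              * i.e. one 12-byte mode page for ctl_do_mode_select() to
              * walk.
              */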
6832
6833         return (ctl_do_mode_select((union ctl_io *)ctsio));
6834 }
6835
6836 int
6837 ctl_mode_sense(struct ctl_scsiio *ctsio)
6838 {
6839         struct ctl_lun *lun;
6840         int pc, page_code, dbd, llba, subpage;
6841         int alloc_len, page_len, header_len, total_len;
6842         struct scsi_mode_block_descr *block_desc;
6843         struct ctl_page_index *page_index;
6844         int control_dev;
6845
6846         dbd = 0;
6847         llba = 0;
6848         block_desc = NULL;
6849         page_index = NULL;
6850
6851         CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6852
6853         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6854
6855         if (lun->be_lun->lun_type != T_DIRECT)
6856                 control_dev = 1;
6857         else
6858                 control_dev = 0;
6859
6860         if (lun->flags & CTL_LUN_PR_RESERVED) {
6861                 uint32_t residx;
6862
6863                 /*
6864                  * XXX KDM need a lock here.
6865                  */
6866                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
6867                 if ((lun->res_type == SPR_TYPE_EX_AC
6868                   && residx != lun->pr_res_idx)
6869                  || ((lun->res_type == SPR_TYPE_EX_AC_RO
6870                    || lun->res_type == SPR_TYPE_EX_AC_AR)
6871                   && !lun->per_res[residx].registered)) {
6872                         ctl_set_reservation_conflict(ctsio);
6873                         ctl_done((union ctl_io *)ctsio);
6874                         return (CTL_RETVAL_COMPLETE);
6875                 }
6876         }
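
             /*
              * The check above implements SPC-3 reservation conflicts:
              * an Exclusive Access reservation blocks MODE SENSE from
              * every other initiator, while the registrants-only and
              * all-registrants variants block only unregistered
              * initiators.
              */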
6877
6878         switch (ctsio->cdb[0]) {
6879         case MODE_SENSE_6: {
6880                 struct scsi_mode_sense_6 *cdb;
6881
6882                 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6883
6884                 header_len = sizeof(struct scsi_mode_hdr_6);
6885                 if (cdb->byte2 & SMS_DBD)
6886                         dbd = 1;
6887                 else
6888                         header_len += sizeof(struct scsi_mode_block_descr);
6889
6890                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6891                 page_code = cdb->page & SMS_PAGE_CODE;
6892                 subpage = cdb->subpage;
6893                 alloc_len = cdb->length;
6894                 break;
6895         }
6896         case MODE_SENSE_10: {
6897                 struct scsi_mode_sense_10 *cdb;
6898
6899                 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6900
6901                 header_len = sizeof(struct scsi_mode_hdr_10);
6902
6903                 if (cdb->byte2 & SMS_DBD)
6904                         dbd = 1;
6905                 else
6906                         header_len += sizeof(struct scsi_mode_block_descr);
6907                 if (cdb->byte2 & SMS10_LLBAA)
6908                         llba = 1;
6909                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6910                 page_code = cdb->page & SMS_PAGE_CODE;
6911                 subpage = cdb->subpage;
6912                 alloc_len = scsi_2btoul(cdb->length);
6913                 break;
6914         }
6915         default:
6916                 ctl_set_invalid_opcode(ctsio);
6917                 ctl_done((union ctl_io *)ctsio);
6918                 return (CTL_RETVAL_COMPLETE);
6919                 break; /* NOTREACHED */
6920         }
6921
6922         /*
6923          * We have to make a first pass through to calculate the size of
6924          * the pages that match the user's query.  Then we allocate enough
6925          * memory to hold it, and actually copy the data into the buffer.
6926          */
6927         switch (page_code) {
6928         case SMS_ALL_PAGES_PAGE: {
6929                 int i;
6930
6931                 page_len = 0;
6932
6933                 /*
6934                  * At the moment, values other than 0 and 0xff here are
6935                  * reserved according to SPC-3.
6936                  */
6937                 if ((subpage != SMS_SUBPAGE_PAGE_0)
6938                  && (subpage != SMS_SUBPAGE_ALL)) {
6939                         ctl_set_invalid_field(ctsio,
6940                                               /*sks_valid*/ 1,
6941                                               /*command*/ 1,
6942                                               /*field*/ 3,
6943                                               /*bit_valid*/ 0,
6944                                               /*bit*/ 0);
6945                         ctl_done((union ctl_io *)ctsio);
6946                         return (CTL_RETVAL_COMPLETE);
6947                 }
6948
6949                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6950                         if ((control_dev != 0)
6951                          && (lun->mode_pages.index[i].page_flags &
6952                              CTL_PAGE_FLAG_DISK_ONLY))
6953                                 continue;
6954
6955                         /*
6956                          * We don't use this subpage if the user didn't
6957                          * request all subpages.
6958                          */
6959                         if ((lun->mode_pages.index[i].subpage != 0)
6960                          && (subpage == SMS_SUBPAGE_PAGE_0))
6961                                 continue;
6962
6963 #if 0
6964                         printf("found page %#x len %d\n",
6965                                lun->mode_pages.index[i].page_code &
6966                                SMPH_PC_MASK,
6967                                lun->mode_pages.index[i].page_len);
6968 #endif
6969                         page_len += lun->mode_pages.index[i].page_len;
6970                 }
6971                 break;
6972         }
6973         default: {
6974                 int i;
6975
6976                 page_len = 0;
6977
6978                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6979                         /* Look for the right page code */
6980                         if ((lun->mode_pages.index[i].page_code &
6981                              SMPH_PC_MASK) != page_code)
6982                                 continue;
6983
6984                         /* Look for the right subpage or the subpage wildcard */
6985                         if ((lun->mode_pages.index[i].subpage != subpage)
6986                          && (subpage != SMS_SUBPAGE_ALL))
6987                                 continue;
6988
6989                         /* Make sure the page is supported for this dev type */
6990                         if ((control_dev != 0)
6991                          && (lun->mode_pages.index[i].page_flags &
6992                              CTL_PAGE_FLAG_DISK_ONLY))
6993                                 continue;
6994
6995 #if 0
6996                         printf("found page %#x len %d\n",
6997                                lun->mode_pages.index[i].page_code &
6998                                SMPH_PC_MASK,
6999                                lun->mode_pages.index[i].page_len);
7000 #endif
7001
7002                         page_len += lun->mode_pages.index[i].page_len;
7003                 }
7004
7005                 if (page_len == 0) {
7006                         ctl_set_invalid_field(ctsio,
7007                                               /*sks_valid*/ 1,
7008                                               /*command*/ 1,
7009                                               /*field*/ 2,
7010                                               /*bit_valid*/ 1,
7011                                               /*bit*/ 5);
7012                         ctl_done((union ctl_io *)ctsio);
7013                         return (CTL_RETVAL_COMPLETE);
7014                 }
7015                 break;
7016         }
7017         }
7018
7019         total_len = header_len + page_len;
7020 #if 0
7021         printf("header_len = %d, page_len = %d, total_len = %d\n",
7022                header_len, page_len, total_len);
7023 #endif
7024
7025         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7026         ctsio->kern_sg_entries = 0;
7027         ctsio->kern_data_resid = 0;
7028         ctsio->kern_rel_offset = 0;
7029         if (total_len < alloc_len) {
7030                 ctsio->residual = alloc_len - total_len;
7031                 ctsio->kern_data_len = total_len;
7032                 ctsio->kern_total_len = total_len;
7033         } else {
7034                 ctsio->residual = 0;
7035                 ctsio->kern_data_len = alloc_len;
7036                 ctsio->kern_total_len = alloc_len;
7037         }
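
             /*
              * Only min(alloc_len, total_len) bytes are moved back to
              * the initiator; when the reply is truncated, the shortfall
              * is recorded in the residual.
              */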
7038
7039         switch (ctsio->cdb[0]) {
7040         case MODE_SENSE_6: {
7041                 struct scsi_mode_hdr_6 *header;
7042
7043                 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
7044
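                     /*
                      * The MODE DATA LENGTH field does not include itself,
                      * hence total_len - 1 here (and total_len - 2 for the
                      * two-byte field in the 10-byte variant below).
                      */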
7045                 header->datalen = ctl_min(total_len - 1, 254);
7046                 if (control_dev == 0)
7047                         header->dev_specific = 0x10; /* DPOFUA */
7048                 if (dbd)
7049                         header->block_descr_len = 0;
7050                 else
7051                         header->block_descr_len =
7052                                 sizeof(struct scsi_mode_block_descr);
7053                 block_desc = (struct scsi_mode_block_descr *)&header[1];
7054                 break;
7055         }
7056         case MODE_SENSE_10: {
7057                 struct scsi_mode_hdr_10 *header;
7058                 int datalen;
7059
7060                 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
7061
7062                 datalen = ctl_min(total_len - 2, 65533);
7063                 scsi_ulto2b(datalen, header->datalen);
7064                 if (control_dev == 0)
7065                         header->dev_specific = 0x10; /* DPOFUA */
7066                 if (dbd)
7067                         scsi_ulto2b(0, header->block_descr_len);
7068                 else
7069                         scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
7070                                     header->block_descr_len);
7071                 block_desc = (struct scsi_mode_block_descr *)&header[1];
7072                 break;
7073         }
7074         default:
7075                 panic("invalid CDB type %#x", ctsio->cdb[0]);
7076                 break; /* NOTREACHED */
7077         }
7078
7079         /*
7080          * If we've got a disk, use its blocksize in the block
7081          * descriptor.  Otherwise, just set it to 0.
7082          */
7083         if (dbd == 0) {
7084                 if (control_dev == 0)
7085                         scsi_ulto3b(lun->be_lun->blocksize,
7086                                     block_desc->block_len);
7087                 else
7088                         scsi_ulto3b(0, block_desc->block_len);
7089         }
7090
7091         switch (page_code) {
7092         case SMS_ALL_PAGES_PAGE: {
7093                 int i, data_used;
7094
7095                 data_used = header_len;
7096                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
7097                         struct ctl_page_index *page_index;
7098
7099                         page_index = &lun->mode_pages.index[i];
7100
7101                         if ((control_dev != 0)
7102                          && (page_index->page_flags &
7103                             CTL_PAGE_FLAG_DISK_ONLY))
7104                                 continue;
7105
7106                         /*
7107                          * We don't use this subpage if the user didn't
7108                          * request all subpages.  We already checked (above)
7109                          * to make sure the user only specified a subpage
7110                          * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
7111                          */
7112                         if ((page_index->subpage != 0)
7113                          && (subpage == SMS_SUBPAGE_PAGE_0))
7114                                 continue;
7115
7116                         /*
7117                          * Call the handler, if it exists, to update the
7118                          * page to the latest values.
7119                          */
7120                         if (page_index->sense_handler != NULL)
7121                                 page_index->sense_handler(ctsio, page_index, pc);
7122
7123                         memcpy(ctsio->kern_data_ptr + data_used,
7124                                page_index->page_data +
7125                                (page_index->page_len * pc),
7126                                page_index->page_len);
7127                         data_used += page_index->page_len;
7128                 }
7129                 break;
7130         }
7131         default: {
7132                 int i, data_used;
7133
7134                 data_used = header_len;
7135
7136                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
7137                         struct ctl_page_index *page_index;
7138
7139                         page_index = &lun->mode_pages.index[i];
7140
7141                         /* Look for the right page code */
7142                         if ((page_index->page_code & SMPH_PC_MASK) != page_code)
7143                                 continue;
7144
7145                         /* Look for the right subpage or the subpage wildcard */
7146                         if ((page_index->subpage != subpage)
7147                          && (subpage != SMS_SUBPAGE_ALL))
7148                                 continue;
7149
7150                         /* Make sure the page is supported for this dev type */
7151                         if ((control_dev != 0)
7152                          && (page_index->page_flags &
7153                              CTL_PAGE_FLAG_DISK_ONLY))
7154                                 continue;
7155
7156                         /*
7157                          * Call the handler, if it exists, to update the
7158                          * page to the latest values.
7159                          */
7160                         if (page_index->sense_handler != NULL)
7161                                 page_index->sense_handler(ctsio, page_index, pc);
7162
7163                         memcpy(ctsio->kern_data_ptr + data_used,
7164                                page_index->page_data +
7165                                (page_index->page_len * pc),
7166                                page_index->page_len);
7167                         data_used += page_index->page_len;
7168                 }
7169                 break;
7170         }
7171         }
7172
7173         ctsio->scsi_status = SCSI_STATUS_OK;
7174
7175         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7176         ctsio->be_move_done = ctl_config_move_done;
7177         ctl_datamove((union ctl_io *)ctsio);
7178
7179         return (CTL_RETVAL_COMPLETE);
7180 }
7181
7182 int
7183 ctl_read_capacity(struct ctl_scsiio *ctsio)
7184 {
7185         struct scsi_read_capacity *cdb;
7186         struct scsi_read_capacity_data *data;
7187         struct ctl_lun *lun;
7188         uint32_t lba;
7189
7190         CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
7191
7192         cdb = (struct scsi_read_capacity *)ctsio->cdb;
7193
7194         lba = scsi_4btoul(cdb->addr);
7195         if (((cdb->pmi & SRC_PMI) == 0)
7196          && (lba != 0)) {
7197                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7198                                       /*sks_valid*/ 1,
7199                                       /*command*/ 1,
7200                                       /*field*/ 2,
7201                                       /*bit_valid*/ 0,
7202                                       /*bit*/ 0);
7203                 ctl_done((union ctl_io *)ctsio);
7204                 return (CTL_RETVAL_COMPLETE);
7205         }
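
             /*
              * Per SBC-3 the LOGICAL BLOCK ADDRESS field is meaningful
              * only when PMI is set, so a nonzero LBA with PMI clear is
              * rejected above.
              */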
7206
7207         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7208
7209         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
7210         data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
7211         ctsio->residual = 0;
7212         ctsio->kern_data_len = sizeof(*data);
7213         ctsio->kern_total_len = sizeof(*data);
7214         ctsio->kern_data_resid = 0;
7215         ctsio->kern_rel_offset = 0;
7216         ctsio->kern_sg_entries = 0;
7217
7218         /*
7219          * If the maximum LBA is greater than 0xfffffffe, the user must
7220          * issue a SERVICE ACTION IN (16) command, with the read capacity
7221          * service action set.
7222          */
7223         if (lun->be_lun->maxlba > 0xfffffffe)
7224                 scsi_ulto4b(0xffffffff, data->addr);
7225         else
7226                 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
7227
7228         /*
7229          * XXX KDM this may not be 512 bytes...
7230          */
7231         scsi_ulto4b(lun->be_lun->blocksize, data->length);
7232
7233         ctsio->scsi_status = SCSI_STATUS_OK;
7234
7235         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7236         ctsio->be_move_done = ctl_config_move_done;
7237         ctl_datamove((union ctl_io *)ctsio);
7238
7239         return (CTL_RETVAL_COMPLETE);
7240 }
7241
7242 int
7243 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
7244 {
7245         struct scsi_read_capacity_16 *cdb;
7246         struct scsi_read_capacity_data_long *data;
7247         struct ctl_lun *lun;
7248         uint64_t lba;
7249         uint32_t alloc_len;
7250
7251         CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
7252
7253         cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
7254
7255         alloc_len = scsi_4btoul(cdb->alloc_len);
7256         lba = scsi_8btou64(cdb->addr);
7257
7258         if ((cdb->reladr & SRC16_PMI)
7259          && (lba != 0)) {
7260                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7261                                       /*sks_valid*/ 1,
7262                                       /*command*/ 1,
7263                                       /*field*/ 2,
7264                                       /*bit_valid*/ 0,
7265                                       /*bit*/ 0);
7266                 ctl_done((union ctl_io *)ctsio);
7267                 return (CTL_RETVAL_COMPLETE);
7268         }
7269
7270         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7271
7272         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
7273         data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
7274
7275         if (sizeof(*data) < alloc_len) {
7276                 ctsio->residual = alloc_len - sizeof(*data);
7277                 ctsio->kern_data_len = sizeof(*data);
7278                 ctsio->kern_total_len = sizeof(*data);
7279         } else {
7280                 ctsio->residual = 0;
7281                 ctsio->kern_data_len = alloc_len;
7282                 ctsio->kern_total_len = alloc_len;
7283         }
7284         ctsio->kern_data_resid = 0;
7285         ctsio->kern_rel_offset = 0;
7286         ctsio->kern_sg_entries = 0;
7287
7288         scsi_u64to8b(lun->be_lun->maxlba, data->addr);
7289         /* XXX KDM this may not be 512 bytes... */
7290         scsi_ulto4b(lun->be_lun->blocksize, data->length);
7291         data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
7292         scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
7293         if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
7294                 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
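             /*
              * LBPME advertises logical block provisioning (UNMAP)
              * support; LBPRZ promises that unmapped blocks read back
              * as zeroes.
              */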
7295
7296         ctsio->scsi_status = SCSI_STATUS_OK;
7297
7298         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7299         ctsio->be_move_done = ctl_config_move_done;
7300         ctl_datamove((union ctl_io *)ctsio);
7301
7302         return (CTL_RETVAL_COMPLETE);
7303 }
7304
7305 int
7306 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
7307 {
7308         struct scsi_maintenance_in *cdb;
7309         int retval;
7310         int alloc_len, ext, total_len = 0, g, p, pc, pg;
7311         int num_target_port_groups, num_target_ports, single;
7312         struct ctl_lun *lun;
7313         struct ctl_softc *softc;
7314         struct ctl_port *port;
7315         struct scsi_target_group_data *rtg_ptr;
7316         struct scsi_target_group_data_extended *rtg_ext_ptr;
7317         struct scsi_target_port_group_descriptor *tpg_desc;
7318
7319         CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7320
7321         cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7322         softc = control_softc;
7323         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7324
7325         retval = CTL_RETVAL_COMPLETE;
7326
7327         switch (cdb->byte2 & STG_PDF_MASK) {
7328         case STG_PDF_LENGTH:
7329                 ext = 0;
7330                 break;
7331         case STG_PDF_EXTENDED:
7332                 ext = 1;
7333                 break;
7334         default:
7335                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7336                                       /*sks_valid*/ 1,
7337                                       /*command*/ 1,
7338                                       /*field*/ 2,
7339                                       /*bit_valid*/ 1,
7340                                       /*bit*/ 5);
7341                 ctl_done((union ctl_io *)ctsio);
7342                 return (retval);
7343         }
7344
7345         single = ctl_is_single;
7346         if (single)
7347                 num_target_port_groups = 1;
7348         else
7349                 num_target_port_groups = NUM_TARGET_PORT_GROUPS;
7350         num_target_ports = 0;
7351         mtx_lock(&softc->ctl_lock);
7352         STAILQ_FOREACH(port, &softc->port_list, links) {
7353                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7354                         continue;
7355                 if (ctl_map_lun_back(port->targ_port, lun->lun) >= CTL_MAX_LUNS)
7356                         continue;
7357                 num_target_ports++;
7358         }
7359         mtx_unlock(&softc->ctl_lock);
7360
7361         if (ext)
7362                 total_len = sizeof(struct scsi_target_group_data_extended);
7363         else
7364                 total_len = sizeof(struct scsi_target_group_data);
7365         total_len += sizeof(struct scsi_target_port_group_descriptor) *
7366                 num_target_port_groups +
7367             sizeof(struct scsi_target_port_descriptor) *
7368                 num_target_ports * num_target_port_groups;
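             /*
              * Example: 2 target port groups and 2 online ports mapped
              * to this LUN yield one header plus 2 group descriptors,
              * each followed by 2 port descriptors.
              */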
7369
7370         alloc_len = scsi_4btoul(cdb->length);
7371
7372         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7373
7374         ctsio->kern_sg_entries = 0;
7375
7376         if (total_len < alloc_len) {
7377                 ctsio->residual = alloc_len - total_len;
7378                 ctsio->kern_data_len = total_len;
7379                 ctsio->kern_total_len = total_len;
7380         } else {
7381                 ctsio->residual = 0;
7382                 ctsio->kern_data_len = alloc_len;
7383                 ctsio->kern_total_len = alloc_len;
7384         }
7385         ctsio->kern_data_resid = 0;
7386         ctsio->kern_rel_offset = 0;
7387
7388         if (ext) {
7389                 rtg_ext_ptr = (struct scsi_target_group_data_extended *)
7390                     ctsio->kern_data_ptr;
7391                 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
7392                 rtg_ext_ptr->format_type = 0x10;
7393                 rtg_ext_ptr->implicit_transition_time = 0;
7394                 tpg_desc = &rtg_ext_ptr->groups[0];
7395         } else {
7396                 rtg_ptr = (struct scsi_target_group_data *)
7397                     ctsio->kern_data_ptr;
7398                 scsi_ulto4b(total_len - 4, rtg_ptr->length);
7399                 tpg_desc = &rtg_ptr->groups[0];
7400         }
7401
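             /*
              * The group containing the port this command arrived on is
              * reported as active/optimized; any other group is reported
              * as active/non-optimized.
              */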
7402         pg = ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS;
7403         mtx_lock(&softc->ctl_lock);
7404         for (g = 0; g < num_target_port_groups; g++) {
7405                 if (g == pg)
7406                         tpg_desc->pref_state = TPG_PRIMARY |
7407                             TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7408                 else
7409                         tpg_desc->pref_state =
7410                             TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7411                 tpg_desc->support = TPG_AO_SUP;
7412                 if (!single)
7413                         tpg_desc->support |= TPG_AN_SUP;
7414                 scsi_ulto2b(g + 1, tpg_desc->target_port_group);
7415                 tpg_desc->status = TPG_IMPLICIT;
7416                 pc = 0;
7417                 STAILQ_FOREACH(port, &softc->port_list, links) {
7418                         if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7419                                 continue;
7420                         if (ctl_map_lun_back(port->targ_port, lun->lun) >=
7421                             CTL_MAX_LUNS)
7422                                 continue;
7423                         p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
7424                         scsi_ulto2b(p, tpg_desc->descriptors[pc].
7425                             relative_target_port_identifier);
7426                         pc++;
7427                 }
7428                 tpg_desc->target_port_count = pc;
7429                 tpg_desc = (struct scsi_target_port_group_descriptor *)
7430                     &tpg_desc->descriptors[pc];
7431         }
7432         mtx_unlock(&softc->ctl_lock);
7433
7434         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7435         ctsio->be_move_done = ctl_config_move_done;
7436
7437         CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
7438                          ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
7439                          ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
7440                          ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
7441                          ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
7442
7443         ctl_datamove((union ctl_io *)ctsio);
7444         return (retval);
7445 }
7446
7447 int
7448 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7449 {
7450         struct ctl_lun *lun;
7451         struct scsi_report_supported_opcodes *cdb;
7452         const struct ctl_cmd_entry *entry, *sentry;
7453         struct scsi_report_supported_opcodes_all *all;
7454         struct scsi_report_supported_opcodes_descr *descr;
7455         struct scsi_report_supported_opcodes_one *one;
7456         int retval;
7457         int alloc_len, total_len;
7458         int opcode, service_action, i, j, num;
7459
7460         CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7461
7462         cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7463         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7464
7465         retval = CTL_RETVAL_COMPLETE;
7466
7467         opcode = cdb->requested_opcode;
7468         service_action = scsi_2btoul(cdb->requested_service_action);
7469         switch (cdb->options & RSO_OPTIONS_MASK) {
7470         case RSO_OPTIONS_ALL:
7471                 num = 0;
7472                 for (i = 0; i < 256; i++) {
7473                         entry = &ctl_cmd_table[i];
7474                         if (entry->flags & CTL_CMD_FLAG_SA5) {
7475                                 for (j = 0; j < 32; j++) {
7476                                         sentry = &((const struct ctl_cmd_entry *)
7477                                             entry->execute)[j];
7478                                         if (ctl_cmd_applicable(
7479                                             lun->be_lun->lun_type, sentry))
7480                                                 num++;
7481                                 }
7482                         } else {
7483                                 if (ctl_cmd_applicable(lun->be_lun->lun_type,
7484                                     entry))
7485                                         num++;
7486                         }
7487                 }
7488                 total_len = sizeof(struct scsi_report_supported_opcodes_all) +
7489                     num * sizeof(struct scsi_report_supported_opcodes_descr);
7490                 break;
7491         case RSO_OPTIONS_OC:
7492                 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
7493                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7494                                               /*sks_valid*/ 1,
7495                                               /*command*/ 1,
7496                                               /*field*/ 2,
7497                                               /*bit_valid*/ 1,
7498                                               /*bit*/ 2);
7499                         ctl_done((union ctl_io *)ctsio);
7500                         return (CTL_RETVAL_COMPLETE);
7501                 }
7502                 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7503                 break;
7504         case RSO_OPTIONS_OC_SA:
7505                 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
7506                     service_action >= 32) {
7507                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7508                                               /*sks_valid*/ 1,
7509                                               /*command*/ 1,
7510                                               /*field*/ 2,
7511                                               /*bit_valid*/ 1,
7512                                               /*bit*/ 2);
7513                         ctl_done((union ctl_io *)ctsio);
7514                         return (CTL_RETVAL_COMPLETE);
7515                 }
7516                 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7517                 break;
7518         default:
7519                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7520                                       /*sks_valid*/ 1,
7521                                       /*command*/ 1,
7522                                       /*field*/ 2,
7523                                       /*bit_valid*/ 1,
7524                                       /*bit*/ 2);
7525                 ctl_done((union ctl_io *)ctsio);
7526                 return (CTL_RETVAL_COMPLETE);
7527         }
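
             /*
              * The first pass above only sized the reply; per SPC-4 the
              * "report all" format returns one descriptor per supported
              * opcode (and per service action where applicable), while
              * the one-command formats return a single descriptor plus a
              * CDB usage bit map, filled in below.
              */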
7528
7529         alloc_len = scsi_4btoul(cdb->length);
7530
7531         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7532
7533         ctsio->kern_sg_entries = 0;
7534
7535         if (total_len < alloc_len) {
7536                 ctsio->residual = alloc_len - total_len;
7537                 ctsio->kern_data_len = total_len;
7538                 ctsio->kern_total_len = total_len;
7539         } else {
7540                 ctsio->residual = 0;
7541                 ctsio->kern_data_len = alloc_len;
7542                 ctsio->kern_total_len = alloc_len;
7543         }
7544         ctsio->kern_data_resid = 0;
7545         ctsio->kern_rel_offset = 0;
7546
7547         switch (cdb->options & RSO_OPTIONS_MASK) {
7548         case RSO_OPTIONS_ALL:
7549                 all = (struct scsi_report_supported_opcodes_all *)
7550                     ctsio->kern_data_ptr;
7551                 num = 0;
7552                 for (i = 0; i < 256; i++) {
7553                         entry = &ctl_cmd_table[i];
7554                         if (entry->flags & CTL_CMD_FLAG_SA5) {
7555                                 for (j = 0; j < 32; j++) {
7556                                         sentry = &((const struct ctl_cmd_entry *)
7557                                             entry->execute)[j];
7558                                         if (!ctl_cmd_applicable(
7559                                             lun->be_lun->lun_type, sentry))
7560                                                 continue;
7561                                         descr = &all->descr[num++];
7562                                         descr->opcode = i;
7563                                         scsi_ulto2b(j, descr->service_action);
7564                                         descr->flags = RSO_SERVACTV;
7565                                         scsi_ulto2b(sentry->length,
7566                                             descr->cdb_length);
7567                                 }
7568                         } else {
7569                                 if (!ctl_cmd_applicable(lun->be_lun->lun_type,
7570                                     entry))
7571                                         continue;
7572                                 descr = &all->descr[num++];
7573                                 descr->opcode = i;
7574                                 scsi_ulto2b(0, descr->service_action);
7575                                 descr->flags = 0;
7576                                 scsi_ulto2b(entry->length, descr->cdb_length);
7577                         }
7578                 }
7579                 scsi_ulto4b(
7580                     num * sizeof(struct scsi_report_supported_opcodes_descr),
7581                     all->length);
7582                 break;
7583         case RSO_OPTIONS_OC:
7584                 one = (struct scsi_report_supported_opcodes_one *)
7585                     ctsio->kern_data_ptr;
7586                 entry = &ctl_cmd_table[opcode];
7587                 goto fill_one;
7588         case RSO_OPTIONS_OC_SA:
7589                 one = (struct scsi_report_supported_opcodes_one *)
7590                     ctsio->kern_data_ptr;
7591                 entry = &ctl_cmd_table[opcode];
7592                 entry = &((const struct ctl_cmd_entry *)
7593                     entry->execute)[service_action];
7594 fill_one:
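                     /*
                      * SUPPORT field values per SPC-4: 3 means supported in
                      * conformance with a SCSI standard (the CDB usage data
                      * below is valid); 1 means not supported.
                      */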
7595                 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
7596                         one->support = 3;
7597                         scsi_ulto2b(entry->length, one->cdb_length);
7598                         one->cdb_usage[0] = opcode;
7599                         memcpy(&one->cdb_usage[1], entry->usage,
7600                             entry->length - 1);
7601                 } else
7602                         one->support = 1;
7603                 break;
7604         }
7605
7606         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7607         ctsio->be_move_done = ctl_config_move_done;
7608
7609         ctl_datamove((union ctl_io *)ctsio);
7610         return (retval);
7611 }
7612
7613 int
7614 ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
7615 {
7616         struct ctl_lun *lun;
7617         struct scsi_report_supported_tmf *cdb;
7618         struct scsi_report_supported_tmf_data *data;
7619         int retval;
7620         int alloc_len, total_len;
7621
7622         CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
7623
7624         cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
7625         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7626
7627         retval = CTL_RETVAL_COMPLETE;
7628
7629         total_len = sizeof(struct scsi_report_supported_tmf_data);
7630         alloc_len = scsi_4btoul(cdb->length);
7631
7632         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7633
7634         ctsio->kern_sg_entries = 0;
7635
7636         if (total_len < alloc_len) {
7637                 ctsio->residual = alloc_len - total_len;
7638                 ctsio->kern_data_len = total_len;
7639                 ctsio->kern_total_len = total_len;
7640         } else {
7641                 ctsio->residual = 0;
7642                 ctsio->kern_data_len = alloc_len;
7643                 ctsio->kern_total_len = alloc_len;
7644         }
7645         ctsio->kern_data_resid = 0;
7646         ctsio->kern_rel_offset = 0;
7647
7648         data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
7649         data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
7650         data->byte2 |= RST_ITNRS;
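             /*
              * The bits above advertise ABORT TASK, ABORT TASK SET,
              * CLEAR TASK SET, LOGICAL UNIT RESET, TARGET RESET and
              * I_T NEXUS RESET as supported task management functions.
              */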
7651
7652         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7653         ctsio->be_move_done = ctl_config_move_done;
7654
7655         ctl_datamove((union ctl_io *)ctsio);
7656         return (retval);
7657 }
7658
7659 int
7660 ctl_report_timestamp(struct ctl_scsiio *ctsio)
7661 {
7662         struct ctl_lun *lun;
7663         struct scsi_report_timestamp *cdb;
7664         struct scsi_report_timestamp_data *data;
7665         struct timeval tv;
7666         int64_t timestamp;
7667         int retval;
7668         int alloc_len, total_len;
7669
7670         CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
7671
7672         cdb = (struct scsi_report_timestamp *)ctsio->cdb;
7673         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7674
7675         retval = CTL_RETVAL_COMPLETE;
7676
7677         total_len = sizeof(struct scsi_report_timestamp_data);
7678         alloc_len = scsi_4btoul(cdb->length);
7679
7680         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7681
7682         ctsio->kern_sg_entries = 0;
7683
7684         if (total_len < alloc_len) {
7685                 ctsio->residual = alloc_len - total_len;
7686                 ctsio->kern_data_len = total_len;
7687                 ctsio->kern_total_len = total_len;
7688         } else {
7689                 ctsio->residual = 0;
7690                 ctsio->kern_data_len = alloc_len;
7691                 ctsio->kern_total_len = alloc_len;
7692         }
7693         ctsio->kern_data_resid = 0;
7694         ctsio->kern_rel_offset = 0;
7695
7696         data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
7697         scsi_ulto2b(sizeof(*data) - 2, data->length);
7698         data->origin = RTS_ORIG_OUTSIDE;
7699         getmicrotime(&tv);
7700         timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
7701         scsi_ulto4b(timestamp >> 16, data->timestamp);
7702         scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
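             /*
              * The returned timestamp is a 48-bit count of milliseconds:
              * the high 32 bits go into timestamp[0..3] and the low 16
              * bits into timestamp[4..5].
              */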
7703
7704         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7705         ctsio->be_move_done = ctl_config_move_done;
7706
7707         ctl_datamove((union ctl_io *)ctsio);
7708         return (retval);
7709 }
7710
7711 int
7712 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7713 {
7714         struct scsi_per_res_in *cdb;
7715         int alloc_len, total_len = 0;
7716         /* struct scsi_per_res_in_rsrv in_data; */
7717         struct ctl_lun *lun;
7718         struct ctl_softc *softc;
7719
7720         CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7721
7722         softc = control_softc;
7723
7724         cdb = (struct scsi_per_res_in *)ctsio->cdb;
7725
7726         alloc_len = scsi_2btoul(cdb->length);
7727
7728         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7729
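             /*
              * The buffer size depends on reservation state sampled under
              * the LUN lock.  Since the lock is dropped across malloc(),
              * the cases below re-check and jump back here if the state
              * changed in the meantime.
              */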
7730 retry:
7731         mtx_lock(&lun->lun_lock);
7732         switch (cdb->action) {
7733         case SPRI_RK: /* read keys */
7734                 total_len = sizeof(struct scsi_per_res_in_keys) +
7735                         lun->pr_key_count *
7736                         sizeof(struct scsi_per_res_key);
7737                 break;
7738         case SPRI_RR: /* read reservation */
7739                 if (lun->flags & CTL_LUN_PR_RESERVED)
7740                         total_len = sizeof(struct scsi_per_res_in_rsrv);
7741                 else
7742                         total_len = sizeof(struct scsi_per_res_in_header);
7743                 break;
7744         case SPRI_RC: /* report capabilities */
7745                 total_len = sizeof(struct scsi_per_res_cap);
7746                 break;
7747         case SPRI_RS: /* read full status */
7748                 total_len = sizeof(struct scsi_per_res_in_header) +
7749                     (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7750                     lun->pr_key_count;
7751                 break;
7752         default:
7753                 panic("Invalid PR type %x", cdb->action);
7754         }
7755         mtx_unlock(&lun->lun_lock);
7756
7757         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7758
7759         if (total_len < alloc_len) {
7760                 ctsio->residual = alloc_len - total_len;
7761                 ctsio->kern_data_len = total_len;
7762                 ctsio->kern_total_len = total_len;
7763         } else {
7764                 ctsio->residual = 0;
7765                 ctsio->kern_data_len = alloc_len;
7766                 ctsio->kern_total_len = alloc_len;
7767         }
7768
7769         ctsio->kern_data_resid = 0;
7770         ctsio->kern_rel_offset = 0;
7771         ctsio->kern_sg_entries = 0;
7772
7773         mtx_lock(&lun->lun_lock);
7774         switch (cdb->action) {
7775         case SPRI_RK: { // read keys
7776                 struct scsi_per_res_in_keys *res_keys;
7777                 int i, key_count;
7778
7779                 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7780
7781                 /*
7782                  * We had to drop the lock to allocate our buffer, which
7783                  * leaves time for someone to come in with another
7784                  * persistent reservation.  (That is unlikely, though,
7785                  * since this should be the only persistent reservation
7786                  * command active right now.)
7787                  */
7788                 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7789                     (lun->pr_key_count *
7790                      sizeof(struct scsi_per_res_key)))){
7791                         mtx_unlock(&lun->lun_lock);
7792                         free(ctsio->kern_data_ptr, M_CTL);
7793                         printf("%s: reservation length changed, retrying\n",
7794                                __func__);
7795                         goto retry;
7796                 }
7797
7798                 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
7799
7800                 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7801                              lun->pr_key_count, res_keys->header.length);
7802
7803                 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7804                         if (!lun->per_res[i].registered)
7805                                 continue;
7806
7807                         /*
7808                          * We used lun->pr_key_count to calculate the
7809                          * size to allocate.  If it turns out the number of
7810                          * initiators with the registered flag set is
7811                          * larger than that (i.e. they haven't been kept in
7812                          * sync), we've got a problem.
7813                          */
7814                         if (key_count >= lun->pr_key_count) {
7815 #ifdef NEEDTOPORT
7816                                 csevent_log(CSC_CTL | CSC_SHELF_SW |
7817                                             CTL_PR_ERROR,
7818                                             csevent_LogType_Fault,
7819                                             csevent_AlertLevel_Yellow,
7820                                             csevent_FRU_ShelfController,
7821                                             csevent_FRU_Firmware,
7822                                         csevent_FRU_Unknown,
7823                                             "registered keys %d >= key "
7824                                             "count %d", key_count,
7825                                             lun->pr_key_count);
7826 #endif
7827                                 key_count++;
7828                                 continue;
7829                         }
7830                         memcpy(res_keys->keys[key_count].key,
7831                                lun->per_res[i].res_key.key,
7832                                ctl_min(sizeof(res_keys->keys[key_count].key),
7833                                sizeof(lun->per_res[i].res_key)));
7834                         key_count++;
7835                 }
7836                 break;
7837         }
7838         case SPRI_RR: { // read reservation
7839                 struct scsi_per_res_in_rsrv *res;
7840                 int tmp_len, header_only;
7841
7842                 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7843
7844                 scsi_ulto4b(lun->PRGeneration, res->header.generation);
7845
7846                 if (lun->flags & CTL_LUN_PR_RESERVED) {
7848                         tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7849                         scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7850                                     res->header.length);
7851                         header_only = 0;
7852                 } else {
7853                         tmp_len = sizeof(struct scsi_per_res_in_header);
7854                         scsi_ulto4b(0, res->header.length);
7855                         header_only = 1;
7856                 }
7857
7858                 /*
7859                  * We had to drop the lock to allocate our buffer, which
7860                  * leaves time for someone to come in with another
7861                  * persistent reservation.  (That is unlikely, though,
7862                  * since this should be the only persistent reservation
7863                  * command active right now.)
7864                  */
7865                 if (tmp_len != total_len) {
7866                         mtx_unlock(&lun->lun_lock);
7867                         free(ctsio->kern_data_ptr, M_CTL);
7868                         printf("%s: reservation status changed, retrying\n",
7869                                __func__);
7870                         goto retry;
7871                 }
7872
7873                 /*
7874                  * No reservation held, so we're done.
7875                  */
7876                 if (header_only != 0)
7877                         break;
7878
7879                 /*
7880                  * If the registration is an All Registrants type, the key
7881                  * is 0, since it doesn't really matter.
7882                  */
7883                 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7884                         memcpy(res->data.reservation,
7885                                &lun->per_res[lun->pr_res_idx].res_key,
7886                                sizeof(struct scsi_per_res_key));
7887                 }
7888                 res->data.scopetype = lun->res_type;
7889                 break;
7890         }
7891         case SPRI_RC:     //report capabilities
7892         {
7893                 struct scsi_per_res_cap *res_cap;
7894                 uint16_t type_mask;
7895
7896                 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7897                 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7898                 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_3;
7899                 type_mask = SPRI_TM_WR_EX_AR |
7900                             SPRI_TM_EX_AC_RO |
7901                             SPRI_TM_WR_EX_RO |
7902                             SPRI_TM_EX_AC |
7903                             SPRI_TM_WR_EX |
7904                             SPRI_TM_EX_AC_AR;
7905                 scsi_ulto2b(type_mask, res_cap->type_mask);
7906                 break;
7907         }
7908         case SPRI_RS: { // read full status
7909                 struct scsi_per_res_in_full *res_status;
7910                 struct scsi_per_res_in_full_desc *res_desc;
7911                 struct ctl_port *port;
7912                 int i, len;
7913
7914                 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
7915
7916                 /*
7917                  * We had to drop the lock to allocate our buffer, which
7918                  * leaves time for someone to come in with another
7919                  * persistent reservation.  (That is unlikely, though,
7920                  * since this should be the only persistent reservation
7921                  * command active right now.)
7922                  */
7923                 if (total_len < (sizeof(struct scsi_per_res_in_header) +
7924                     (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7925                      lun->pr_key_count)){
7926                         mtx_unlock(&lun->lun_lock);
7927                         free(ctsio->kern_data_ptr, M_CTL);
7928                         printf("%s: reservation length changed, retrying\n",
7929                                __func__);
7930                         goto retry;
7931                 }
7932
7933                 scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
7934
7935                 res_desc = &res_status->desc[0];
7936                 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7937                         if (!lun->per_res[i].registered)
7938                                 continue;
7939
7940                         memcpy(&res_desc->res_key, &lun->per_res[i].res_key.key,
7941                             sizeof(res_desc->res_key));
7942                         if ((lun->flags & CTL_LUN_PR_RESERVED) &&
7943                             (lun->pr_res_idx == i ||
7944                              lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
7945                                 res_desc->flags = SPRI_FULL_R_HOLDER;
7946                                 res_desc->scopetype = lun->res_type;
7947                         }
7948                         scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
7949                             res_desc->rel_trgt_port_id);
7950                         len = 0;
7951                         port = softc->ctl_ports[
7952                             ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)];
7953                         if (port != NULL)
7954                                 len = ctl_create_iid(port,
7955                                     i % CTL_MAX_INIT_PER_PORT,
7956                                     res_desc->transport_id);
7957                         scsi_ulto4b(len, res_desc->additional_length);
7958                         res_desc = (struct scsi_per_res_in_full_desc *)
7959                             &res_desc->transport_id[len];
7960                 }
7961                 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
7962                     res_status->header.length);
7963                 break;
7964         }
7965         default:
7966                 /*
7967                  * This is a bug, because we just checked for this above,
7968                  * and should have returned an error.
7969                  */
7970                 panic("Invalid PR type %x", cdb->action);
7971                 break; /* NOTREACHED */
7972         }
7973         mtx_unlock(&lun->lun_lock);
7974
7975         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7976         ctsio->be_move_done = ctl_config_move_done;
7977
7978         CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
7979                          ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
7980                          ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
7981                          ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
7982                          ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
7983
7984         ctl_datamove((union ctl_io *)ctsio);
7985
7986         return (CTL_RETVAL_COMPLETE);
7987 }
7988
7989 /*
7990  * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7991  * it should return.
7992  */
7993 static int
7994 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7995                 uint64_t sa_res_key, uint8_t type, uint32_t residx,
7996                 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7997                 struct scsi_per_res_out_parms* param)
7998 {
7999         union ctl_ha_msg persis_io;
8000         int retval, i;
8001         int isc_retval;
8002
8003         retval = 0;
8004
8005         mtx_lock(&lun->lun_lock);
8006         if (sa_res_key == 0) {
8007                 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8008                         /* validate scope and type */
8009                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
8010                              SPR_LU_SCOPE) {
8011                                 mtx_unlock(&lun->lun_lock);
8012                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
8013                                                       /*sks_valid*/ 1,
8014                                                       /*command*/ 1,
8015                                                       /*field*/ 2,
8016                                                       /*bit_valid*/ 1,
8017                                                       /*bit*/ 4);
8018                                 ctl_done((union ctl_io *)ctsio);
8019                                 return (1);
8020                         }
8021
8022                         if (type > 8 || type == 2 || type == 4 || type == 0) {
8023                                 mtx_unlock(&lun->lun_lock);
8024                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
8025                                                       /*sks_valid*/ 1,
8026                                                       /*command*/ 1,
8027                                                       /*field*/ 2,
8028                                                       /*bit_valid*/ 1,
8029                                                       /*bit*/ 0);
8030                                 ctl_done((union ctl_io *)ctsio);
8031                                 return (1);
8032                         }
8033
8034                         /* temporarily unregister this nexus */
8035                         lun->per_res[residx].registered = 0;
8036
8037                         /*
8038                          * Unregister everybody else and build UA for
8039                          * them
8040                          */
8041                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
8042                                 if (lun->per_res[i].registered == 0)
8043                                         continue;
8044
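                                /*
                                 * pending_ua[] covers only this SC's own
                                 * initiators: indices below
                                 * CTL_MAX_INITIATORS when persis_offset is
                                 * zero, otherwise those at or above
                                 * persis_offset, rebased by subtracting
                                 * persis_offset.
                                 */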
8045                                 if (!persis_offset
8046                                  && i < CTL_MAX_INITIATORS)
8047                                         lun->pending_ua[i] |=
8048                                                 CTL_UA_REG_PREEMPT;
8049                                 else if (persis_offset
8050                                       && i >= persis_offset)
8051                                         lun->pending_ua[i-persis_offset] |=
8052                                                 CTL_UA_REG_PREEMPT;
8053                                 lun->per_res[i].registered = 0;
8054                                 memset(&lun->per_res[i].res_key, 0,
8055                                        sizeof(struct scsi_per_res_key));
8056                         }
8057                         lun->per_res[residx].registered = 1;
8058                         lun->pr_key_count = 1;
8059                         lun->res_type = type;
8060                         if (lun->res_type != SPR_TYPE_WR_EX_AR
8061                          && lun->res_type != SPR_TYPE_EX_AC_AR)
8062                                 lun->pr_res_idx = residx;
8063
8064                         /* send msg to other side */
8065                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8066                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8067                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8068                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8069                         persis_io.pr.pr_info.res_type = type;
8070                         memcpy(persis_io.pr.pr_info.sa_res_key,
8071                                param->serv_act_res_key,
8072                                sizeof(param->serv_act_res_key));
8073                         if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8074                              &persis_io, sizeof(persis_io), 0)) >
8075                              CTL_HA_STATUS_SUCCESS) {
8076                                 printf("CTL:Persis Out error returned "
8077                                        "from ctl_ha_msg_send %d\n",
8078                                        isc_retval);
8079                         }
8080                 } else {
8081                         /* not all registrants */
8082                         mtx_unlock(&lun->lun_lock);
8083                         free(ctsio->kern_data_ptr, M_CTL);
8084                         ctl_set_invalid_field(ctsio,
8085                                               /*sks_valid*/ 1,
8086                                               /*command*/ 0,
8087                                               /*field*/ 8,
8088                                               /*bit_valid*/ 0,
8089                                               /*bit*/ 0);
8090                         ctl_done((union ctl_io *)ctsio);
8091                         return (1);
8092                 }
8093         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8094                 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
8095                 int found = 0;
8096
8097                 if (res_key == sa_res_key) {
8098                         /* special case */
8099                         /*
8100                          * The spec implies this is not good, but doesn't
8101                          * say what to do.  There are two choices: either
8102                          * generate a reservation conflict, or a check
8103                          * condition with an illegal field in the
8104                          * parameter data.  Since the fault lies in the
8105                          * sa_res_key, and a zero sa_res_key already gets
8106                          * the latter, take the check condition approach.
8107                          */
8108                         mtx_unlock(&lun->lun_lock);
8109                         free(ctsio->kern_data_ptr, M_CTL);
8110                         ctl_set_invalid_field(ctsio,
8111                                               /*sks_valid*/ 1,
8112                                               /*command*/ 0,
8113                                               /*field*/ 8,
8114                                               /*bit_valid*/ 0,
8115                                               /*bit*/ 0);
8116                         ctl_done((union ctl_io *)ctsio);
8117                         return (1);
8118                 }
8119
8120                 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
8121                         if (lun->per_res[i].registered
8122                          && memcmp(param->serv_act_res_key,
8123                             lun->per_res[i].res_key.key,
8124                             sizeof(struct scsi_per_res_key)) != 0)
8125                                 continue;
8126
8127                         found = 1;
8128                         lun->per_res[i].registered = 0;
8129                         memset(&lun->per_res[i].res_key, 0,
8130                                sizeof(struct scsi_per_res_key));
8131                         lun->pr_key_count--;
8132
8133                         if (!persis_offset && i < CTL_MAX_INITIATORS)
8134                                 lun->pending_ua[i] |= CTL_UA_REG_PREEMPT;
8135                         else if (persis_offset && i >= persis_offset)
8136                                 lun->pending_ua[i-persis_offset] |=
8137                                         CTL_UA_REG_PREEMPT;
8138                 }
8139                 if (!found) {
8140                         mtx_unlock(&lun->lun_lock);
8141                         free(ctsio->kern_data_ptr, M_CTL);
8142                         ctl_set_reservation_conflict(ctsio);
8143                         ctl_done((union ctl_io *)ctsio);
8144                         return (1); /* non-zero: I/O already completed */
8145                 }
8146                 /* send msg to other side */
8147                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8148                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8149                 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8150                 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8151                 persis_io.pr.pr_info.res_type = type;
8152                 memcpy(persis_io.pr.pr_info.sa_res_key,
8153                        param->serv_act_res_key,
8154                        sizeof(param->serv_act_res_key));
8155                 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8156                      &persis_io, sizeof(persis_io), 0)) >
8157                      CTL_HA_STATUS_SUCCESS) {
8158                         printf("CTL:Persis Out error returned from "
8159                                "ctl_ha_msg_send %d\n", isc_retval);
8160                 }
8161         } else {
8162                 /* Reserved but not all registrants */
8163                 /* sa_res_key is res holder */
8164                 if (memcmp(param->serv_act_res_key,
8165                    lun->per_res[lun->pr_res_idx].res_key.key,
8166                    sizeof(struct scsi_per_res_key)) == 0) {
8167                         /* validate scope and type */
8168                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
8169                              SPR_LU_SCOPE) {
8170                                 mtx_unlock(&lun->lun_lock);
8171                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
8172                                                       /*sks_valid*/ 1,
8173                                                       /*command*/ 1,
8174                                                       /*field*/ 2,
8175                                                       /*bit_valid*/ 1,
8176                                                       /*bit*/ 4);
8177                                 ctl_done((union ctl_io *)ctsio);
8178                                 return (1);
8179                         }
8180
8181                         if (type > 8 || type == 2 || type == 4 || type == 0) {
8182                                 mtx_unlock(&lun->lun_lock);
8183                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
8184                                                       /*sks_valid*/ 1,
8185                                                       /*command*/ 1,
8186                                                       /*field*/ 2,
8187                                                       /*bit_valid*/ 1,
8188                                                       /*bit*/ 0);
8189                                 ctl_done((union ctl_io *)ctsio);
8190                                 return (1);
8191                         }
8192
8193                         /*
8194                          * Do the following:
8195                          * - If sa_res_key != res_key, remove all
8196                          *   registrants with sa_res_key and generate a
8197                          *   UA (Registrations Preempted) for them.
8198                          * - If it wasn't an exclusive reservation and
8199                          *   the type has changed, generate a UA
8200                          *   (Reservations Preempted) for all other
8201                          *   registered nexuses.
8202                          * - Establish the new reservation and holder.
8203                          * - If res_key and sa_res_key are the same, do
8204                          *   the above but don't unregister the holder.
8205                          */
8206
8207                         /*
8208                          * Temporarily unregister so it won't get
8209                          * removed or UA generated
8210                          */
8211                         lun->per_res[residx].registered = 0;
8212                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
8213                                 if (lun->per_res[i].registered == 0)
8214                                         continue;
8215
8216                                 if (memcmp(param->serv_act_res_key,
8217                                     lun->per_res[i].res_key.key,
8218                                     sizeof(struct scsi_per_res_key)) == 0) {
8219                                         lun->per_res[i].registered = 0;
8220                                         memset(&lun->per_res[i].res_key,
8221                                                0,
8222                                                sizeof(struct scsi_per_res_key));
8223                                         lun->pr_key_count--;
8224
8225                                         if (!persis_offset
8226                                          && i < CTL_MAX_INITIATORS)
8227                                                 lun->pending_ua[i] |=
8228                                                         CTL_UA_REG_PREEMPT;
8229                                         else if (persis_offset
8230                                               && i >= persis_offset)
8231                                                 lun->pending_ua[i-persis_offset] |=
8232                                                   CTL_UA_REG_PREEMPT;
8233                                 } else if (type != lun->res_type
8234                                         && (lun->res_type == SPR_TYPE_WR_EX_RO
8235                                          || lun->res_type == SPR_TYPE_EX_AC_RO)) {
8236                                                 if (!persis_offset
8237                                                  && i < CTL_MAX_INITIATORS)
8238                                                         lun->pending_ua[i] |=
8239                                                         CTL_UA_RES_RELEASE;
8240                                                 else if (persis_offset
8241                                                       && i >= persis_offset)
8242                                                         lun->pending_ua[
8243                                                         i-persis_offset] |=
8244                                                         CTL_UA_RES_RELEASE;
8245                                 }
8246                         }
8247                         lun->per_res[residx].registered = 1;
8248                         lun->res_type = type;
8249                         if (lun->res_type != SPR_TYPE_WR_EX_AR
8250                          && lun->res_type != SPR_TYPE_EX_AC_AR)
8251                                 lun->pr_res_idx = residx;
8252                         else
8253                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8254
8255                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8256                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8257                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8258                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8259                         persis_io.pr.pr_info.res_type = type;
8260                         memcpy(persis_io.pr.pr_info.sa_res_key,
8261                                param->serv_act_res_key,
8262                                sizeof(param->serv_act_res_key));
8263                         if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8264                              &persis_io, sizeof(persis_io), 0)) >
8265                              CTL_HA_STATUS_SUCCESS) {
8266                                 printf("CTL:Persis Out error returned "
8267                                        "from ctl_ha_msg_send %d\n",
8268                                        isc_retval);
8269                         }
8270                 } else {
8271                         /*
8272                          * sa_res_key is not the res holder; just
8273                          * remove registrants.
8274                          */
8275                         int found=0;
8276
8277                         for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
8278                                 if (memcmp(param->serv_act_res_key,
8279                                     lun->per_res[i].res_key.key,
8280                                     sizeof(struct scsi_per_res_key)) != 0)
8281                                         continue;
8282
8283                                 found = 1;
8284                                 lun->per_res[i].registered = 0;
8285                                 memset(&lun->per_res[i].res_key, 0,
8286                                        sizeof(struct scsi_per_res_key));
8287                                 lun->pr_key_count--;
8288
8289                                 if (!persis_offset
8290                                  && i < CTL_MAX_INITIATORS)
8291                                         lun->pending_ua[i] |=
8292                                                 CTL_UA_REG_PREEMPT;
8293                                 else if (persis_offset
8294                                       && i >= persis_offset)
8295                                         lun->pending_ua[i-persis_offset] |=
8296                                                 CTL_UA_REG_PREEMPT;
8297                         }
8298
8299                         if (!found) {
8300                                 mtx_unlock(&lun->lun_lock);
8301                                 free(ctsio->kern_data_ptr, M_CTL);
8302                                 ctl_set_reservation_conflict(ctsio);
8303                                 ctl_done((union ctl_io *)ctsio);
8304                                 return (1);
8305                         }
8306                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8307                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8308                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8309                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8310                         persis_io.pr.pr_info.res_type = type;
8311                         memcpy(persis_io.pr.pr_info.sa_res_key,
8312                                param->serv_act_res_key,
8313                                sizeof(param->serv_act_res_key));
8314                         if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8315                              &persis_io, sizeof(persis_io), 0)) >
8316                              CTL_HA_STATUS_SUCCESS) {
8317                                 printf("CTL:Persis Out error returned "
8318                                        "from ctl_ha_msg_send %d\n",
8319                                        isc_retval);
8320                         }
8321                 }
8322         }
8323
8324         lun->PRGeneration++;
8325         mtx_unlock(&lun->lun_lock);
8326
8327         return (retval);
8328 }
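#if 0
/*
 * The CTL_MSG_PERS_ACTION construction above is repeated for every preempt
 * path.  A minimal sketch of a hypothetical helper (not part of CTL; the
 * name is illustrative only) that would factor out that boilerplate:
 */
static void
ctl_pr_notify_other_sc(struct ctl_scsiio *ctsio, int action, uint32_t residx,
                       uint8_t res_type, const uint8_t *sa_res_key)
{
        union ctl_ha_msg persis_io;
        int isc_retval;

        /* Mirror the fields every caller fills in by hand today. */
        persis_io.hdr.nexus = ctsio->io_hdr.nexus;
        persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
        persis_io.pr.pr_info.action = action;
        persis_io.pr.pr_info.residx = residx;
        persis_io.pr.pr_info.res_type = res_type;
        if (sa_res_key != NULL)
                memcpy(persis_io.pr.pr_info.sa_res_key, sa_res_key,
                       sizeof(persis_io.pr.pr_info.sa_res_key));

        /* Same best-effort send and diagnostic as the open-coded copies. */
        if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
             sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS)
                printf("CTL:Persis Out error returned from "
                       "ctl_ha_msg_send %d\n", isc_retval);
}
#endif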
8329
8330 static void
8331 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
8332 {
8333         int i;
8334
8335         if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8336          || lun->pr_res_idx == CTL_PR_NO_RESERVATION
8337          || memcmp(&lun->per_res[lun->pr_res_idx].res_key,
8338                    msg->pr.pr_info.sa_res_key,
8339                    sizeof(struct scsi_per_res_key)) != 0) {
8340                 uint64_t sa_res_key;
8341                 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
8342
8343                 if (sa_res_key == 0) {
8344                         /* temporarily unregister this nexus */
8345                         lun->per_res[msg->pr.pr_info.residx].registered = 0;
8346
8347                         /*
8348                          * Unregister everybody else and build UA for
8349                          * them
8350                          */
8351                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
8352                                 if (lun->per_res[i].registered == 0)
8353                                         continue;
8354
8355                                 if (!persis_offset
8356                                  && i < CTL_MAX_INITIATORS)
8357                                         lun->pending_ua[i] |=
8358                                                 CTL_UA_REG_PREEMPT;
8359                                 else if (persis_offset && i >= persis_offset)
8360                                         lun->pending_ua[i - persis_offset] |=
8361                                                 CTL_UA_REG_PREEMPT;
8362                                 lun->per_res[i].registered = 0;
8363                                 memset(&lun->per_res[i].res_key, 0,
8364                                        sizeof(struct scsi_per_res_key));
8365                         }
8366
8367                         lun->per_res[msg->pr.pr_info.residx].registered = 1;
8368                         lun->pr_key_count = 1;
8369                         lun->res_type = msg->pr.pr_info.res_type;
8370                         if (lun->res_type != SPR_TYPE_WR_EX_AR
8371                          && lun->res_type != SPR_TYPE_EX_AC_AR)
8372                                 lun->pr_res_idx = msg->pr.pr_info.residx;
8373                 } else {
8374                         for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
8375                                 if (memcmp(msg->pr.pr_info.sa_res_key,
8376                                    lun->per_res[i].res_key.key,
8377                                    sizeof(struct scsi_per_res_key)) != 0)
8378                                         continue;
8379
8380                                 lun->per_res[i].registered = 0;
8381                                 memset(&lun->per_res[i].res_key, 0,
8382                                        sizeof(struct scsi_per_res_key));
8383                                 lun->pr_key_count--;
8384
8385                                 if (!persis_offset
8386                                  && i < CTL_MAX_INITIATORS)
8387                                         lun->pending_ua[i] |=
8388                                                 CTL_UA_REG_PREEMPT;
8389                                 else if (persis_offset
8390                                       && i >= persis_offset)
8391                                         lun->pending_ua[i - persis_offset] |=
8392                                                 CTL_UA_REG_PREEMPT;
8393                         }
8394                 }
8395         } else {
8396                 /*
8397                  * Temporarily unregister so it won't get removed
8398                  * or UA generated
8399                  */
8400                 lun->per_res[msg->pr.pr_info.residx].registered = 0;
8401                 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
8402                         if (lun->per_res[i].registered == 0)
8403                                 continue;
8404
8405                         if (memcmp(msg->pr.pr_info.sa_res_key,
8406                            lun->per_res[i].res_key.key,
8407                            sizeof(struct scsi_per_res_key)) == 0) {
8408                                 lun->per_res[i].registered = 0;
8409                                 memset(&lun->per_res[i].res_key, 0,
8410                                        sizeof(struct scsi_per_res_key));
8411                                 lun->pr_key_count--;
8412                                 if (!persis_offset
8413                                  && i < CTL_MAX_INITIATORS)
8414                                         lun->pending_ua[i] |=
8415                                                 CTL_UA_REG_PREEMPT;
8416                                 else if (persis_offset
8417                                       && i >= persis_offset)
8418                                         lun->pending_ua[i - persis_offset] |=
8419                                                 CTL_UA_REG_PREEMPT;
8420                         } else if (msg->pr.pr_info.res_type != lun->res_type
8421                                 && (lun->res_type == SPR_TYPE_WR_EX_RO
8422                                  || lun->res_type == SPR_TYPE_EX_AC_RO)) {
8423                                         if (!persis_offset
8424                                          && i < CTL_MAX_INITIATORS)
8425                                                 lun->pending_ua[i] |=
8426                                                         CTL_UA_RES_RELEASE;
8427                                         else if (persis_offset
8428                                               && i >= persis_offset)
8429                                                 lun->pending_ua[i - persis_offset] |=
8430                                                         CTL_UA_RES_RELEASE;
8431                         }
8432                 }
8433                 lun->per_res[msg->pr.pr_info.residx].registered = 1;
8434                 lun->res_type = msg->pr.pr_info.res_type;
8435                 if (lun->res_type != SPR_TYPE_WR_EX_AR
8436                  && lun->res_type != SPR_TYPE_EX_AC_AR)
8437                         lun->pr_res_idx = msg->pr.pr_info.residx;
8438                 else
8439                         lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8440         }
8441         lun->PRGeneration++;
8442
8443 }
8444
8445
8446 int
8447 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
8448 {
8449         int retval;
8450         int isc_retval;
8451         u_int32_t param_len;
8452         struct scsi_per_res_out *cdb;
8453         struct ctl_lun *lun;
8454         struct scsi_per_res_out_parms* param;
8455         struct ctl_softc *softc;
8456         uint32_t residx;
8457         uint64_t res_key, sa_res_key;
8458         uint8_t type;
8459         union ctl_ha_msg persis_io;
8460         int    i;
8461
8462         CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8463
8464         retval = CTL_RETVAL_COMPLETE;
8465
8466         softc = control_softc;
8467
8468         cdb = (struct scsi_per_res_out *)ctsio->cdb;
8469         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8470
8471         /*
8472          * We only support whole-LUN scope.  Scope & type are ignored for
8473          * REGISTER, REGISTER AND IGNORE EXISTING KEY, and CLEAR, and we
8474          * sometimes ignore them on PREEMPT as well.
8475          * Verify the reservation type here as well.
8476          */
8477         type = cdb->scope_type & SPR_TYPE_MASK;
8478         if ((cdb->action == SPRO_RESERVE)
8479          || (cdb->action == SPRO_RELEASE)) {
8480                 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
8481                         ctl_set_invalid_field(/*ctsio*/ ctsio,
8482                                               /*sks_valid*/ 1,
8483                                               /*command*/ 1,
8484                                               /*field*/ 2,
8485                                               /*bit_valid*/ 1,
8486                                               /*bit*/ 4);
8487                         ctl_done((union ctl_io *)ctsio);
8488                         return (CTL_RETVAL_COMPLETE);
8489                 }
8490
8491                 if (type > 8 || type == 2 || type == 4 || type == 0) {
8492                         ctl_set_invalid_field(/*ctsio*/ ctsio,
8493                                               /*sks_valid*/ 1,
8494                                               /*command*/ 1,
8495                                               /*field*/ 2,
8496                                               /*bit_valid*/ 1,
8497                                               /*bit*/ 0);
8498                         ctl_done((union ctl_io *)ctsio);
8499                         return (CTL_RETVAL_COMPLETE);
8500                 }
8501         }
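        /*
         * Byte 2 of the PERSISTENT RESERVE OUT CDB packs the scope into the
         * high nibble (SPR_SCOPE_MASK) and the type into the low nibble
         * (SPR_TYPE_MASK), which is what the extraction above relies on.
         */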
8502
8503         param_len = scsi_4btoul(cdb->length);
8504
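        /*
         * First pass through for a config write: no data buffer exists yet,
         * so allocate one, start the data move to pull the parameter list
         * from the initiator, and return.  Once the data arrives,
         * ctl_config_move_done() re-dispatches the command and we re-enter
         * this function with CTL_FLAG_ALLOCATED set and the data in place.
         */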
8505         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
8506                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
8507                 ctsio->kern_data_len = param_len;
8508                 ctsio->kern_total_len = param_len;
8509                 ctsio->kern_data_resid = 0;
8510                 ctsio->kern_rel_offset = 0;
8511                 ctsio->kern_sg_entries = 0;
8512                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8513                 ctsio->be_move_done = ctl_config_move_done;
8514                 ctl_datamove((union ctl_io *)ctsio);
8515
8516                 return (CTL_RETVAL_COMPLETE);
8517         }
8518
8519         param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
8520
8521         residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
8522         res_key = scsi_8btou64(param->res_key.key);
8523         sa_res_key = scsi_8btou64(param->serv_act_res_key);
8524
8525         /*
8526          * Validate the reservation key here, except for SPRO_REG_IGNO.
8527          * This must be done for all other service actions.
8528          */
8529         if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
8530                 mtx_lock(&lun->lun_lock);
8531                 if (lun->per_res[residx].registered) {
8532                     if (memcmp(param->res_key.key,
8533                                lun->per_res[residx].res_key.key,
8534                                ctl_min(sizeof(param->res_key),
8535                                sizeof(lun->per_res[residx].res_key))) != 0) {
8536                                 /*
8537                                  * The current key passed in doesn't match
8538                                  * the one the initiator previously
8539                                  * registered.
8540                                  */
8541                                 mtx_unlock(&lun->lun_lock);
8542                                 free(ctsio->kern_data_ptr, M_CTL);
8543                                 ctl_set_reservation_conflict(ctsio);
8544                                 ctl_done((union ctl_io *)ctsio);
8545                                 return (CTL_RETVAL_COMPLETE);
8546                         }
8547                 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
8548                         /*
8549                          * We are not registered
8550                          */
8551                         mtx_unlock(&lun->lun_lock);
8552                         free(ctsio->kern_data_ptr, M_CTL);
8553                         ctl_set_reservation_conflict(ctsio);
8554                         ctl_done((union ctl_io *)ctsio);
8555                         return (CTL_RETVAL_COMPLETE);
8556                 } else if (res_key != 0) {
8557                         /*
8558                          * We are not registered and trying to register but
8559                          * the register key isn't zero.
8560                          */
8561                         mtx_unlock(&lun->lun_lock);
8562                         free(ctsio->kern_data_ptr, M_CTL);
8563                         ctl_set_reservation_conflict(ctsio);
8564                         ctl_done((union ctl_io *)ctsio);
8565                         return (CTL_RETVAL_COMPLETE);
8566                 }
8567                 mtx_unlock(&lun->lun_lock);
8568         }
8569
8570         switch (cdb->action & SPRO_ACTION_MASK) {
8571         case SPRO_REGISTER:
8572         case SPRO_REG_IGNO: {
8573
8574 #if 0
8575                 printf("Registration received\n");
8576 #endif
8577
8578                 /*
8579                  * We don't support any of these options, as we report in
8580                  * the read capabilities request (see
8581                  * ctl_persistent_reserve_in(), above).
8582                  */
8583                 if ((param->flags & SPR_SPEC_I_PT)
8584                  || (param->flags & SPR_ALL_TG_PT)
8585                  || (param->flags & SPR_APTPL)) {
8586                         int bit_ptr;
8587
8588                         if (param->flags & SPR_APTPL)
8589                                 bit_ptr = 0;
8590                         else if (param->flags & SPR_ALL_TG_PT)
8591                                 bit_ptr = 2;
8592                         else /* SPR_SPEC_I_PT */
8593                                 bit_ptr = 3;
8594
8595                         free(ctsio->kern_data_ptr, M_CTL);
8596                         ctl_set_invalid_field(ctsio,
8597                                               /*sks_valid*/ 1,
8598                                               /*command*/ 0,
8599                                               /*field*/ 20,
8600                                               /*bit_valid*/ 1,
8601                                               /*bit*/ bit_ptr);
8602                         ctl_done((union ctl_io *)ctsio);
8603                         return (CTL_RETVAL_COMPLETE);
8604                 }
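                /*
                 * The sense-key specific fields above point the initiator
                 * at byte 20 of the parameter list (the flags byte) and the
                 * bit number of whichever unsupported option (APTPL,
                 * ALL_TG_PT or SPEC_I_PT) was set.
                 */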
8605
8606                 mtx_lock(&lun->lun_lock);
8607
8608                 /*
8609                  * The initiator wants to clear the
8610                  * key/unregister.
8611                  */
8612                 if (sa_res_key == 0) {
8613                         if ((res_key == 0
8614                           && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
8615                          || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
8616                           && !lun->per_res[residx].registered)) {
8617                                 mtx_unlock(&lun->lun_lock);
8618                                 goto done;
8619                         }
8620
8621                         lun->per_res[residx].registered = 0;
8622                         memset(&lun->per_res[residx].res_key,
8623                                0, sizeof(lun->per_res[residx].res_key));
8624                         lun->pr_key_count--;
8625
8626                         if (residx == lun->pr_res_idx) {
8627                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8628                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8629
8630                                 if ((lun->res_type == SPR_TYPE_WR_EX_RO
8631                                   || lun->res_type == SPR_TYPE_EX_AC_RO)
8632                                  && lun->pr_key_count) {
8633                                         /*
8634                                          * If the reservation is a registrants
8635                                          * only type we need to generate a UA
8636                                          * for other registered inits.  The
8637                                          * sense code should be RESERVATIONS
8638                                          * RELEASED
8639                                          */
8640
8641                                         for (i = 0; i < CTL_MAX_INITIATORS;i++){
8642                                                 if (lun->per_res[
8643                                                     i+persis_offset].registered
8644                                                     == 0)
8645                                                         continue;
8646                                                 lun->pending_ua[i] |=
8647                                                         CTL_UA_RES_RELEASE;
8648                                         }
8649                                 }
8650                                 lun->res_type = 0;
8651                         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8652                                 if (lun->pr_key_count==0) {
8653                                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8654                                         lun->res_type = 0;
8655                                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8656                                 }
8657                         }
8658                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8659                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8660                         persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8661                         persis_io.pr.pr_info.residx = residx;
8662                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8663                              &persis_io, sizeof(persis_io), 0)) >
8664                              CTL_HA_STATUS_SUCCESS) {
8665                                 printf("CTL:Persis Out error returned from "
8666                                        "ctl_ha_msg_send %d\n", isc_retval);
8667                         }
8668                 } else /* sa_res_key != 0 */ {
8669
8670                         /*
8671                          * If we aren't registered currently then increment
8672                          * the key count and set the registered flag.
8673                          */
8674                         if (!lun->per_res[residx].registered) {
8675                                 lun->pr_key_count++;
8676                                 lun->per_res[residx].registered = 1;
8677                         }
8678
8679                         memcpy(&lun->per_res[residx].res_key,
8680                                param->serv_act_res_key,
8681                                ctl_min(sizeof(param->serv_act_res_key),
8682                                sizeof(lun->per_res[residx].res_key)));
8683
8684                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8685                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8686                         persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8687                         persis_io.pr.pr_info.residx = residx;
8688                         memcpy(persis_io.pr.pr_info.sa_res_key,
8689                                param->serv_act_res_key,
8690                                sizeof(param->serv_act_res_key));
8691                         if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8692                              &persis_io, sizeof(persis_io), 0)) >
8693                              CTL_HA_STATUS_SUCCESS) {
8694                                 printf("CTL:Persis Out error returned from "
8695                                        "ctl_ha_msg_send %d\n", isc_retval);
8696                         }
8697                 }
8698                 lun->PRGeneration++;
8699                 mtx_unlock(&lun->lun_lock);
8700
8701                 break;
8702         }
8703         case SPRO_RESERVE:
8704 #if 0
8705                 printf("Reserve executed type %d\n", type);
8706 #endif
8707                 mtx_lock(&lun->lun_lock);
8708                 if (lun->flags & CTL_LUN_PR_RESERVED) {
8709                         /*
8710                          * If this isn't the reservation holder and it's
8711                          * not an "all registrants" type, or if the type
8712                          * differs, then we have a conflict.
8713                          */
8714                         if ((lun->pr_res_idx != residx
8715                           && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8716                          || lun->res_type != type) {
8717                                 mtx_unlock(&lun->lun_lock);
8718                                 free(ctsio->kern_data_ptr, M_CTL);
8719                                 ctl_set_reservation_conflict(ctsio);
8720                                 ctl_done((union ctl_io *)ctsio);
8721                                 return (CTL_RETVAL_COMPLETE);
8722                         }
8723                         mtx_unlock(&lun->lun_lock);
8724                 } else /* create a reservation */ {
8725                         /*
8726                          * If it's not an "all registrants" type record
8727                          * reservation holder
8728                          */
8729                         if (type != SPR_TYPE_WR_EX_AR
8730                          && type != SPR_TYPE_EX_AC_AR)
8731                                 lun->pr_res_idx = residx; /* Res holder */
8732                         else
8733                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8734
8735                         lun->flags |= CTL_LUN_PR_RESERVED;
8736                         lun->res_type = type;
8737
8738                         mtx_unlock(&lun->lun_lock);
8739
8740                         /* send msg to other side */
8741                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8742                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8743                         persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8744                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8745                         persis_io.pr.pr_info.res_type = type;
8746                         if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8747                              &persis_io, sizeof(persis_io), 0)) >
8748                              CTL_HA_STATUS_SUCCESS) {
8749                                 printf("CTL:Persis Out error returned from "
8750                                        "ctl_ha_msg_send %d\n", isc_retval);
8751                         }
8752                 }
8753                 break;
8754
8755         case SPRO_RELEASE:
8756                 mtx_lock(&lun->lun_lock);
8757                 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8758                         /* No reservation exists; return good status. */
8759                         mtx_unlock(&lun->lun_lock);
8760                         goto done;
8761                 }
8762                 /*
8763                  * Is this nexus a reservation holder?
8764                  */
8765                 if (lun->pr_res_idx != residx
8766                  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8767                         /*
8768                          * Not a reservation holder; return good status
8769                          * but do nothing.
8770                          */
8771                         mtx_unlock(&lun->lun_lock);
8772                         goto done;
8773                 }
8774
8775                 if (lun->res_type != type) {
8776                         mtx_unlock(&lun->lun_lock);
8777                         free(ctsio->kern_data_ptr, M_CTL);
8778                         ctl_set_illegal_pr_release(ctsio);
8779                         ctl_done((union ctl_io *)ctsio);
8780                         return (CTL_RETVAL_COMPLETE);
8781                 }
8782
8783                 /* okay to release */
8784                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8785                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8786                 lun->res_type = 0;
8787
8788                 /*
8789                  * if this isn't an exclusive access
8790                  * res generate UA for all other
8791                  * registrants.
8792                  */
8793                 if (type != SPR_TYPE_EX_AC
8794                  && type != SPR_TYPE_WR_EX) {
8795                         /*
8796                          * temporarily unregister so we don't generate UA
8797                          */
8798                         lun->per_res[residx].registered = 0;
8799
8800                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8801                                 if (lun->per_res[i+persis_offset].registered
8802                                     == 0)
8803                                         continue;
8804                                 lun->pending_ua[i] |=
8805                                         CTL_UA_RES_RELEASE;
8806                         }
8807
8808                         lun->per_res[residx].registered = 1;
8809                 }
8810                 mtx_unlock(&lun->lun_lock);
8811                 /* Send msg to other side */
8812                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8813                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8814                 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8815                 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8816                      sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8817                         printf("CTL:Persis Out error returned from "
8818                                "ctl_ha_msg_send %d\n", isc_retval);
8819                 }
8820                 break;
8821
8822         case SPRO_CLEAR:
8823                 /* send msg to other side */
8824
8825                 mtx_lock(&lun->lun_lock);
8826                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8827                 lun->res_type = 0;
8828                 lun->pr_key_count = 0;
8829                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8830
8831
8832                 memset(&lun->per_res[residx].res_key,
8833                        0, sizeof(lun->per_res[residx].res_key));
8834                 lun->per_res[residx].registered = 0;
8835
8836                 for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
8837                         if (lun->per_res[i].registered) {
8838                                 if (!persis_offset && i < CTL_MAX_INITIATORS)
8839                                         lun->pending_ua[i] |=
8840                                                 CTL_UA_RES_PREEMPT;
8841                                 else if (persis_offset && i >= persis_offset)
8842                                         lun->pending_ua[i-persis_offset] |=
8843                                             CTL_UA_RES_PREEMPT;
8844
8845                                 memset(&lun->per_res[i].res_key,
8846                                        0, sizeof(struct scsi_per_res_key));
8847                                 lun->per_res[i].registered = 0;
8848                         }
8849                 lun->PRGeneration++;
8850                 mtx_unlock(&lun->lun_lock);
8851                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8852                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8853                 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8854                 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8855                      sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8856                         printf("CTL:Persis Out error returned from "
8857                                "ctl_ha_msg_send %d\n", isc_retval);
8858                 }
8859                 break;
8860
8861         case SPRO_PREEMPT: {
8862                 int nretval;
8863
8864                 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8865                                           residx, ctsio, cdb, param);
8866                 if (nretval != 0)
8867                         return (CTL_RETVAL_COMPLETE);
8868                 break;
8869         }
8870         default:
8871                 panic("Invalid PR type %x", cdb->action);
8872         }
8873
8874 done:
8875         free(ctsio->kern_data_ptr, M_CTL);
8876         ctl_set_success(ctsio);
8877         ctl_done((union ctl_io *)ctsio);
8878
8879         return (retval);
8880 }
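#if 0
/*
 * Illustrative only: how an initiator-side REGISTER parameter list maps
 * onto struct scsi_per_res_out_parms as parsed above.  A sketch, not part
 * of CTL; the function name is made up.  res_key is the previously
 * registered key (zero on first registration) and serv_act_res_key is the
 * key being registered.
 */
static void
ctl_pr_register_parms_example(struct scsi_per_res_out_parms *parms,
                              uint64_t old_key, uint64_t new_key)
{
        memset(parms, 0, sizeof(*parms));
        scsi_u64to8b(old_key, parms->res_key.key);
        scsi_u64to8b(new_key, parms->serv_act_res_key);
        /*
         * Leave SPR_SPEC_I_PT/SPR_ALL_TG_PT/SPR_APTPL clear; CTL rejects
         * them (see the flags check in SPRO_REGISTER above).
         */
}
#endif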
8881
8882 /*
8883  * This routine is for handling a message from the other SC pertaining to
8884  * persistent reserve out.  All the error checking will have been done
8885  * there, so only performing the action is needed here to keep the two
8886  * SCs in sync.
8887  */
8888 static void
8889 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8890 {
8891         struct ctl_lun *lun;
8892         struct ctl_softc *softc;
8893         int i;
8894         uint32_t targ_lun;
8895
8896         softc = control_softc;
8897
8898         targ_lun = msg->hdr.nexus.targ_mapped_lun;
8899         lun = softc->ctl_luns[targ_lun];
8900         mtx_lock(&lun->lun_lock);
8901         switch (msg->pr.pr_info.action) {
8902         case CTL_PR_REG_KEY:
8903                 if (!lun->per_res[msg->pr.pr_info.residx].registered) {
8904                         lun->per_res[msg->pr.pr_info.residx].registered = 1;
8905                         lun->pr_key_count++;
8906                 }
8907                 lun->PRGeneration++;
8908                 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key,
8909                        msg->pr.pr_info.sa_res_key,
8910                        sizeof(struct scsi_per_res_key));
8911                 break;
8912
8913         case CTL_PR_UNREG_KEY:
8914                 lun->per_res[msg->pr.pr_info.residx].registered = 0;
8915                 memset(&lun->per_res[msg->pr.pr_info.residx].res_key,
8916                        0, sizeof(struct scsi_per_res_key));
8917                 lun->pr_key_count--;
8918
8919                 /* XXX Need to see if the reservation has been released */
8920                 /* if so do we need to generate UA? */
8921                 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8922                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8923                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8924
8925                         if ((lun->res_type == SPR_TYPE_WR_EX_RO
8926                           || lun->res_type == SPR_TYPE_EX_AC_RO)
8927                          && lun->pr_key_count) {
8928                                 /*
8929                                  * If the reservation is a registrants
8930                                  * only type we need to generate a UA
8931                                  * for other registered inits.  The
8932                                  * sense code should be RESERVATIONS
8933                                  * RELEASED
8934                                  */
8935
8936                                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8937                                         if (lun->per_res[i+
8938                                             persis_offset].registered == 0)
8939                                                 continue;
8940
8941                                         lun->pending_ua[i] |=
8942                                                 CTL_UA_RES_RELEASE;
8943                                 }
8944                         }
8945                         lun->res_type = 0;
8946                 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8947                         if (lun->pr_key_count==0) {
8948                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8949                                 lun->res_type = 0;
8950                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8951                         }
8952                 }
8953                 lun->PRGeneration++;
8954                 break;
8955
8956         case CTL_PR_RESERVE:
8957                 lun->flags |= CTL_LUN_PR_RESERVED;
8958                 lun->res_type = msg->pr.pr_info.res_type;
8959                 lun->pr_res_idx = msg->pr.pr_info.residx;
8960
8961                 break;
8962
8963         case CTL_PR_RELEASE:
8964                 /*
8965                  * if this isn't an exclusive access res generate UA for all
8966                  * other registrants.
8967                  */
8968                 if (lun->res_type != SPR_TYPE_EX_AC
8969                  && lun->res_type != SPR_TYPE_WR_EX) {
8970                         for (i = 0; i < CTL_MAX_INITIATORS; i++)
8971                                 if (lun->per_res[i+persis_offset].registered)
8972                                         lun->pending_ua[i] |=
8973                                                 CTL_UA_RES_RELEASE;
8974                 }
8975
8976                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8977                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8978                 lun->res_type = 0;
8979                 break;
8980
8981         case CTL_PR_PREEMPT:
8982                 ctl_pro_preempt_other(lun, msg);
8983                 break;
8984         case CTL_PR_CLEAR:
8985                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8986                 lun->res_type = 0;
8987                 lun->pr_key_count = 0;
8988                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8989
8990                 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
8991                         if (lun->per_res[i].registered == 0)
8992                                 continue;
8993                         if (!persis_offset
8994                          && i < CTL_MAX_INITIATORS)
8995                                 lun->pending_ua[i] |= CTL_UA_RES_PREEMPT;
8996                         else if (persis_offset
8997                               && i >= persis_offset)
8998                                 lun->pending_ua[i-persis_offset] |=
8999                                         CTL_UA_RES_PREEMPT;
9000                         memset(&lun->per_res[i].res_key, 0,
9001                                sizeof(struct scsi_per_res_key));
9002                         lun->per_res[i].registered = 0;
9003                 }
9004                 lun->PRGeneration++;
9005                 break;
9006         }
9007
9008         mtx_unlock(&lun->lun_lock);
9009 }
9010
9011 int
9012 ctl_read_write(struct ctl_scsiio *ctsio)
9013 {
9014         struct ctl_lun *lun;
9015         struct ctl_lba_len_flags *lbalen;
9016         uint64_t lba;
9017         uint32_t num_blocks;
9018         int flags, retval;
9019         int isread;
9020
9021         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9022
9023         CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
9024
9025         flags = 0;
9026         retval = CTL_RETVAL_COMPLETE;
9027
9028         isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
9029               || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
9030         if (lun->flags & CTL_LUN_PR_RESERVED && isread) {
9031                 uint32_t residx;
9032
9033                 /*
9034                  * XXX KDM need a lock here.
9035                  */
9036                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
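                /*
                 * Reads conflict only with the exclusive-access types: for
                 * EX_AC, any nexus other than the holder; for EX_AC_RO and
                 * EX_AC_AR, any nexus that isn't registered.
                 */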
9037                 if ((lun->res_type == SPR_TYPE_EX_AC
9038                   && residx != lun->pr_res_idx)
9039                  || ((lun->res_type == SPR_TYPE_EX_AC_RO
9040                    || lun->res_type == SPR_TYPE_EX_AC_AR)
9041                   && !lun->per_res[residx].registered)) {
9042                         ctl_set_reservation_conflict(ctsio);
9043                         ctl_done((union ctl_io *)ctsio);
9044                         return (CTL_RETVAL_COMPLETE);
9045                 }
9046         }
9047
9048         switch (ctsio->cdb[0]) {
9049         case READ_6:
9050         case WRITE_6: {
9051                 struct scsi_rw_6 *cdb;
9052
9053                 cdb = (struct scsi_rw_6 *)ctsio->cdb;
9054
9055                 lba = scsi_3btoul(cdb->addr);
9056                 /* only 5 bits are valid in the most significant address byte */
9057                 lba &= 0x1fffff;
9058                 num_blocks = cdb->length;
9059                 /*
9060                  * A transfer length of zero means 256 blocks (SBC-2).
9061                  */
9062                 if (num_blocks == 0)
9063                         num_blocks = 256;
9064                 break;
9065         }
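        /*
         * Worked example of the READ(6) decode above, with made-up bytes:
         * addr = { 0xff, 0x34, 0x56 } gives a raw value of 0xff3456, and
         * masking with 0x1fffff drops the top three bits (the LUN field in
         * very old SCSI) for lba = 0x1f3456; a length byte of 0 means 256
         * blocks.
         */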
9066         case READ_10:
9067         case WRITE_10: {
9068                 struct scsi_rw_10 *cdb;
9069
9070                 cdb = (struct scsi_rw_10 *)ctsio->cdb;
9071                 if (cdb->byte2 & SRW10_FUA)
9072                         flags |= CTL_LLF_FUA;
9073                 if (cdb->byte2 & SRW10_DPO)
9074                         flags |= CTL_LLF_DPO;
9075                 lba = scsi_4btoul(cdb->addr);
9076                 num_blocks = scsi_2btoul(cdb->length);
9077                 break;
9078         }
9079         case WRITE_VERIFY_10: {
9080                 struct scsi_write_verify_10 *cdb;
9081
9082                 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
9083                 flags |= CTL_LLF_FUA;
9084                 if (cdb->byte2 & SWV_DPO)
9085                         flags |= CTL_LLF_DPO;
9086                 lba = scsi_4btoul(cdb->addr);
9087                 num_blocks = scsi_2btoul(cdb->length);
9088                 break;
9089         }
9090         case READ_12:
9091         case WRITE_12: {
9092                 struct scsi_rw_12 *cdb;
9093
9094                 cdb = (struct scsi_rw_12 *)ctsio->cdb;
9095                 if (cdb->byte2 & SRW12_FUA)
9096                         flags |= CTL_LLF_FUA;
9097                 if (cdb->byte2 & SRW12_DPO)
9098                         flags |= CTL_LLF_DPO;
9099                 lba = scsi_4btoul(cdb->addr);
9100                 num_blocks = scsi_4btoul(cdb->length);
9101                 break;
9102         }
9103         case WRITE_VERIFY_12: {
9104                 struct scsi_write_verify_12 *cdb;
9105
9106                 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
9107                 flags |= CTL_LLF_FUA;
9108                 if (cdb->byte2 & SWV_DPO)
9109                         flags |= CTL_LLF_DPO;
9110                 lba = scsi_4btoul(cdb->addr);
9111                 num_blocks = scsi_4btoul(cdb->length);
9112                 break;
9113         }
9114         case READ_16:
9115         case WRITE_16: {
9116                 struct scsi_rw_16 *cdb;
9117
9118                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
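                /*
                 * The FUA and DPO bits sit at the same offsets as in the
                 * 12-byte CDB, hence the SRW12_* names.
                 */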
9119                 if (cdb->byte2 & SRW12_FUA)
9120                         flags |= CTL_LLF_FUA;
9121                 if (cdb->byte2 & SRW12_DPO)
9122                         flags |= CTL_LLF_DPO;
9123                 lba = scsi_8btou64(cdb->addr);
9124                 num_blocks = scsi_4btoul(cdb->length);
9125                 break;
9126         }
9127         case WRITE_VERIFY_16: {
9128                 struct scsi_write_verify_16 *cdb;
9129
9130                 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
9131                 flags |= CTL_LLF_FUA;
9132                 if (cdb->byte2 & SWV_DPO)
9133                         flags |= CTL_LLF_DPO;
9134                 lba = scsi_8btou64(cdb->addr);
9135                 num_blocks = scsi_4btoul(cdb->length);
9136                 break;
9137         }
9138         default:
9139                 /*
9140                  * We got a command we don't support.  This shouldn't
                 * We got a command we don't support.  This shouldn't
                 * happen; commands should be filtered out above us.
9143                 ctl_set_invalid_opcode(ctsio);
9144                 ctl_done((union ctl_io *)ctsio);
9145
9146                 return (CTL_RETVAL_COMPLETE);
9147                 break; /* NOTREACHED */
9148         }
9149
9150         /*
9151          * The first check is to make sure we're in bounds, the second
9152          * check is to catch wrap-around problems.  If the lba + num blocks
9153          * is less than the lba, then we've wrapped around and the block
9154          * range is invalid anyway.
9155          */
9156         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9157          || ((lba + num_blocks) < lba)) {
9158                 ctl_set_lba_out_of_range(ctsio);
9159                 ctl_done((union ctl_io *)ctsio);
9160                 return (CTL_RETVAL_COMPLETE);
9161         }
9162
9163         /*
9164          * According to SBC-3, a transfer length of 0 is not an error.
9165          * Note that this cannot happen with WRITE(6) or READ(6), since 0
9166          * translates to 256 blocks for those commands.
9167          */
9168         if (num_blocks == 0) {
9169                 ctl_set_success(ctsio);
9170                 ctl_done((union ctl_io *)ctsio);
9171                 return (CTL_RETVAL_COMPLETE);
9172         }
9173
9174         /* Set FUA and/or DPO if caches are disabled. */
9175         if (isread) {
9176                 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
9177                     SCP_RCD) != 0)
9178                         flags |= CTL_LLF_FUA | CTL_LLF_DPO;
9179         } else {
9180                 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
9181                     SCP_WCE) == 0)
9182                         flags |= CTL_LLF_FUA;
9183         }
9184
9185         lbalen = (struct ctl_lba_len_flags *)
9186             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9187         lbalen->lba = lba;
9188         lbalen->len = num_blocks;
9189         lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
9190
9191         ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9192         ctsio->kern_rel_offset = 0;
9193
9194         CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
9195
9196         retval = lun->backend->data_submit((union ctl_io *)ctsio);
9197
9198         return (retval);
9199 }
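/*
 * Illustrative sketch only -- this helper is hypothetical and not used by
 * the driver: the bounds/wrap-around test above is repeated verbatim in
 * ctl_cnw() and ctl_verify() below and could be factored out like this.
 */
static __inline int
ctl_lba_range_valid(uint64_t lba, uint32_t num_blocks, uint64_t maxlba)
{

        /*
         * Reject ranges that run past the last block, or whose LBA sum
         * wraps past 2^64 (a wrapped sum is always smaller than lba).
         */
        if ((lba + num_blocks) > (maxlba + 1) || (lba + num_blocks) < lba)
                return (0);
        return (1);
}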
9200
9201 static int
9202 ctl_cnw_cont(union ctl_io *io)
9203 {
9204         struct ctl_scsiio *ctsio;
9205         struct ctl_lun *lun;
9206         struct ctl_lba_len_flags *lbalen;
9207         int retval;
9208
9209         ctsio = &io->scsiio;
9210         ctsio->io_hdr.status = CTL_STATUS_NONE;
9211         ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
9212         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9213         lbalen = (struct ctl_lba_len_flags *)
9214             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9215         lbalen->flags &= ~CTL_LLF_COMPARE;
9216         lbalen->flags |= CTL_LLF_WRITE;
9217
9218         CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
9219         retval = lun->backend->data_submit((union ctl_io *)ctsio);
9220         return (retval);
9221 }
9222
9223 int
9224 ctl_cnw(struct ctl_scsiio *ctsio)
9225 {
9226         struct ctl_lun *lun;
9227         struct ctl_lba_len_flags *lbalen;
9228         uint64_t lba;
9229         uint32_t num_blocks;
9230         int flags, retval;
9231
9232         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9233
9234         CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
9235
9236         flags = 0;
9237         retval = CTL_RETVAL_COMPLETE;
9238
9239         switch (ctsio->cdb[0]) {
9240         case COMPARE_AND_WRITE: {
9241                 struct scsi_compare_and_write *cdb;
9242
9243                 cdb = (struct scsi_compare_and_write *)ctsio->cdb;
9244                 if (cdb->byte2 & SRW10_FUA)
9245                         flags |= CTL_LLF_FUA;
9246                 if (cdb->byte2 & SRW10_DPO)
9247                         flags |= CTL_LLF_DPO;
9248                 lba = scsi_8btou64(cdb->addr);
9249                 num_blocks = cdb->length;
9250                 break;
9251         }
9252         default:
9253                 /*
                 * We got a command we don't support.  This shouldn't
                 * happen; commands should be filtered out above us.
9256                  */
9257                 ctl_set_invalid_opcode(ctsio);
9258                 ctl_done((union ctl_io *)ctsio);
9259
9260                 return (CTL_RETVAL_COMPLETE);
9261                 break; /* NOTREACHED */
9262         }
9263
9264         /*
9265          * The first check is to make sure we're in bounds, the second
9266          * check is to catch wrap-around problems.  If the lba + num blocks
9267          * is less than the lba, then we've wrapped around and the block
9268          * range is invalid anyway.
9269          */
9270         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9271          || ((lba + num_blocks) < lba)) {
9272                 ctl_set_lba_out_of_range(ctsio);
9273                 ctl_done((union ctl_io *)ctsio);
9274                 return (CTL_RETVAL_COMPLETE);
9275         }
9276
9277         /*
9278          * According to SBC-3, a transfer length of 0 is not an error.
9279          */
9280         if (num_blocks == 0) {
9281                 ctl_set_success(ctsio);
9282                 ctl_done((union ctl_io *)ctsio);
9283                 return (CTL_RETVAL_COMPLETE);
9284         }
9285
9286         /* Set FUA if write cache is disabled. */
9287         if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
9288             SCP_WCE) == 0)
9289                 flags |= CTL_LLF_FUA;
9290
9291         ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
9292         ctsio->kern_rel_offset = 0;
9293
9294         /*
9295          * Set the IO_CONT flag, so that if this I/O gets passed to
9296          * ctl_data_submit_done(), it'll get passed back to
         * ctl_cnw_cont() for further processing.
9298          */
9299         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
9300         ctsio->io_cont = ctl_cnw_cont;
9301
9302         lbalen = (struct ctl_lba_len_flags *)
9303             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9304         lbalen->lba = lba;
9305         lbalen->len = num_blocks;
9306         lbalen->flags = CTL_LLF_COMPARE | flags;
9307
9308         CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
9309         retval = lun->backend->data_submit((union ctl_io *)ctsio);
9310         return (retval);
9311 }
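/*
 * Note on the COMPARE AND WRITE flow above: the initiator sends 2 *
 * NUMBER OF LOGICAL BLOCKS worth of data -- the compare buffer followed
 * by the write buffer -- which is why kern_total_len is doubled.  The
 * first data_submit() pass runs with CTL_LLF_COMPARE; if the compare
 * phase succeeds, ctl_data_submit_done() sees CTL_FLAG_IO_CONT and
 * invokes ctl_cnw_cont(), which flips the flags to CTL_LLF_WRITE and
 * resubmits the same I/O for the write phase.
 */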
9312
9313 int
9314 ctl_verify(struct ctl_scsiio *ctsio)
9315 {
9316         struct ctl_lun *lun;
9317         struct ctl_lba_len_flags *lbalen;
9318         uint64_t lba;
9319         uint32_t num_blocks;
9320         int bytchk, flags;
9321         int retval;
9322
9323         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9324
9325         CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
9326
9327         bytchk = 0;
9328         flags = CTL_LLF_FUA;
9329         retval = CTL_RETVAL_COMPLETE;
9330
9331         switch (ctsio->cdb[0]) {
9332         case VERIFY_10: {
9333                 struct scsi_verify_10 *cdb;
9334
9335                 cdb = (struct scsi_verify_10 *)ctsio->cdb;
9336                 if (cdb->byte2 & SVFY_BYTCHK)
9337                         bytchk = 1;
9338                 if (cdb->byte2 & SVFY_DPO)
9339                         flags |= CTL_LLF_DPO;
9340                 lba = scsi_4btoul(cdb->addr);
9341                 num_blocks = scsi_2btoul(cdb->length);
9342                 break;
9343         }
9344         case VERIFY_12: {
9345                 struct scsi_verify_12 *cdb;
9346
9347                 cdb = (struct scsi_verify_12 *)ctsio->cdb;
9348                 if (cdb->byte2 & SVFY_BYTCHK)
9349                         bytchk = 1;
9350                 if (cdb->byte2 & SVFY_DPO)
9351                         flags |= CTL_LLF_DPO;
9352                 lba = scsi_4btoul(cdb->addr);
9353                 num_blocks = scsi_4btoul(cdb->length);
9354                 break;
9355         }
9356         case VERIFY_16: {
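                /* VERIFY(16) shares the CDB layout of READ/WRITE(16). */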
9357                 struct scsi_rw_16 *cdb;
9358
9359                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
9360                 if (cdb->byte2 & SVFY_BYTCHK)
9361                         bytchk = 1;
9362                 if (cdb->byte2 & SVFY_DPO)
9363                         flags |= CTL_LLF_DPO;
9364                 lba = scsi_8btou64(cdb->addr);
9365                 num_blocks = scsi_4btoul(cdb->length);
9366                 break;
9367         }
9368         default:
9369                 /*
                 * We got a command we don't support.  This shouldn't
                 * happen; commands should be filtered out above us.
9372                  */
9373                 ctl_set_invalid_opcode(ctsio);
9374                 ctl_done((union ctl_io *)ctsio);
9375                 return (CTL_RETVAL_COMPLETE);
9376         }
9377
9378         /*
9379          * The first check is to make sure we're in bounds, the second
9380          * check is to catch wrap-around problems.  If the lba + num blocks
9381          * is less than the lba, then we've wrapped around and the block
9382          * range is invalid anyway.
9383          */
9384         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9385          || ((lba + num_blocks) < lba)) {
9386                 ctl_set_lba_out_of_range(ctsio);
9387                 ctl_done((union ctl_io *)ctsio);
9388                 return (CTL_RETVAL_COMPLETE);
9389         }
9390
9391         /*
9392          * According to SBC-3, a transfer length of 0 is not an error.
9393          */
9394         if (num_blocks == 0) {
9395                 ctl_set_success(ctsio);
9396                 ctl_done((union ctl_io *)ctsio);
9397                 return (CTL_RETVAL_COMPLETE);
9398         }
9399
9400         lbalen = (struct ctl_lba_len_flags *)
9401             &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9402         lbalen->lba = lba;
9403         lbalen->len = num_blocks;
9404         if (bytchk) {
9405                 lbalen->flags = CTL_LLF_COMPARE | flags;
9406                 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9407         } else {
9408                 lbalen->flags = CTL_LLF_VERIFY | flags;
9409                 ctsio->kern_total_len = 0;
9410         }
9411         ctsio->kern_rel_offset = 0;
9412
9413         CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
9414         retval = lun->backend->data_submit((union ctl_io *)ctsio);
9415         return (retval);
9416 }
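/*
 * Note on VERIFY above: with BYTCHK set the initiator supplies the data
 * to compare against the medium, so a real data phase is needed
 * (CTL_LLF_COMPARE); with BYTCHK clear the device only verifies that the
 * blocks can be read, so there is no data phase at all (CTL_LLF_VERIFY,
 * kern_total_len = 0).
 */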
9417
9418 int
9419 ctl_report_luns(struct ctl_scsiio *ctsio)
9420 {
9421         struct scsi_report_luns *cdb;
9422         struct scsi_report_luns_data *lun_data;
9423         struct ctl_lun *lun, *request_lun;
9424         int num_luns, retval;
9425         uint32_t alloc_len, lun_datalen;
9426         int num_filled, well_known;
9427         uint32_t initidx, targ_lun_id, lun_id;
9428
9429         retval = CTL_RETVAL_COMPLETE;
9430         well_known = 0;
9431
9432         cdb = (struct scsi_report_luns *)ctsio->cdb;
9433
9434         CTL_DEBUG_PRINT(("ctl_report_luns\n"));
9435
9436         mtx_lock(&control_softc->ctl_lock);
9437         num_luns = control_softc->num_luns;
9438         mtx_unlock(&control_softc->ctl_lock);
9439
9440         switch (cdb->select_report) {
9441         case RPL_REPORT_DEFAULT:
9442         case RPL_REPORT_ALL:
9443                 break;
9444         case RPL_REPORT_WELLKNOWN:
9445                 well_known = 1;
9446                 num_luns = 0;
9447                 break;
9448         default:
9449                 ctl_set_invalid_field(ctsio,
9450                                       /*sks_valid*/ 1,
9451                                       /*command*/ 1,
9452                                       /*field*/ 2,
9453                                       /*bit_valid*/ 0,
9454                                       /*bit*/ 0);
9455                 ctl_done((union ctl_io *)ctsio);
9456                 return (retval);
9457                 break; /* NOTREACHED */
9458         }
9459
9460         alloc_len = scsi_4btoul(cdb->length);
9461         /*
9462          * The initiator has to allocate at least 16 bytes for this request,
         * so it can at least get the header and the first LUN.  Otherwise
9464          * we reject the request (per SPC-3 rev 14, section 6.21).
9465          */
9466         if (alloc_len < (sizeof(struct scsi_report_luns_data) +
9467             sizeof(struct scsi_report_luns_lundata))) {
9468                 ctl_set_invalid_field(ctsio,
9469                                       /*sks_valid*/ 1,
9470                                       /*command*/ 1,
9471                                       /*field*/ 6,
9472                                       /*bit_valid*/ 0,
9473                                       /*bit*/ 0);
9474                 ctl_done((union ctl_io *)ctsio);
9475                 return (retval);
9476         }
9477
9478         request_lun = (struct ctl_lun *)
9479                 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9480
9481         lun_datalen = sizeof(*lun_data) +
9482                 (num_luns * sizeof(struct scsi_report_luns_lundata));
9483
9484         ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
9485         lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
9486         ctsio->kern_sg_entries = 0;
9487
9488         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9489
9490         mtx_lock(&control_softc->ctl_lock);
        for (targ_lun_id = 0, num_filled = 0;
             targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns;
             targ_lun_id++) {
9492                 lun_id = ctl_map_lun(ctsio->io_hdr.nexus.targ_port, targ_lun_id);
9493                 if (lun_id >= CTL_MAX_LUNS)
9494                         continue;
9495                 lun = control_softc->ctl_luns[lun_id];
9496                 if (lun == NULL)
9497                         continue;
9498
9499                 if (targ_lun_id <= 0xff) {
9500                         /*
9501                          * Peripheral addressing method, bus number 0.
9502                          */
9503                         lun_data->luns[num_filled].lundata[0] =
9504                                 RPL_LUNDATA_ATYP_PERIPH;
9505                         lun_data->luns[num_filled].lundata[1] = targ_lun_id;
9506                         num_filled++;
9507                 } else if (targ_lun_id <= 0x3fff) {
9508                         /*
9509                          * Flat addressing method.
9510                          */
9511                         lun_data->luns[num_filled].lundata[0] =
9512                                 RPL_LUNDATA_ATYP_FLAT |
9513                                 (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK);
                        lun_data->luns[num_filled].lundata[1] =
                                targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS;
9523                         num_filled++;
9524                 } else {
9525                         printf("ctl_report_luns: bogus LUN number %jd, "
9526                                "skipping\n", (intmax_t)targ_lun_id);
9527                 }
9528                 /*
9529                  * According to SPC-3, rev 14 section 6.21:
9530                  *
9531                  * "The execution of a REPORT LUNS command to any valid and
9532                  * installed logical unit shall clear the REPORTED LUNS DATA
9533                  * HAS CHANGED unit attention condition for all logical
9534                  * units of that target with respect to the requesting
9535                  * initiator. A valid and installed logical unit is one
9536                  * having a PERIPHERAL QUALIFIER of 000b in the standard
9537                  * INQUIRY data (see 6.4.2)."
9538                  *
9539                  * If request_lun is NULL, the LUN this report luns command
9540                  * was issued to is either disabled or doesn't exist. In that
9541                  * case, we shouldn't clear any pending lun change unit
9542                  * attention.
9543                  */
9544                 if (request_lun != NULL) {
9545                         mtx_lock(&lun->lun_lock);
9546                         lun->pending_ua[initidx] &= ~CTL_UA_LUN_CHANGE;
9547                         mtx_unlock(&lun->lun_lock);
9548                 }
9549         }
9550         mtx_unlock(&control_softc->ctl_lock);
9551
9552         /*
9553          * It's quite possible that we've returned fewer LUNs than we allocated
9554          * space for.  Trim it.
9555          */
9556         lun_datalen = sizeof(*lun_data) +
9557                 (num_filled * sizeof(struct scsi_report_luns_lundata));
9558
9559         if (lun_datalen < alloc_len) {
9560                 ctsio->residual = alloc_len - lun_datalen;
9561                 ctsio->kern_data_len = lun_datalen;
9562                 ctsio->kern_total_len = lun_datalen;
9563         } else {
9564                 ctsio->residual = 0;
9565                 ctsio->kern_data_len = alloc_len;
9566                 ctsio->kern_total_len = alloc_len;
9567         }
9568         ctsio->kern_data_resid = 0;
9569         ctsio->kern_rel_offset = 0;
9570         ctsio->kern_sg_entries = 0;
9571
9572         /*
9573          * We set this to the actual data length, regardless of how much
         * space we actually have to return results.  If the user looks at
         * this value, they'll know whether or not they allocated enough
         * space and can reissue the command if necessary.  We don't support
         * well known logical units, so if the user asks for those, we
         * return none.
9578          */
9579         scsi_ulto4b(lun_datalen - 8, lun_data->length);
9580
9581         /*
9582          * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
9583          * this request.
9584          */
9585         ctsio->scsi_status = SCSI_STATUS_OK;
9586
9587         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9588         ctsio->be_move_done = ctl_config_move_done;
9589         ctl_datamove((union ctl_io *)ctsio);
9590
9591         return (retval);
9592 }
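/*
 * Illustrative sketch only -- a hypothetical helper, not part of the
 * driver -- of the two LUN encodings built above, assuming the standard
 * 8-byte SAM LUN structure: LUNs 0-255 use the peripheral addressing
 * method, and LUNs up to 0x3fff use the flat addressing method.
 */
static __inline void
ctl_encode_lun_sketch(uint32_t targ_lun_id, uint8_t *lundata)
{

        memset(lundata, 0, 8);
        if (targ_lun_id <= 0xff) {
                /* Peripheral addressing, bus 0: the LUN in the second byte. */
                lundata[0] = RPL_LUNDATA_ATYP_PERIPH;
                lundata[1] = targ_lun_id;
        } else {
                /* Flat addressing: the LUN split across the first two bytes. */
                lundata[0] = RPL_LUNDATA_ATYP_FLAT |
                    (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK);
                lundata[1] = targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS;
        }
}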
9593
9594 int
9595 ctl_request_sense(struct ctl_scsiio *ctsio)
9596 {
9597         struct scsi_request_sense *cdb;
9598         struct scsi_sense_data *sense_ptr;
9599         struct ctl_lun *lun;
9600         uint32_t initidx;
9601         int have_error;
9602         scsi_sense_data_type sense_format;
9603
9604         cdb = (struct scsi_request_sense *)ctsio->cdb;
9605
9606         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9607
9608         CTL_DEBUG_PRINT(("ctl_request_sense\n"));
9609
9610         /*
9611          * Determine which sense format the user wants.
9612          */
9613         if (cdb->byte2 & SRS_DESC)
9614                 sense_format = SSD_TYPE_DESC;
9615         else
9616                 sense_format = SSD_TYPE_FIXED;
9617
        /*
         * Zero the buffer so that we never return stale kernel memory if
         * the initiator asks for more data than we fill in.
         */
        ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL,
            M_WAITOK | M_ZERO);
9619         sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
9620         ctsio->kern_sg_entries = 0;
9621
9622         /*
9623          * struct scsi_sense_data, which is currently set to 256 bytes, is
9624          * larger than the largest allowed value for the length field in the
9625          * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
9626          */
9627         ctsio->residual = 0;
9628         ctsio->kern_data_len = cdb->length;
9629         ctsio->kern_total_len = cdb->length;
9630
9631         ctsio->kern_data_resid = 0;
9632         ctsio->kern_rel_offset = 0;
9633         ctsio->kern_sg_entries = 0;
9634
9635         /*
9636          * If we don't have a LUN, we don't have any pending sense.
9637          */
9638         if (lun == NULL)
9639                 goto no_sense;
9640
9641         have_error = 0;
9642         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
9643         /*
9644          * Check for pending sense, and then for pending unit attentions.
9645          * Pending sense gets returned first, then pending unit attentions.
9646          */
9647         mtx_lock(&lun->lun_lock);
9648 #ifdef CTL_WITH_CA
9649         if (ctl_is_set(lun->have_ca, initidx)) {
9650                 scsi_sense_data_type stored_format;
9651
9652                 /*
9653                  * Check to see which sense format was used for the stored
9654                  * sense data.
9655                  */
9656                 stored_format = scsi_sense_type(&lun->pending_sense[initidx]);
9657
9658                 /*
9659                  * If the user requested a different sense format than the
9660                  * one we stored, then we need to convert it to the other
9661                  * format.  If we're going from descriptor to fixed format
9662                  * sense data, we may lose things in translation, depending
9663                  * on what options were used.
9664                  *
                 * If the stored format is SSD_TYPE_NONE (i.e. invalid
                 * for some reason), we'll just copy it out as-is.
9667                  */
9668                 if ((stored_format == SSD_TYPE_FIXED)
9669                  && (sense_format == SSD_TYPE_DESC))
9670                         ctl_sense_to_desc((struct scsi_sense_data_fixed *)
9671                             &lun->pending_sense[initidx],
9672                             (struct scsi_sense_data_desc *)sense_ptr);
9673                 else if ((stored_format == SSD_TYPE_DESC)
9674                       && (sense_format == SSD_TYPE_FIXED))
9675                         ctl_sense_to_fixed((struct scsi_sense_data_desc *)
9676                             &lun->pending_sense[initidx],
9677                             (struct scsi_sense_data_fixed *)sense_ptr);
9678                 else
9679                         memcpy(sense_ptr, &lun->pending_sense[initidx],
9680                                ctl_min(sizeof(*sense_ptr),
9681                                sizeof(lun->pending_sense[initidx])));
9682
9683                 ctl_clear_mask(lun->have_ca, initidx);
9684                 have_error = 1;
9685         } else
9686 #endif
9687         if (lun->pending_ua[initidx] != CTL_UA_NONE) {
9688                 ctl_ua_type ua_type;
9689
9690                 ua_type = ctl_build_ua(&lun->pending_ua[initidx],
9691                                        sense_ptr, sense_format);
9692                 if (ua_type != CTL_UA_NONE)
9693                         have_error = 1;
9694         }
9695         mtx_unlock(&lun->lun_lock);
9696
9697         /*
         * We already have a pending error; return it.
9699          */
9700         if (have_error != 0) {
9701                 /*
9702                  * We report the SCSI status as OK, since the status of the
9703                  * request sense command itself is OK.
9704                  */
9705                 ctsio->scsi_status = SCSI_STATUS_OK;
9706
9707                 /*
9708                  * We report 0 for the sense length, because we aren't doing
9709                  * autosense in this case.  We're reporting sense as
9710                  * parameter data.
9711                  */
9712                 ctsio->sense_len = 0;
9713                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9714                 ctsio->be_move_done = ctl_config_move_done;
9715                 ctl_datamove((union ctl_io *)ctsio);
9716
9717                 return (CTL_RETVAL_COMPLETE);
9718         }
9719
9720 no_sense:
9721
9722         /*
9723          * No sense information to report, so we report that everything is
9724          * okay.
9725          */
9726         ctl_set_sense_data(sense_ptr,
9727                            lun,
9728                            sense_format,
9729                            /*current_error*/ 1,
9730                            /*sense_key*/ SSD_KEY_NO_SENSE,
9731                            /*asc*/ 0x00,
9732                            /*ascq*/ 0x00,
9733                            SSD_ELEM_NONE);
9734
9735         ctsio->scsi_status = SCSI_STATUS_OK;
9736
9737         /*
9738          * We report 0 for the sense length, because we aren't doing
9739          * autosense in this case.  We're reporting sense as parameter data.
9740          */
9741         ctsio->sense_len = 0;
9742         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9743         ctsio->be_move_done = ctl_config_move_done;
9744         ctl_datamove((union ctl_io *)ctsio);
9745
9746         return (CTL_RETVAL_COMPLETE);
9747 }
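/*
 * Note on ctl_request_sense() above: pending sense (when CTL_WITH_CA is
 * enabled) takes precedence over pending unit attentions, and either one
 * is returned as parameter data with SCSI status OK rather than as
 * autosense, which is why sense_len is reported as 0.
 */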
9748
9749 int
9750 ctl_tur(struct ctl_scsiio *ctsio)
9751 {
9752         struct ctl_lun *lun;
9753
9754         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9755
9756         CTL_DEBUG_PRINT(("ctl_tur\n"));
9757
9758         if (lun == NULL)
9759                 return (EINVAL);
9760
9761         ctsio->scsi_status = SCSI_STATUS_OK;
9762         ctsio->io_hdr.status = CTL_SUCCESS;
9763
9764         ctl_done((union ctl_io *)ctsio);
9765
9766         return (CTL_RETVAL_COMPLETE);
9767 }
9768
9769 #ifdef notyet
9770 static int
9771 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
9772 {
9773
9774 }
9775 #endif
9776
9777 static int
9778 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
9779 {
9780         struct scsi_vpd_supported_pages *pages;
9781         int sup_page_size;
9782         struct ctl_lun *lun;
9783
9784         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9785
9786         sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
9787             SCSI_EVPD_NUM_SUPPORTED_PAGES;
9788         ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
9789         pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
9790         ctsio->kern_sg_entries = 0;
9791
9792         if (sup_page_size < alloc_len) {
9793                 ctsio->residual = alloc_len - sup_page_size;
9794                 ctsio->kern_data_len = sup_page_size;
9795                 ctsio->kern_total_len = sup_page_size;
9796         } else {
9797                 ctsio->residual = 0;
9798                 ctsio->kern_data_len = alloc_len;
9799                 ctsio->kern_total_len = alloc_len;
9800         }
9801         ctsio->kern_data_resid = 0;
9802         ctsio->kern_rel_offset = 0;
9803         ctsio->kern_sg_entries = 0;
9804
9805         /*
9806          * The control device is always connected.  The disk device, on the
9807          * other hand, may not be online all the time.  Need to change this
9808          * to figure out whether the disk device is actually online or not.
9809          */
9810         if (lun != NULL)
9811                 pages->device = (SID_QUAL_LU_CONNECTED << 5) |
9812                                 lun->be_lun->lun_type;
9813         else
9814                 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9815
9816         pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES;
9817         /* Supported VPD pages */
9818         pages->page_list[0] = SVPD_SUPPORTED_PAGES;
9819         /* Serial Number */
9820         pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
9821         /* Device Identification */
9822         pages->page_list[2] = SVPD_DEVICE_ID;
9823         /* Extended INQUIRY Data */
9824         pages->page_list[3] = SVPD_EXTENDED_INQUIRY_DATA;
9825         /* Mode Page Policy */
9826         pages->page_list[4] = SVPD_MODE_PAGE_POLICY;
9827         /* SCSI Ports */
9828         pages->page_list[5] = SVPD_SCSI_PORTS;
9829         /* Third-party Copy */
9830         pages->page_list[6] = SVPD_SCSI_TPC;
9831         /* Block limits */
9832         pages->page_list[7] = SVPD_BLOCK_LIMITS;
9833         /* Block Device Characteristics */
9834         pages->page_list[8] = SVPD_BDC;
9835         /* Logical Block Provisioning */
9836         pages->page_list[9] = SVPD_LBP;
9837
9838         ctsio->scsi_status = SCSI_STATUS_OK;
9839
9840         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9841         ctsio->be_move_done = ctl_config_move_done;
9842         ctl_datamove((union ctl_io *)ctsio);
9843
9844         return (CTL_RETVAL_COMPLETE);
9845 }
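/*
 * Illustrative sketch only -- a hypothetical helper, not used by the
 * driver: every VPD page handler in this file repeats the allocation
 * length clamp seen above, which could be centralized like this.
 */
static __inline void
ctl_evpd_set_len_sketch(struct ctl_scsiio *ctsio, int data_len, int alloc_len)
{

        /* Never transfer more than the CDB's ALLOCATION LENGTH allows. */
        if (data_len < alloc_len) {
                ctsio->residual = alloc_len - data_len;
                ctsio->kern_data_len = data_len;
                ctsio->kern_total_len = data_len;
        } else {
                ctsio->residual = 0;
                ctsio->kern_data_len = alloc_len;
                ctsio->kern_total_len = alloc_len;
        }
        ctsio->kern_data_resid = 0;
        ctsio->kern_rel_offset = 0;
        ctsio->kern_sg_entries = 0;
}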
9846
9847 static int
9848 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9849 {
9850         struct scsi_vpd_unit_serial_number *sn_ptr;
9851         struct ctl_lun *lun;
9852
9853         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9854
9855         ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO);
9856         sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9857         ctsio->kern_sg_entries = 0;
9858
9859         if (sizeof(*sn_ptr) < alloc_len) {
9860                 ctsio->residual = alloc_len - sizeof(*sn_ptr);
9861                 ctsio->kern_data_len = sizeof(*sn_ptr);
9862                 ctsio->kern_total_len = sizeof(*sn_ptr);
9863         } else {
9864                 ctsio->residual = 0;
9865                 ctsio->kern_data_len = alloc_len;
9866                 ctsio->kern_total_len = alloc_len;
9867         }
9868         ctsio->kern_data_resid = 0;
9869         ctsio->kern_rel_offset = 0;
9870         ctsio->kern_sg_entries = 0;
9871
9872         /*
9873          * The control device is always connected.  The disk device, on the
9874          * other hand, may not be online all the time.  Need to change this
9875          * to figure out whether the disk device is actually online or not.
9876          */
9877         if (lun != NULL)
9878                 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9879                                   lun->be_lun->lun_type;
9880         else
9881                 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9882
9883         sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9884         sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
9885         /*
9886          * If we don't have a LUN, we just leave the serial number as
9887          * all spaces.
9888          */
9889         memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
9890         if (lun != NULL) {
9891                 strncpy((char *)sn_ptr->serial_num,
9892                         (char *)lun->be_lun->serial_num, CTL_SN_LEN);
9893         }
9894         ctsio->scsi_status = SCSI_STATUS_OK;
9895
9896         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9897         ctsio->be_move_done = ctl_config_move_done;
9898         ctl_datamove((union ctl_io *)ctsio);
9899
9900         return (CTL_RETVAL_COMPLETE);
9901 }
9902
9903
9904 static int
9905 ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
9906 {
9907         struct scsi_vpd_extended_inquiry_data *eid_ptr;
9908         struct ctl_lun *lun;
9909         int data_len;
9910
9911         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9912
        data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
9915
9916         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9917         eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
9918         ctsio->kern_sg_entries = 0;
9919
9920         if (data_len < alloc_len) {
9921                 ctsio->residual = alloc_len - data_len;
9922                 ctsio->kern_data_len = data_len;
9923                 ctsio->kern_total_len = data_len;
9924         } else {
9925                 ctsio->residual = 0;
9926                 ctsio->kern_data_len = alloc_len;
9927                 ctsio->kern_total_len = alloc_len;
9928         }
9929         ctsio->kern_data_resid = 0;
9930         ctsio->kern_rel_offset = 0;
9931         ctsio->kern_sg_entries = 0;
9932
9933         /*
9934          * The control device is always connected.  The disk device, on the
9935          * other hand, may not be online all the time.
9936          */
9937         if (lun != NULL)
9938                 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9939                                      lun->be_lun->lun_type;
9940         else
9941                 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9942         eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
        scsi_ulto2b(data_len - 4, eid_ptr->page_length);
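        /* We support head of queue, ordered and simple tags. */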
9944         eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
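        /* A volatile write cache is supported. */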
9945         eid_ptr->flags3 = SVPD_EID_V_SUP;
9946
9947         ctsio->scsi_status = SCSI_STATUS_OK;
9948         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
9949         ctsio->be_move_done = ctl_config_move_done;
9950         ctl_datamove((union ctl_io *)ctsio);
9951
9952         return (CTL_RETVAL_COMPLETE);
9953 }
9954
9955 static int
9956 ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
9957 {
9958         struct scsi_vpd_mode_page_policy *mpp_ptr;
9959         struct ctl_lun *lun;
9960         int data_len;
9961
9962         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9963
9964         data_len = sizeof(struct scsi_vpd_mode_page_policy) +
9965             sizeof(struct scsi_vpd_mode_page_policy_descr);
9966
9967         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9968         mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
9969         ctsio->kern_sg_entries = 0;
9970
9971         if (data_len < alloc_len) {
9972                 ctsio->residual = alloc_len - data_len;
9973                 ctsio->kern_data_len = data_len;
9974                 ctsio->kern_total_len = data_len;
9975         } else {
9976                 ctsio->residual = 0;
9977                 ctsio->kern_data_len = alloc_len;
9978                 ctsio->kern_total_len = alloc_len;
9979         }
9980         ctsio->kern_data_resid = 0;
9981         ctsio->kern_rel_offset = 0;
9982         ctsio->kern_sg_entries = 0;
9983
9984         /*
9985          * The control device is always connected.  The disk device, on the
9986          * other hand, may not be online all the time.
9987          */
9988         if (lun != NULL)
9989                 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9990                                      lun->be_lun->lun_type;
9991         else
9992                 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9993         mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
9994         scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
9995         mpp_ptr->descr[0].page_code = 0x3f;
9996         mpp_ptr->descr[0].subpage_code = 0xff;
9997         mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;
9998
9999         ctsio->scsi_status = SCSI_STATUS_OK;
10000         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10001         ctsio->be_move_done = ctl_config_move_done;
10002         ctl_datamove((union ctl_io *)ctsio);
10003
10004         return (CTL_RETVAL_COMPLETE);
10005 }
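/*
 * Note on the mode page policy page above: page code 0x3f with subpage
 * code 0xff is the "all mode pages and subpages" wildcard, so the single
 * descriptor declares the shared policy for every mode page at once.
 */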
10006
10007 static int
10008 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
10009 {
10010         struct scsi_vpd_device_id *devid_ptr;
10011         struct scsi_vpd_id_descriptor *desc;
10012         struct ctl_softc *ctl_softc;
10013         struct ctl_lun *lun;
10014         struct ctl_port *port;
10015         int data_len;
10016         uint8_t proto;
10017
10018         ctl_softc = control_softc;
10019
10020         port = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
10021         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10022
        data_len = sizeof(struct scsi_vpd_device_id) +
            sizeof(struct scsi_vpd_id_descriptor) +
            sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
            sizeof(struct scsi_vpd_id_descriptor) +
            sizeof(struct scsi_vpd_id_trgt_port_grp_id);
10028         if (lun && lun->lun_devid)
10029                 data_len += lun->lun_devid->len;
10030         if (port->port_devid)
10031                 data_len += port->port_devid->len;
10032         if (port->target_devid)
10033                 data_len += port->target_devid->len;
10034
10035         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10036         devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
10037         ctsio->kern_sg_entries = 0;
10038
10039         if (data_len < alloc_len) {
10040                 ctsio->residual = alloc_len - data_len;
10041                 ctsio->kern_data_len = data_len;
10042                 ctsio->kern_total_len = data_len;
10043         } else {
10044                 ctsio->residual = 0;
10045                 ctsio->kern_data_len = alloc_len;
10046                 ctsio->kern_total_len = alloc_len;
10047         }
10048         ctsio->kern_data_resid = 0;
10049         ctsio->kern_rel_offset = 0;
10050         ctsio->kern_sg_entries = 0;
10051
10052         /*
10053          * The control device is always connected.  The disk device, on the
10054          * other hand, may not be online all the time.
10055          */
10056         if (lun != NULL)
10057                 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10058                                      lun->be_lun->lun_type;
10059         else
10060                 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10061         devid_ptr->page_code = SVPD_DEVICE_ID;
10062         scsi_ulto2b(data_len - 4, devid_ptr->length);
10063
10064         if (port->port_type == CTL_PORT_FC)
10065                 proto = SCSI_PROTO_FC << 4;
10066         else if (port->port_type == CTL_PORT_ISCSI)
10067                 proto = SCSI_PROTO_ISCSI << 4;
10068         else
10069                 proto = SCSI_PROTO_SPI << 4;
10070         desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
10071
10072         /*
         * We're using a LUN association here, i.e., this device ID is a
10074          * per-LUN identifier.
10075          */
10076         if (lun && lun->lun_devid) {
10077                 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
10078                 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
10079                     lun->lun_devid->len);
10080         }
10081
10082         /*
10083          * This is for the WWPN which is a port association.
10084          */
10085         if (port->port_devid) {
10086                 memcpy(desc, port->port_devid->data, port->port_devid->len);
10087                 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
10088                     port->port_devid->len);
10089         }
10090
10091         /*
         * This is for the Relative Target Port (type 4h) identifier.
10093          */
10094         desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
10095         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
10096             SVPD_ID_TYPE_RELTARG;
10097         desc->length = 4;
10098         scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
10099         desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
10100             sizeof(struct scsi_vpd_id_rel_trgt_port_id));
10101
10102         /*
         * This is for the Target Port Group (type 5h) identifier.
10104          */
10105         desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
10106         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
10107             SVPD_ID_TYPE_TPORTGRP;
10108         desc->length = 4;
10109         scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1,
10110             &desc->identifier[2]);
10111         desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
10112             sizeof(struct scsi_vpd_id_trgt_port_grp_id));
10113
10114         /*
10115          * This is for the Target identifier
10116          */
10117         if (port->target_devid) {
10118                 memcpy(desc, port->target_devid->data, port->target_devid->len);
10119         }
10120
10121         ctsio->scsi_status = SCSI_STATUS_OK;
10122         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10123         ctsio->be_move_done = ctl_config_move_done;
10124         ctl_datamove((union ctl_io *)ctsio);
10125
10126         return (CTL_RETVAL_COMPLETE);
10127 }
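/*
 * Note on the device ID page above: the designators are variable length,
 * so "desc" is advanced past each one as it is emitted (LUN device ID,
 * port WWPN, relative target port, target port group, target device ID).
 * The two 4-byte binary designators carry their values in
 * identifier[2..3] as big-endian 16-bit numbers via scsi_ulto2b(); the
 * first two identifier bytes are reserved.
 */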
10128
10129 static int
10130 ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
10131 {
10132         struct ctl_softc *softc = control_softc;
10133         struct scsi_vpd_scsi_ports *sp;
10134         struct scsi_vpd_port_designation *pd;
10135         struct scsi_vpd_port_designation_cont *pdc;
10136         struct ctl_lun *lun;
10137         struct ctl_port *port;
10138         int data_len, num_target_ports, iid_len, id_len, g, pg, p;
10139         int num_target_port_groups, single;
10140
10141         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10142
10143         single = ctl_is_single;
10144         if (single)
10145                 num_target_port_groups = 1;
10146         else
10147                 num_target_port_groups = NUM_TARGET_PORT_GROUPS;
10148         num_target_ports = 0;
10149         iid_len = 0;
10150         id_len = 0;
10151         mtx_lock(&softc->ctl_lock);
10152         STAILQ_FOREACH(port, &softc->port_list, links) {
10153                 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
10154                         continue;
10155                 if (lun != NULL &&
10156                     ctl_map_lun_back(port->targ_port, lun->lun) >=
10157                     CTL_MAX_LUNS)
10158                         continue;
10159                 num_target_ports++;
10160                 if (port->init_devid)
10161                         iid_len += port->init_devid->len;
10162                 if (port->port_devid)
10163                         id_len += port->port_devid->len;
10164         }
10165         mtx_unlock(&softc->ctl_lock);
10166
10167         data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
10168             num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
10169              sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
10170         ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10171         sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
10172         ctsio->kern_sg_entries = 0;
10173
10174         if (data_len < alloc_len) {
10175                 ctsio->residual = alloc_len - data_len;
10176                 ctsio->kern_data_len = data_len;
10177                 ctsio->kern_total_len = data_len;
10178         } else {
10179                 ctsio->residual = 0;
10180                 ctsio->kern_data_len = alloc_len;
10181                 ctsio->kern_total_len = alloc_len;
10182         }
10183         ctsio->kern_data_resid = 0;
10184         ctsio->kern_rel_offset = 0;
10185         ctsio->kern_sg_entries = 0;
10186
10187         /*
10188          * The control device is always connected.  The disk device, on the
10189          * other hand, may not be online all the time.  Need to change this
10190          * to figure out whether the disk device is actually online or not.
10191          */
10192         if (lun != NULL)
10193                 sp->device = (SID_QUAL_LU_CONNECTED << 5) |
10194                                   lun->be_lun->lun_type;
10195         else
10196                 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10197
10198         sp->page_code = SVPD_SCSI_PORTS;
10199         scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
10200             sp->page_length);
10201         pd = &sp->design[0];
10202
10203         mtx_lock(&softc->ctl_lock);
10204         if (softc->flags & CTL_FLAG_MASTER_SHELF)
10205                 pg = 0;
10206         else
10207                 pg = 1;
10208         for (g = 0; g < num_target_port_groups; g++) {
10209                 STAILQ_FOREACH(port, &softc->port_list, links) {
10210                         if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
10211                                 continue;
10212                         if (lun != NULL &&
10213                             ctl_map_lun_back(port->targ_port, lun->lun) >=
10214                             CTL_MAX_LUNS)
10215                                 continue;
10216                         p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
10217                         scsi_ulto2b(p, pd->relative_port_id);
10218                         if (port->init_devid && g == pg) {
10219                                 iid_len = port->init_devid->len;
10220                                 memcpy(pd->initiator_transportid,
10221                                     port->init_devid->data, port->init_devid->len);
10222                         } else
10223                                 iid_len = 0;
10224                         scsi_ulto2b(iid_len, pd->initiator_transportid_length);
10225                         pdc = (struct scsi_vpd_port_designation_cont *)
10226                             (&pd->initiator_transportid[iid_len]);
10227                         if (port->port_devid && g == pg) {
10228                                 id_len = port->port_devid->len;
10229                                 memcpy(pdc->target_port_descriptors,
10230                                     port->port_devid->data, port->port_devid->len);
10231                         } else
10232                                 id_len = 0;
10233                         scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
10234                         pd = (struct scsi_vpd_port_designation *)
10235                             ((uint8_t *)pdc->target_port_descriptors + id_len);
10236                 }
10237         }
10238         mtx_unlock(&softc->ctl_lock);
10239
10240         ctsio->scsi_status = SCSI_STATUS_OK;
10241         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10242         ctsio->be_move_done = ctl_config_move_done;
10243         ctl_datamove((union ctl_io *)ctsio);
10244
10245         return (CTL_RETVAL_COMPLETE);
10246 }
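/*
 * Note on the SCSI ports page above: in an HA configuration the ports
 * are listed once per target port group, but only the group this node
 * belongs to ("pg", selected by CTL_FLAG_MASTER_SHELF) carries the
 * initiator and target port descriptors; the other group reports
 * zero-length identifiers.
 */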
10247
10248 static int
10249 ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
10250 {
10251         struct scsi_vpd_block_limits *bl_ptr;
10252         struct ctl_lun *lun;
10253         int bs;
10254
10255         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10256
10257         ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
10258         bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
10259         ctsio->kern_sg_entries = 0;
10260
10261         if (sizeof(*bl_ptr) < alloc_len) {
10262                 ctsio->residual = alloc_len - sizeof(*bl_ptr);
10263                 ctsio->kern_data_len = sizeof(*bl_ptr);
10264                 ctsio->kern_total_len = sizeof(*bl_ptr);
10265         } else {
10266                 ctsio->residual = 0;
10267                 ctsio->kern_data_len = alloc_len;
10268                 ctsio->kern_total_len = alloc_len;
10269         }
10270         ctsio->kern_data_resid = 0;
10271         ctsio->kern_rel_offset = 0;
10272         ctsio->kern_sg_entries = 0;
10273
10274         /*
10275          * The control device is always connected.  The disk device, on the
10276          * other hand, may not be online all the time.  Need to change this
10277          * to figure out whether the disk device is actually online or not.
10278          */
10279         if (lun != NULL)
10280                 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10281                                   lun->be_lun->lun_type;
10282         else
10283                 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10284
10285         bl_ptr->page_code = SVPD_BLOCK_LIMITS;
10286         scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length);
10287         bl_ptr->max_cmp_write_len = 0xff;
10288         scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
10289         if (lun != NULL) {
10290                 bs = lun->be_lun->blocksize;
10291                 scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len);
10292                 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
10293                         scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
10294                         scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
10295                         if (lun->be_lun->pblockexp != 0) {
10296                                 scsi_ulto4b((1 << lun->be_lun->pblockexp),
10297                                     bl_ptr->opt_unmap_grain);
10298                                 scsi_ulto4b(0x80000000 | lun->be_lun->pblockoff,
10299                                     bl_ptr->unmap_grain_align);
10300                         }
10301                 }
10302         }
10303         scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);
10304
10305         ctsio->scsi_status = SCSI_STATUS_OK;
10306         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10307         ctsio->be_move_done = ctl_config_move_done;
10308         ctl_datamove((union ctl_io *)ctsio);
10309
10310         return (CTL_RETVAL_COMPLETE);
10311 }
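/*
 * Note on the block limits page above: the 0x80000000 OR'ed into the
 * UNMAP GRANULARITY ALIGNMENT field sets the UGAVALID bit defined by
 * SBC-3, telling the initiator that the alignment value is meaningful.
 */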
10312
10313 static int
10314 ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
10315 {
10316         struct scsi_vpd_block_device_characteristics *bdc_ptr;
10317         struct ctl_lun *lun;
10318
10319         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10320
10321         ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
10322         bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
10323         ctsio->kern_sg_entries = 0;
10324
10325         if (sizeof(*bdc_ptr) < alloc_len) {
10326                 ctsio->residual = alloc_len - sizeof(*bdc_ptr);
10327                 ctsio->kern_data_len = sizeof(*bdc_ptr);
10328                 ctsio->kern_total_len = sizeof(*bdc_ptr);
10329         } else {
10330                 ctsio->residual = 0;
10331                 ctsio->kern_data_len = alloc_len;
10332                 ctsio->kern_total_len = alloc_len;
10333         }
10334         ctsio->kern_data_resid = 0;
10335         ctsio->kern_rel_offset = 0;
10336         ctsio->kern_sg_entries = 0;
10337
10338         /*
10339          * The control device is always connected.  The disk device, on the
10340          * other hand, may not be online all the time.  Need to change this
10341          * to figure out whether the disk device is actually online or not.
10342          */
10343         if (lun != NULL)
10344                 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10345                                   lun->be_lun->lun_type;
10346         else
10347                 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10348         bdc_ptr->page_code = SVPD_BDC;
10349         scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
10350         scsi_ulto2b(SVPD_NON_ROTATING, bdc_ptr->medium_rotation_rate);
10351         bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;
10352
10353         ctsio->scsi_status = SCSI_STATUS_OK;
10354         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10355         ctsio->be_move_done = ctl_config_move_done;
10356         ctl_datamove((union ctl_io *)ctsio);
10357
10358         return (CTL_RETVAL_COMPLETE);
10359 }
10360
10361 static int
10362 ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
10363 {
10364         struct scsi_vpd_logical_block_prov *lbp_ptr;
10365         struct ctl_lun *lun;
10366
10367         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10368
10369         ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
10370         lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
10371         ctsio->kern_sg_entries = 0;
10372
10373         if (sizeof(*lbp_ptr) < alloc_len) {
10374                 ctsio->residual = alloc_len - sizeof(*lbp_ptr);
10375                 ctsio->kern_data_len = sizeof(*lbp_ptr);
10376                 ctsio->kern_total_len = sizeof(*lbp_ptr);
10377         } else {
10378                 ctsio->residual = 0;
10379                 ctsio->kern_data_len = alloc_len;
10380                 ctsio->kern_total_len = alloc_len;
10381         }
10382         ctsio->kern_data_resid = 0;
10383         ctsio->kern_rel_offset = 0;
10384         ctsio->kern_sg_entries = 0;
10385
10386         /*
10387          * The control device is always connected.  The disk device, on the
10388          * other hand, may not be online all the time.  Need to change this
10389          * to figure out whether the disk device is actually online or not.
10390          */
10391         if (lun != NULL)
10392                 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10393                                   lun->be_lun->lun_type;
10394         else
10395                 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10396
10397         lbp_ptr->page_code = SVPD_LBP;
10398         scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
10399         if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
10400                 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
10401                     SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
10402                 lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
10403         }
10404
10405         ctsio->scsi_status = SCSI_STATUS_OK;
10406         ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10407         ctsio->be_move_done = ctl_config_move_done;
10408         ctl_datamove((union ctl_io *)ctsio);
10409
10410         return (CTL_RETVAL_COMPLETE);
10411 }
10412
10413 static int
10414 ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
10415 {
10416         struct scsi_inquiry *cdb;
10417         struct ctl_lun *lun;
10418         int alloc_len, retval;
10419
10420         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10421         cdb = (struct scsi_inquiry *)ctsio->cdb;
10422
10423         retval = CTL_RETVAL_COMPLETE;
10424
10425         alloc_len = scsi_2btoul(cdb->length);
10426
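        /*
         * Dispatch on the requested VPD page.  Any page handled here
         * should also be advertised by ctl_inquiry_evpd_supported(), so
         * that initiators probing page 0x00 can discover it.
         *
         * For reference, a minimal sketch of the initiator-side CDB that
         * selects one of these pages (illustrative only; it assumes the
         * struct scsi_inquiry layout used elsewhere in this file):
         *
         *      struct scsi_inquiry cdb;
         *      memset(&cdb, 0, sizeof(cdb));
         *      cdb.opcode = INQUIRY;
         *      cdb.byte2 = SI_EVPD;
         *      cdb.page_code = SVPD_LBP;
         *      scsi_ulto2b(256, cdb.length);
         */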
10427         switch (cdb->page_code) {
10428         case SVPD_SUPPORTED_PAGES:
10429                 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
10430                 break;
10431         case SVPD_UNIT_SERIAL_NUMBER:
10432                 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
10433                 break;
10434         case SVPD_DEVICE_ID:
10435                 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
10436                 break;
10437         case SVPD_EXTENDED_INQUIRY_DATA:
10438                 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
10439                 break;
10440         case SVPD_MODE_PAGE_POLICY:
10441                 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
10442                 break;
10443         case SVPD_SCSI_PORTS:
10444                 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
10445                 break;
10446         case SVPD_SCSI_TPC:
10447                 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
10448                 break;
10449         case SVPD_BLOCK_LIMITS:
10450                 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
10451                 break;
10452         case SVPD_BDC:
10453                 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
10454                 break;
10455         case SVPD_LBP:
10456                 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
10457                 break;
10458         default:
10459                 ctl_set_invalid_field(ctsio,
10460                                       /*sks_valid*/ 1,
10461                                       /*command*/ 1,
10462                                       /*field*/ 2,
10463                                       /*bit_valid*/ 0,
10464                                       /*bit*/ 0);
10465                 ctl_done((union ctl_io *)ctsio);
10466                 retval = CTL_RETVAL_COMPLETE;
10467                 break;
10468         }
10469
10470         return (retval);
10471 }
10472
10473 static int
10474 ctl_inquiry_std(struct ctl_scsiio *ctsio)
10475 {
10476         struct scsi_inquiry_data *inq_ptr;
10477         struct scsi_inquiry *cdb;
10478         struct ctl_softc *ctl_softc;
10479         struct ctl_lun *lun;
10480         char *val;
10481         uint32_t alloc_len;
10482         ctl_port_type port_type;
10483
10484         ctl_softc = control_softc;
10485
10486         /*
10487          * Figure out whether we're talking to a Fibre Channel port or not.
10488          * We treat the ioctl front end, and any SCSI adapters, as packetized
10489          * SCSI front ends.
10490          */
10491         port_type = ctl_softc->ctl_ports[
10492             ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
10493         if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
10494                 port_type = CTL_PORT_SCSI;
10495
10496         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
10497         cdb = (struct scsi_inquiry *)ctsio->cdb;
10498         alloc_len = scsi_2btoul(cdb->length);
10499
10500         /*
10501          * We malloc the full inquiry data size here and fill it
10502          * in.  If the user asks for less, we'll give him only that
10503          * much.
10504          */
10505         ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO);
10506         inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
10507         ctsio->kern_sg_entries = 0;
10508         ctsio->kern_data_resid = 0;
10509         ctsio->kern_rel_offset = 0;
10510
10511         if (sizeof(*inq_ptr) < alloc_len) {
10512                 ctsio->residual = alloc_len - sizeof(*inq_ptr);
10513                 ctsio->kern_data_len = sizeof(*inq_ptr);
10514                 ctsio->kern_total_len = sizeof(*inq_ptr);
10515         } else {
10516                 ctsio->residual = 0;
10517                 ctsio->kern_data_len = alloc_len;
10518                 ctsio->kern_total_len = alloc_len;
10519         }
10520
10521         /*
10522          * If we have a LUN configured, report it as connected.  Otherwise,
10523          * report that it is offline or that no device is supported,
10524          * depending on the value of inquiry_pq_no_lun.
10525          *
10526          * According to the spec (SPC-4 r34), the peripheral qualifier
10527          * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
10528          *
10529          * "A peripheral device having the specified peripheral device type 
10530          * is not connected to this logical unit. However, the device
10531          * server is capable of supporting the specified peripheral device
10532          * type on this logical unit."
10533          *
10534          * According to the same spec, the peripheral qualifier
10535          * SID_QUAL_BAD_LU (011b) is used in this scenario:
10536          *
10537          * "The device server is not capable of supporting a peripheral
10538          * device on this logical unit. For this peripheral qualifier the
10539          * peripheral device type shall be set to 1Fh. All other peripheral
10540          * device type values are reserved for this peripheral qualifier."
10541          *
10542          * Given the text, it would seem that we probably want to report that
10543          * the LUN is offline here.  There is no LUN connected, but we can
10544          * support a LUN at the given LUN number.
10545          *
10546          * In the real world, though, it sounds like things are a little
10547          * different:
10548          *
10549          * - Linux, when presented with a LUN with the offline peripheral
10550          *   qualifier, will create an sg driver instance for it.  So when
10551          *   you attach it to CTL, you wind up with a ton of sg driver
10552          *   instances.  (One for every LUN that Linux bothered to probe.)
10553          *   Linux does this despite the fact that it issues a REPORT LUNS
10554          *   to LUN 0 to get the inventory of supported LUNs.
10555          *
10556          * - There is other anecdotal evidence (from Emulex folks) about
10557          *   arrays that use the offline peripheral qualifier for LUNs that
10558          *   are on the "passive" path in an active/passive array.
10559          *
10560          * So the solution is provide a hopefully reasonable default
10561          * (return bad/no LUN) and allow the user to change the behavior
10562          * with a tunable/sysctl variable.
10563          */
10564         if (lun != NULL)
10565                 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
10566                                   lun->be_lun->lun_type;
10567         else if (ctl_softc->inquiry_pq_no_lun == 0)
10568                 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
10569         else
10570                 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
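        /*
         * For reference, the byte built above is (qualifier << 5) | type:
         *   (SID_QUAL_LU_CONNECTED << 5) | T_DIRECT   = 0x00
         *   (SID_QUAL_LU_OFFLINE << 5)   | T_DIRECT   = 0x20
         *   (SID_QUAL_BAD_LU << 5)       | T_NODEVICE = 0x7f
         */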
10571
10572         /* RMB in byte 2 is 0 */
10573         inq_ptr->version = SCSI_REV_SPC4;
10574
10575         /*
10576          * According to SAM-3, even if a device only supports a single
10577          * level of LUN addressing, it should still set the HISUP bit:
10578          *
10579          * 4.9.1 Logical unit numbers overview
10580          *
10581          * All logical unit number formats described in this standard are
10582          * hierarchical in structure even when only a single level in that
10583          * hierarchy is used. The HISUP bit shall be set to one in the
10584          * standard INQUIRY data (see SPC-2) when any logical unit number
10585          * format described in this standard is used.  Non-hierarchical
10586          * formats are outside the scope of this standard.
10587          *
10588          * Therefore we set the HiSup bit here.
10589          *
10590          * The response format is 2, per SPC-3.
10591          */
10592         inq_ptr->response_format = SID_HiSup | 2;
10593
10594         inq_ptr->additional_length =
10595             offsetof(struct scsi_inquiry_data, vendor_specific1) -
10596             (offsetof(struct scsi_inquiry_data, additional_length) + 1);
10597         CTL_DEBUG_PRINT(("additional_length = %d\n",
10598                          inq_ptr->additional_length));
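        /*
         * With the stock scsi_inquiry_data layout this is the number of
         * bytes between the end of the additional_length field and
         * vendor_specific1, i.e. the size of the standard INQUIRY data
         * we report minus its 5-byte header.
         */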
10599
10600         inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
10601         /* 16 bit addressing */
10602         if (port_type == CTL_PORT_SCSI)
10603                 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
10604         /* XXX set the SID_MultiP bit here if we're actually going to
10605            respond on multiple ports */
10606         inq_ptr->spc2_flags |= SPC2_SID_MultiP;
10607
10608         /* 16 bit data bus, synchronous transfers */
10609         if (port_type == CTL_PORT_SCSI)
10610                 inq_ptr->flags = SID_WBus16 | SID_Sync;
10611         /*
10612          * XXX KDM do we want to support tagged queueing on the control
10613          * device at all?
10614          */
10615         if ((lun == NULL)
10616          || (lun->be_lun->lun_type != T_PROCESSOR))
10617                 inq_ptr->flags |= SID_CmdQue;
10618         /*
10619          * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
10620          * We have 8 bytes for the vendor name, and 16 bytes for the device
10621          * name and 4 bytes for the revision.
10622          */
10623         if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10624             "vendor")) == NULL) {
10625                 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
10626         } else {
10627                 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
10628                 strncpy(inq_ptr->vendor, val,
10629                     min(sizeof(inq_ptr->vendor), strlen(val)));
10630         }
10631         if (lun == NULL) {
10632                 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10633                     sizeof(inq_ptr->product));
10634         } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
10635                 switch (lun->be_lun->lun_type) {
10636                 case T_DIRECT:
10637                         strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
10638                             sizeof(inq_ptr->product));
10639                         break;
10640                 case T_PROCESSOR:
10641                         strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
10642                             sizeof(inq_ptr->product));
10643                         break;
10644                 default:
10645                         strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
10646                             sizeof(inq_ptr->product));
10647                         break;
10648                 }
10649         } else {
10650                 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
10651                 strncpy(inq_ptr->product, val,
10652                     min(sizeof(inq_ptr->product), strlen(val)));
10653         }
10654
10655         /*
10656          * XXX make this a macro somewhere so it automatically gets
10657          * incremented when we make changes.
10658          */
10659         if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
10660             "revision")) == NULL) {
10661                 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
10662         } else {
10663                 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
10664                 strncpy(inq_ptr->revision, val,
10665                     min(sizeof(inq_ptr->revision), strlen(val)));
10666         }
10667
10668         /*
10669          * For parallel SCSI, we support double transition and single
10670          * transition clocking.  We also support QAS (Quick Arbitration
10671          * and Selection) and Information Unit transfers on both the
10672          * control and array devices.
10673          */
10674         if (port_type == CTL_PORT_SCSI)
10675                 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
10676                                     SID_SPI_IUS;
10677
10678         /* SAM-5 (no version claimed) */
10679         scsi_ulto2b(0x00A0, inq_ptr->version1);
10680         /* SPC-4 (no version claimed) */
10681         scsi_ulto2b(0x0460, inq_ptr->version2);
10682         if (port_type == CTL_PORT_FC) {
10683                 /* FCP-2 ANSI INCITS.350:2003 */
10684                 scsi_ulto2b(0x0917, inq_ptr->version3);
10685         } else if (port_type == CTL_PORT_SCSI) {
10686                 /* SPI-4 ANSI INCITS.362:200x */
10687                 scsi_ulto2b(0x0B56, inq_ptr->version3);
10688         } else if (port_type == CTL_PORT_ISCSI) {
10689                 /* iSCSI (no version claimed) */
10690                 scsi_ulto2b(0x0960, inq_ptr->version3);
10691         } else if (port_type == CTL_PORT_SAS) {
10692                 /* SAS (no version claimed) */
10693                 scsi_ulto2b(0x0BE0, inq_ptr->version3);
10694         }
10695
10696         if (lun == NULL) {
10697                 /* SBC-3 (no version claimed) */
10698                 scsi_ulto2b(0x04C0, inq_ptr->version4);
10699         } else {
10700                 switch (lun->be_lun->lun_type) {
10701                 case T_DIRECT:
10702                         /* SBC-3 (no version claimed) */
10703                         scsi_ulto2b(0x04C0, inq_ptr->version4);
10704                         break;
10705                 case T_PROCESSOR:
10706                 default:
10707                         break;
10708                 }
10709         }
10710
10711         ctsio->scsi_status = SCSI_STATUS_OK;
10712         if (ctsio->kern_data_len > 0) {
10713                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
10714                 ctsio->be_move_done = ctl_config_move_done;
10715                 ctl_datamove((union ctl_io *)ctsio);
10716         } else {
10717                 ctsio->io_hdr.status = CTL_SUCCESS;
10718                 ctl_done((union ctl_io *)ctsio);
10719         }
10720
10721         return (CTL_RETVAL_COMPLETE);
10722 }
10723
10724 int
10725 ctl_inquiry(struct ctl_scsiio *ctsio)
10726 {
10727         struct scsi_inquiry *cdb;
10728         int retval;
10729
10730         cdb = (struct scsi_inquiry *)ctsio->cdb;
10731
10732         retval = 0;
10733
10734         CTL_DEBUG_PRINT(("ctl_inquiry\n"));
10735
10736         /*
10737          * Right now, we don't support the CmdDt inquiry information.
10738          * This would be nice to support in the future.  When we do
10739          * support it, we should change this test so that it checks to make
10740          * sure SI_EVPD and SI_CMDDT aren't both set at the same time.
10741          */
10742 #ifdef notyet
10743         if (((cdb->byte2 & SI_EVPD)
10744          && (cdb->byte2 & SI_CMDDT)))
10745 #endif
10746         if (cdb->byte2 & SI_CMDDT) {
10747                 /*
10748                  * Point to the SI_CMDDT bit.  We might change this
10749                  * when we support SI_CMDDT, but since both bits would be
10750                  * "wrong", this should probably just stay as-is then.
10751                  */
10752                 ctl_set_invalid_field(ctsio,
10753                                       /*sks_valid*/ 1,
10754                                       /*command*/ 1,
10755                                       /*field*/ 1,
10756                                       /*bit_valid*/ 1,
10757                                       /*bit*/ 1);
10758                 ctl_done((union ctl_io *)ctsio);
10759                 return (CTL_RETVAL_COMPLETE);
10760         }
10761         if (cdb->byte2 & SI_EVPD)
10762                 retval = ctl_inquiry_evpd(ctsio);
10763 #ifdef notyet
10764         else if (cdb->byte2 & SI_CMDDT)
10765                 retval = ctl_inquiry_cmddt(ctsio);
10766 #endif
10767         else
10768                 retval = ctl_inquiry_std(ctsio);
10769
10770         return (retval);
10771 }
10772
10773 /*
10774  * For known CDB types, parse the LBA and length.
10775  */
10776 static int
10777 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
10778 {
10779         if (io->io_hdr.io_type != CTL_IO_SCSI)
10780                 return (1);
10781
10782         switch (io->scsiio.cdb[0]) {
10783         case COMPARE_AND_WRITE: {
10784                 struct scsi_compare_and_write *cdb;
10785
10786                 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
10787
10788                 *lba = scsi_8btou64(cdb->addr);
10789                 *len = cdb->length;
10790                 break;
10791         }
10792         case READ_6:
10793         case WRITE_6: {
10794                 struct scsi_rw_6 *cdb;
10795
10796                 cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
10797
10798                 *lba = scsi_3btoul(cdb->addr);
10799                 /* only 5 bits are valid in the most significant address byte */
10800                 *lba &= 0x1fffff;
10801                 *len = cdb->length;
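                /*
                 * Note: per SBC, a READ(6)/WRITE(6) transfer length of 0
                 * means 256 blocks; since this helper only feeds the
                 * serialization/overlap checks, that case is not
                 * special-cased here.
                 */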
10802                 break;
10803         }
10804         case READ_10:
10805         case WRITE_10: {
10806                 struct scsi_rw_10 *cdb;
10807
10808                 cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
10809
10810                 *lba = scsi_4btoul(cdb->addr);
10811                 *len = scsi_2btoul(cdb->length);
10812                 break;
10813         }
10814         case WRITE_VERIFY_10: {
10815                 struct scsi_write_verify_10 *cdb;
10816
10817                 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
10818
10819                 *lba = scsi_4btoul(cdb->addr);
10820                 *len = scsi_2btoul(cdb->length);
10821                 break;
10822         }
10823         case READ_12:
10824         case WRITE_12: {
10825                 struct scsi_rw_12 *cdb;
10826
10827                 cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
10828
10829                 *lba = scsi_4btoul(cdb->addr);
10830                 *len = scsi_4btoul(cdb->length);
10831                 break;
10832         }
10833         case WRITE_VERIFY_12: {
10834                 struct scsi_write_verify_12 *cdb;
10835
10836                 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
10837
10838                 *lba = scsi_4btoul(cdb->addr);
10839                 *len = scsi_4btoul(cdb->length);
10840                 break;
10841         }
10842         case READ_16:
10843         case WRITE_16: {
10844                 struct scsi_rw_16 *cdb;
10845
10846                 cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
10847
10848                 *lba = scsi_8btou64(cdb->addr);
10849                 *len = scsi_4btoul(cdb->length);
10850                 break;
10851         }
10852         case WRITE_VERIFY_16: {
10853                 struct scsi_write_verify_16 *cdb;
10854
10855                 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
10856
10857
10858                 *lba = scsi_8btou64(cdb->addr);
10859                 *len = scsi_4btoul(cdb->length);
10860                 break;
10861         }
10862         case WRITE_SAME_10: {
10863                 struct scsi_write_same_10 *cdb;
10864
10865                 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
10866
10867                 *lba = scsi_4btoul(cdb->addr);
10868                 *len = scsi_2btoul(cdb->length);
10869                 break;
10870         }
10871         case WRITE_SAME_16: {
10872                 struct scsi_write_same_16 *cdb;
10873
10874                 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
10875
10876                 *lba = scsi_8btou64(cdb->addr);
10877                 *len = scsi_4btoul(cdb->length);
10878                 break;
10879         }
10880         case VERIFY_10: {
10881                 struct scsi_verify_10 *cdb;
10882
10883                 cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
10884
10885                 *lba = scsi_4btoul(cdb->addr);
10886                 *len = scsi_2btoul(cdb->length);
10887                 break;
10888         }
10889         case VERIFY_12: {
10890                 struct scsi_verify_12 *cdb;
10891
10892                 cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
10893
10894                 *lba = scsi_4btoul(cdb->addr);
10895                 *len = scsi_4btoul(cdb->length);
10896                 break;
10897         }
10898         case VERIFY_16: {
10899                 struct scsi_verify_16 *cdb;
10900
10901                 cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
10902
10903                 *lba = scsi_8btou64(cdb->addr);
10904                 *len = scsi_4btoul(cdb->length);
10905                 break;
10906         }
10907         case UNMAP: {
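                /*
                 * The UNMAP descriptor list has not been fetched yet at
                 * this point, so conservatively treat the command as
                 * covering the whole LUN; ctl_extent_check_unmap() below
                 * refines the answer once the data arrives.
                 */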
10908                 *lba = 0;
10909                 *len = UINT64_MAX;
10910                 break;
10911         }
10912         default:
10913                 return (1);
10914                 break; /* NOTREACHED */
10915         }
10916
10917         return (0);
10918 }
10919
10920 static ctl_action
10921 ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2)
10922 {
10923         uint64_t endlba1, endlba2;
10924
10925         endlba1 = lba1 + len1 - 1;
10926         endlba2 = lba2 + len2 - 1;
10927
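        /*
         * Two extents overlap unless one ends before the other begins.
         * Example: lba1 = 0, len1 = 8 (blocks 0-7) and lba2 = 8, len2 = 8
         * (blocks 8-15) give endlba1 = 7 < lba2 = 8, so they pass.
         */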
10928         if ((endlba1 < lba2)
10929          || (endlba2 < lba1))
10930                 return (CTL_ACTION_PASS);
10931         else
10932                 return (CTL_ACTION_BLOCK);
10933 }
10934
10935 static int
10936 ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
10937 {
10938         struct ctl_ptr_len_flags *ptrlen;
10939         struct scsi_unmap_desc *buf, *end, *range;
10940         uint64_t lba;
10941         uint32_t len;
10942
10943         /* If not UNMAP -- go other way. */
10944         if (io->io_hdr.io_type != CTL_IO_SCSI ||
10945             io->scsiio.cdb[0] != UNMAP)
10946                 return (CTL_ACTION_ERROR);
10947
10948         /* If UNMAP without data -- block and wait for data. */
10949         ptrlen = (struct ctl_ptr_len_flags *)
10950             &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
10951         if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
10952             ptrlen->ptr == NULL)
10953                 return (CTL_ACTION_BLOCK);
10954
10955         /* UNMAP with data -- check for collision. */
10956         buf = (struct scsi_unmap_desc *)ptrlen->ptr;
10957         end = buf + ptrlen->len / sizeof(*buf);
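        /*
         * Standard interval-overlap test: a descriptor covering
         * [lba, lba + len) collides with [lba2, lba2 + len2) iff
         * lba < lba2 + len2 and lba + len > lba2.
         */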
10958         for (range = buf; range < end; range++) {
10959                 lba = scsi_8btou64(range->lba);
10960                 len = scsi_4btoul(range->length);
10961                 if ((lba < lba2 + len2) && (lba + len > lba2))
10962                         return (CTL_ACTION_BLOCK);
10963         }
10964         return (CTL_ACTION_PASS);
10965 }
10966
10967 static ctl_action
10968 ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
10969 {
10970         uint64_t lba1, lba2;
10971         uint64_t len1, len2;
10972         int retval;
10973
10974         if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
10975                 return (CTL_ACTION_ERROR);
10976
10977         retval = ctl_extent_check_unmap(io2, lba1, len1);
10978         if (retval != CTL_ACTION_ERROR)
10979                 return (retval);
10980
10981         if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
10982                 return (CTL_ACTION_ERROR);
10983
10984         return (ctl_extent_check_lba(lba1, len1, lba2, len2));
10985 }
10986
10987 static ctl_action
10988 ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
10989     union ctl_io *ooa_io)
10990 {
10991         const struct ctl_cmd_entry *pending_entry, *ooa_entry;
10992         ctl_serialize_action *serialize_row;
10993
10994         /*
10995          * The initiator attempted multiple untagged commands at the same
10996          * time.  Can't do that.
10997          */
10998         if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
10999          && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11000          && ((pending_io->io_hdr.nexus.targ_port ==
11001               ooa_io->io_hdr.nexus.targ_port)
11002           && (pending_io->io_hdr.nexus.initid.id ==
11003               ooa_io->io_hdr.nexus.initid.id))
11004          && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
11005                 return (CTL_ACTION_OVERLAP);
11006
11007         /*
11008          * The initiator attempted to send multiple tagged commands with
11009          * the same ID.  (It's fine if different initiators have the same
11010          * tag ID.)
11011          *
11012          * Even if all of those conditions are true, we don't kill the I/O
11013          * if the command ahead of us has been aborted.  We won't end up
11014          * sending it to the FETD, and it's perfectly legal to resend a
11015          * command with the same tag number as long as the previous
11016          * instance of this tag number has been aborted somehow.
11017          */
11018         if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
11019          && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
11020          && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
11021          && ((pending_io->io_hdr.nexus.targ_port ==
11022               ooa_io->io_hdr.nexus.targ_port)
11023           && (pending_io->io_hdr.nexus.initid.id ==
11024               ooa_io->io_hdr.nexus.initid.id))
11025          && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
11026                 return (CTL_ACTION_OVERLAP_TAG);
11027
11028         /*
11029          * If we get a head of queue tag, SAM-3 says that we should
11030          * immediately execute it.
11031          *
11032          * What happens if this command would normally block for some other
11033          * reason?  e.g. a request sense with a head of queue tag
11034          * immediately after a write.  Normally that would block, but this
11035          * will result in its getting executed immediately...
11036          *
11037          * We currently return "pass" instead of "skip", so we'll end up
11038          * going through the rest of the queue to check for overlapped tags.
11039          *
11040          * XXX KDM check for other types of blockage first??
11041          */
11042         if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
11043                 return (CTL_ACTION_PASS);
11044
11045         /*
11046          * Ordered tags have to block until all items ahead of them
11047          * have completed.  If we get called with an ordered tag, we always
11048          * block, if something else is ahead of us in the queue.
11049          */
11050         if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
11051                 return (CTL_ACTION_BLOCK);
11052
11053         /*
11054          * Simple tags get blocked until all head of queue and ordered tags
11055          * ahead of them have completed.  I'm lumping untagged commands in
11056          * with simple tags here.  XXX KDM is that the right thing to do?
11057          */
11058         if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
11059           || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
11060          && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
11061           || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
11062                 return (CTL_ACTION_BLOCK);
11063
11064         pending_entry = ctl_get_cmd_entry(&pending_io->scsiio);
11065         ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio);
11066
11067         serialize_row = ctl_serialize_table[ooa_entry->seridx];
11068
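        /*
         * The command already on the OOA queue selects a row in the
         * serialization table, and the pending command's index selects
         * the column; the entry found there decides whether the pending
         * command passes, blocks, or needs an extent check.
         */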
11069         switch (serialize_row[pending_entry->seridx]) {
11070         case CTL_SER_BLOCK:
11071                 return (CTL_ACTION_BLOCK);
11072         case CTL_SER_EXTENT:
11073                 return (ctl_extent_check(pending_io, ooa_io));
11074         case CTL_SER_EXTENTOPT:
11075                 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
11076                     & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
11077                         return (ctl_extent_check(pending_io, ooa_io));
11078                 /* FALLTHROUGH */
11079         case CTL_SER_PASS:
11080                 return (CTL_ACTION_PASS);
11081         case CTL_SER_BLOCKOPT:
11082                 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
11083                     & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
11084                         return (CTL_ACTION_BLOCK);
11085                 return (CTL_ACTION_PASS);
11086         case CTL_SER_SKIP:
11087                 return (CTL_ACTION_SKIP);
11088         default:
11089                 panic("invalid serialization value %d",
11090                       serialize_row[pending_entry->seridx]);
11091         }
11092
11093         return (CTL_ACTION_ERROR);
11094 }
11095
11096 /*
11097  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
11098  * Assumptions:
11099  * - pending_io is generally either incoming, or on the blocked queue
11100  * - starting I/O is the I/O we want to start the check with.
11101  */
11102 static ctl_action
11103 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
11104               union ctl_io *starting_io)
11105 {
11106         union ctl_io *ooa_io;
11107         ctl_action action;
11108
11109         mtx_assert(&lun->lun_lock, MA_OWNED);
11110
11111         /*
11112          * Run back along the OOA queue, starting with the current
11113          * blocked I/O and going through every I/O before it on the
11114          * queue.  If starting_io is NULL, we'll just end up returning
11115          * CTL_ACTION_PASS.
11116          */
11117         for (ooa_io = starting_io; ooa_io != NULL;
11118              ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
11119              ooa_links)){
11120
11121                 /*
11122                  * This routine just checks to see whether
11123                  * cur_blocked is blocked by ooa_io, which is ahead
11124                  * of it in the queue.  It doesn't queue/dequeue
11125                  * cur_blocked.
11126                  */
11127                 action = ctl_check_for_blockage(lun, pending_io, ooa_io);
11128                 switch (action) {
11129                 case CTL_ACTION_BLOCK:
11130                 case CTL_ACTION_OVERLAP:
11131                 case CTL_ACTION_OVERLAP_TAG:
11132                 case CTL_ACTION_SKIP:
11133                 case CTL_ACTION_ERROR:
11134                         return (action);
11135                         break; /* NOTREACHED */
11136                 case CTL_ACTION_PASS:
11137                         break;
11138                 default:
11139                         panic("invalid action %d", action);
11140                         break;  /* NOTREACHED */
11141                 }
11142         }
11143
11144         return (CTL_ACTION_PASS);
11145 }
11146
11147 /*
11148  * Assumptions:
11149  * - An I/O has just completed, and has been removed from the per-LUN OOA
11150  *   queue, so some items on the blocked queue may now be unblocked.
11151  */
11152 static int
11153 ctl_check_blocked(struct ctl_lun *lun)
11154 {
11155         union ctl_io *cur_blocked, *next_blocked;
11156
11157         mtx_assert(&lun->lun_lock, MA_OWNED);
11158
11159         /*
11160          * Run forward from the head of the blocked queue, checking each
11161          * entry against the I/Os prior to it on the OOA queue to see if
11162          * there is still any blockage.
11163          *
11164          * We cannot use the TAILQ_FOREACH() macro, because it can't deal
11165          * with our removing a variable on it while it is traversing the
11166          * list.
11167          */
11168         for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
11169              cur_blocked != NULL; cur_blocked = next_blocked) {
11170                 union ctl_io *prev_ooa;
11171                 ctl_action action;
11172
11173                 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
11174                                                           blocked_links);
11175
11176                 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
11177                                                       ctl_ooaq, ooa_links);
11178
11179                 /*
11180                  * If cur_blocked happens to be the first item in the OOA
11181                  * queue now, prev_ooa will be NULL, and the action
11182                  * returned will just be CTL_ACTION_PASS.
11183                  */
11184                 action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
11185
11186                 switch (action) {
11187                 case CTL_ACTION_BLOCK:
11188                         /* Nothing to do here, still blocked */
11189                         break;
11190                 case CTL_ACTION_OVERLAP:
11191                 case CTL_ACTION_OVERLAP_TAG:
11192                         /*
11193                          * This shouldn't happen!  In theory we've already
11194                          * checked this command for overlap...
11195                          */
11196                         break;
11197                 case CTL_ACTION_PASS:
11198                 case CTL_ACTION_SKIP: {
11199                         struct ctl_softc *softc;
11200                         const struct ctl_cmd_entry *entry;
11201                         uint32_t initidx;
11202                         int isc_retval;
11203
11204                         /*
11205                          * The skip case shouldn't happen, this transaction
11206                          * should have never made it onto the blocked queue.
11207                          */
11208                         /*
11209                          * This I/O is no longer blocked, we can remove it
11210                          * from the blocked queue.  Since this is a TAILQ
11211                          * (doubly linked list), we can do O(1) removals
11212                          * from any place on the list.
11213                          */
11214                         TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
11215                                      blocked_links);
11216                         cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
11217
11218                         if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
11219                                 /*
11220                                  * Need to send IO back to original side to
11221                                  * run
11222                                  */
11223                                 union ctl_ha_msg msg_info;
11224
11225                                 msg_info.hdr.original_sc =
11226                                         cur_blocked->io_hdr.original_sc;
11227                                 msg_info.hdr.serializing_sc = cur_blocked;
11228                                 msg_info.hdr.msg_type = CTL_MSG_R2R;
11229                                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11230                                      &msg_info, sizeof(msg_info), 0)) >
11231                                      CTL_HA_STATUS_SUCCESS) {
11232                                         printf("CTL:Check Blocked error from "
11233                                                "ctl_ha_msg_send %d\n",
11234                                                isc_retval);
11235                                 }
11236                                 break;
11237                         }
11238                         entry = ctl_get_cmd_entry(&cur_blocked->scsiio);
11239                         softc = control_softc;
11240
11241                         initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
11242
11243                         /*
11244                          * Check this I/O for LUN state changes that may
11245                          * have happened while this command was blocked.
11246                          * The LUN state may have been changed by a command
11247                          * ahead of us in the queue, so we need to re-check
11248                          * for any states that can be caused by SCSI
11249                          * commands.
11250                          */
11251                         if (ctl_scsiio_lun_check(softc, lun, entry,
11252                                                  &cur_blocked->scsiio) == 0) {
11253                                 cur_blocked->io_hdr.flags |=
11254                                                       CTL_FLAG_IS_WAS_ON_RTR;
11255                                 ctl_enqueue_rtr(cur_blocked);
11256                         } else
11257                                 ctl_done(cur_blocked);
11258                         break;
11259                 }
11260                 default:
11261                         /*
11262                          * This probably shouldn't happen -- we shouldn't
11263                          * get CTL_ACTION_ERROR, or anything else.
11264                          */
11265                         break;
11266                 }
11267         }
11268
11269         return (CTL_RETVAL_COMPLETE);
11270 }
11271
11272 /*
11273  * This routine (with one exception) checks LUN flags that can be set by
11274  * commands ahead of us in the OOA queue.  These flags have to be checked
11275  * when a command initially comes in, and when we pull a command off the
11276  * blocked queue and are preparing to execute it.  The reason we have to
11277  * check these flags for commands on the blocked queue is that the LUN
11278  * state may have been changed by a command ahead of us while we're on the
11279  * blocked queue.
11280  *
11281  * Ordering is somewhat important with these checks, so please pay
11282  * careful attention to the placement of any new checks.
11283  */
11284 static int
11285 ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
11286     const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
11287 {
11288         int retval;
11289
11290         retval = 0;
11291
11292         mtx_assert(&lun->lun_lock, MA_OWNED);
11293
11294         /*
11295          * If this shelf is a secondary shelf controller, we have to reject
11296          * any media access commands.
11297          */
11298 #if 0
11299         /* No longer needed for HA */
11300         if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0)
11301          && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) {
11302                 ctl_set_lun_standby(ctsio);
11303                 retval = 1;
11304                 goto bailout;
11305         }
11306 #endif
11307
11308         /*
11309          * Check for a reservation conflict.  If this command isn't allowed
11310          * even on reserved LUNs, and if this initiator isn't the one who
11311          * reserved us, reject the command with a reservation conflict.
11312          */
11313         if ((lun->flags & CTL_LUN_RESERVED)
11314          && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
11315                 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
11316                  || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
11317                  || (ctsio->io_hdr.nexus.targ_target.id !=
11318                      lun->rsv_nexus.targ_target.id)) {
11319                         ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
11320                         ctsio->io_hdr.status = CTL_SCSI_ERROR;
11321                         retval = 1;
11322                         goto bailout;
11323                 }
11324         }
11325
11326         if ( (lun->flags & CTL_LUN_PR_RESERVED)
11327          && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) {
11328                 uint32_t residx;
11329
11330                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
11331                 /*
11332                  * if we aren't registered or it's a res holder type
11333                  * reservation and this isn't the res holder then set a
11334                  * conflict.
11335                  * NOTE: Commands which might be allowed on write exclusive
11336                  * type reservations are checked in the particular command
11337                  * for a conflict. Read and SSU are the only ones.
11338                  */
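                /*
                 * For reference: the holder-only reservation types per
                 * SPC are Write Exclusive (1h) and Exclusive Access (3h),
                 * hence the "res_type < 4" test below; the registrants-
                 * only and all-registrants variants use higher type codes
                 * and are covered by the registration check instead.
                 */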
11339                 if (!lun->per_res[residx].registered
11340                  || (residx != lun->pr_res_idx && lun->res_type < 4)) {
11341                         ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
11342                         ctsio->io_hdr.status = CTL_SCSI_ERROR;
11343                         retval = 1;
11344                         goto bailout;
11345                 }
11346
11347         }
11348
11349         if ((lun->flags & CTL_LUN_OFFLINE)
11350          && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
11351                 ctl_set_lun_not_ready(ctsio);
11352                 retval = 1;
11353                 goto bailout;
11354         }
11355
11356         /*
11357          * If the LUN is stopped, see if this particular command is allowed
11358          * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
11359          */
11360         if ((lun->flags & CTL_LUN_STOPPED)
11361          && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
11362                 /* "Logical unit not ready, initializing cmd. required" */
11363                 ctl_set_lun_stopped(ctsio);
11364                 retval = 1;
11365                 goto bailout;
11366         }
11367
11368         if ((lun->flags & CTL_LUN_INOPERABLE)
11369          && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
11370                 /* "Medium format corrupted" */
11371                 ctl_set_medium_format_corrupted(ctsio);
11372                 retval = 1;
11373                 goto bailout;
11374         }
11375
11376 bailout:
11377         return (retval);
11378
11379 }
11380
11381 static void
11382 ctl_failover_io(union ctl_io *io, int have_lock)
11383 {
11384         ctl_set_busy(&io->scsiio);
11385         ctl_done(io);
11386 }
11387
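/*
 * Handle an HA failover event: for each LUN, either drop or abort I/O
 * that came from the other SC (LUNs where this side is primary), or take
 * over as primary and requeue or fail the outstanding I/O; the details
 * depend on whether we are in SER_ONLY or XFER HA mode, as the cases
 * below spell out.
 */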
11388 static void
11389 ctl_failover(void)
11390 {
11391         struct ctl_lun *lun;
11392         struct ctl_softc *ctl_softc;
11393         union ctl_io *next_io, *pending_io;
11394         union ctl_io *io;
11395         int lun_idx;
11396         int i;
11397
11398         ctl_softc = control_softc;
11399
11400         mtx_lock(&ctl_softc->ctl_lock);
11401         /*
11402          * Remove any cmds from the other SC from the rtr queue.  These
11403          * will obviously only be for LUNs for which we're the primary.
11404          * We can't send status or get/send data for these commands.
11405          * Since they haven't been executed yet, we can just remove them.
11406          * We'll either abort them or delete them below, depending on
11407          * which HA mode we're in.
11408          */
11409 #ifdef notyet
11410         mtx_lock(&ctl_softc->queue_lock);
11411         for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
11412              io != NULL; io = next_io) {
11413                 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
11414                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
11415                         STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
11416                                       ctl_io_hdr, links);
11417         }
11418         mtx_unlock(&ctl_softc->queue_lock);
11419 #endif
11420
11421         for (lun_idx = 0; lun_idx < ctl_softc->num_luns; lun_idx++) {
11422                 lun = ctl_softc->ctl_luns[lun_idx];
11423                 if (lun == NULL)
11424                         continue;
11425
11426                 /*
11427                  * Processor LUNs are primary on both sides.
11428                  * XXX will this always be true?
11429                  */
11430                 if (lun->be_lun->lun_type == T_PROCESSOR)
11431                         continue;
11432
11433                 if ((lun->flags & CTL_LUN_PRIMARY_SC)
11434                  && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
11435                         printf("FAILOVER: primary lun %d\n", lun_idx);
11436                         /*
11437                          * Remove all commands from the other SC: first from
11438                          * the blocked queue, then from the OOA queue.  Once
11439                          * we have removed them, call ctl_check_blocked() to
11440                          * see if there is anything that can run.
11441                          */
11442                         for (io = (union ctl_io *)TAILQ_FIRST(
11443                              &lun->blocked_queue); io != NULL; io = next_io) {
11444
11445                                 next_io = (union ctl_io *)TAILQ_NEXT(
11446                                     &io->io_hdr, blocked_links);
11447
11448                                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
11449                                         TAILQ_REMOVE(&lun->blocked_queue,
11450                                                      &io->io_hdr,blocked_links);
11451                                         io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
11452                                         TAILQ_REMOVE(&lun->ooa_queue,
11453                                                      &io->io_hdr, ooa_links);
11454
11455                                         ctl_free_io(io);
11456                                 }
11457                         }
11458
11459                         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
11460                              io != NULL; io = next_io) {
11461
11462                                 next_io = (union ctl_io *)TAILQ_NEXT(
11463                                     &io->io_hdr, ooa_links);
11464
11465                                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
11466
11467                                         TAILQ_REMOVE(&lun->ooa_queue,
11468                                                 &io->io_hdr,
11469                                                 ooa_links);
11470
11471                                         ctl_free_io(io);
11472                                 }
11473                         }
11474                         ctl_check_blocked(lun);
11475                 } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
11476                         && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
11477
11478                         printf("FAILOVER: primary lun %d\n", lun_idx);
11479                         /*
11480                          * Abort all commands from the other SC.  We can't
11481                          * send status back for them now.  These should get
11482                          * cleaned up when they are completed or come out
11483                          * for a datamove operation.
11484                          */
11485                         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
11486                              io != NULL; io = next_io) {
11487                                 next_io = (union ctl_io *)TAILQ_NEXT(
11488                                         &io->io_hdr, ooa_links);
11489
11490                                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
11491                                         io->io_hdr.flags |= CTL_FLAG_ABORT;
11492                         }
11493                 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
11494                         && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
11495
11496                         printf("FAILOVER: secondary lun %d\n", lun_idx);
11497
11498                         lun->flags |= CTL_LUN_PRIMARY_SC;
11499
11500                         /*
11501                          * We send all I/O that was sent to this controller
11502                          * and redirected to the other side back with
11503                          * busy status, and have the initiator retry it.
11504                          * Figuring out how much data has been transferred,
11505                          * etc. and picking up where we left off would be 
11506                          * very tricky.
11507                          *
11508                          * XXX KDM need to remove I/O from the blocked
11509                          * queue as well!
11510                          */
11511                         for (pending_io = (union ctl_io *)TAILQ_FIRST(
11512                              &lun->ooa_queue); pending_io != NULL;
11513                              pending_io = next_io) {
11514
11515                                 next_io =  (union ctl_io *)TAILQ_NEXT(
11516                                         &pending_io->io_hdr, ooa_links);
11517
11518                                 pending_io->io_hdr.flags &=
11519                                         ~CTL_FLAG_SENT_2OTHER_SC;
11520
11521                                 if (pending_io->io_hdr.flags &
11522                                     CTL_FLAG_IO_ACTIVE) {
11523                                         pending_io->io_hdr.flags |=
11524                                                 CTL_FLAG_FAILOVER;
11525                                 } else {
11526                                         ctl_set_busy(&pending_io->scsiio);
11527                                         ctl_done(pending_io);
11528                                 }
11529                         }
11530
11531                         /*
11532                          * Build Unit Attention
11533                          */
11534                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
11535                                 lun->pending_ua[i] |=
11536                                                      CTL_UA_ASYM_ACC_CHANGE;
11537                         }
11538                 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
11539                         && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
11540                         printf("FAILOVER: secondary lun %d\n", lun_idx);
11541                         /*
11542                          * If the first I/O on the OOA queue is not on the
11543                          * RtR queue, add it.
11544                          */
11545                         lun->flags |= CTL_LUN_PRIMARY_SC;
11546
11547                         pending_io = (union ctl_io *)TAILQ_FIRST(
11548                             &lun->ooa_queue);
11549                         if (pending_io == NULL) {
11550                                 printf("Nothing on OOA queue\n");
11551                                 continue;
11552                         }
11553
11554                         pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11555                         if ((pending_io->io_hdr.flags &
11556                              CTL_FLAG_IS_WAS_ON_RTR) == 0) {
11557                                 pending_io->io_hdr.flags |=
11558                                     CTL_FLAG_IS_WAS_ON_RTR;
11559                                 ctl_enqueue_rtr(pending_io);
11560                         }
11561 #if 0
11562                         else
11563                         {
11564                                 printf("Tag 0x%04x is running\n",
11565                                       pending_io->scsiio.tag_num);
11566                         }
11567 #endif
11568
11569                         next_io = (union ctl_io *)TAILQ_NEXT(
11570                             &pending_io->io_hdr, ooa_links);
11571                         for (pending_io = next_io; pending_io != NULL;
11572                              pending_io = next_io) {
11573                                 pending_io->io_hdr.flags &=
11574                                     ~CTL_FLAG_SENT_2OTHER_SC;
11575                                 next_io = (union ctl_io *)TAILQ_NEXT(
11576                                         &pending_io->io_hdr, ooa_links);
11577                                 if (pending_io->io_hdr.flags &
11578                                     CTL_FLAG_IS_WAS_ON_RTR) {
11579 #if 0
11580                                         printf("Tag 0x%04x is running\n",
11581                                                 pending_io->scsiio.tag_num);
11582 #endif
11583                                         continue;
11584                                 }
11585
11586                                 switch (ctl_check_ooa(lun, pending_io,
11587                                     (union ctl_io *)TAILQ_PREV(
11588                                     &pending_io->io_hdr, ctl_ooaq,
11589                                     ooa_links))) {
11590
11591                                 case CTL_ACTION_BLOCK:
11592                                         TAILQ_INSERT_TAIL(&lun->blocked_queue,
11593                                                           &pending_io->io_hdr,
11594                                                           blocked_links);
11595                                         pending_io->io_hdr.flags |=
11596                                             CTL_FLAG_BLOCKED;
11597                                         break;
11598                                 case CTL_ACTION_PASS:
11599                                 case CTL_ACTION_SKIP:
11600                                         pending_io->io_hdr.flags |=
11601                                             CTL_FLAG_IS_WAS_ON_RTR;
11602                                         ctl_enqueue_rtr(pending_io);
11603                                         break;
11604                                 case CTL_ACTION_OVERLAP:
11605                                         ctl_set_overlapped_cmd(
11606                                             (struct ctl_scsiio *)pending_io);
11607                                         ctl_done(pending_io);
11608                                         break;
11609                                 case CTL_ACTION_OVERLAP_TAG:
11610                                         ctl_set_overlapped_tag(
11611                                             (struct ctl_scsiio *)pending_io,
11612                                             pending_io->scsiio.tag_num & 0xff);
11613                                         ctl_done(pending_io);
11614                                         break;
11615                                 case CTL_ACTION_ERROR:
11616                                 default:
11617                                         ctl_set_internal_failure(
11618                                                 (struct ctl_scsiio *)pending_io,
11619                                                 /*sks_valid*/ 0,
11620                                                 /*retry_count*/ 0);
11621                                         ctl_done(pending_io);
11622                                         break;
11623                                 }
11624                         }
11625
11626                         /*
11627                          * Build Unit Attention
11628                          */
11629                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
11630                                 lun->pending_ua[i] |=
11631                                                      CTL_UA_ASYM_ACC_CHANGE;
11632                         }
11633                 } else {
11634                         panic("Unhandled HA mode failover, LUN flags = %#x, "
11635                               "ha_mode = #%x", lun->flags, ctl_softc->ha_mode);
11636                 }
11637         }
11638         ctl_pause_rtr = 0;
11639         mtx_unlock(&ctl_softc->ctl_lock);
11640 }
11641
11642 static int
11643 ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
11644 {
11645         struct ctl_lun *lun;
11646         const struct ctl_cmd_entry *entry;
11647         uint32_t initidx, targ_lun;
11648         int retval;
11649
11650         retval = 0;
11651
11652         lun = NULL;
11653
11654         targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11655         if ((targ_lun < CTL_MAX_LUNS)
11656          && (ctl_softc->ctl_luns[targ_lun] != NULL)) {
11657                 lun = ctl_softc->ctl_luns[targ_lun];
11658                 /*
11659                  * If the LUN is invalid, pretend that it doesn't exist.
11660                  * It will go away as soon as all pending I/O has been
11661                  * completed.
11662                  */
11663                 if (lun->flags & CTL_LUN_DISABLED) {
11664                         lun = NULL;
11665                 } else {
11666                         ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
11667                         ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
11668                                 lun->be_lun;
11669                         if (lun->be_lun->lun_type == T_PROCESSOR) {
11670                                 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
11671                         }
11672
11673                         /*
11674                          * Every I/O goes into the OOA queue for a
11675                          * particular LUN, and stays there until completion.
11676                          */
11677                         mtx_lock(&lun->lun_lock);
11678                         TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
11679                             ooa_links);
11680                 }
11681         } else {
11682                 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11683                 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11684         }
11685
11686         /* Get command entry and return error if it is unsupported. */
11687         entry = ctl_validate_command(ctsio);
11688         if (entry == NULL) {
11689                 if (lun)
11690                         mtx_unlock(&lun->lun_lock);
11691                 return (retval);
11692         }
11693
11694         ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11695         ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11696
11697         /*
11698          * Check to see whether we can send this command to LUNs that don't
11699          * exist.  This should pretty much only be the case for inquiry
11700          * and request sense.  The remaining checks below require a LUN,
11701          * so there is nothing more we can verify here.  Just put the
11702          * command on the RTR queue.
11703          */
11704         if (lun == NULL) {
11705                 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
11706                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11707                         ctl_enqueue_rtr((union ctl_io *)ctsio);
11708                         return (retval);
11709                 }
11710
11711                 ctl_set_unsupported_lun(ctsio);
11712                 ctl_done((union ctl_io *)ctsio);
11713                 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11714                 return (retval);
11715         } else {
11716                 /*
11717                  * Make sure we support this particular command on this LUN.
11718                  * e.g., we don't support writes to the control LUN.
11719                  */
11720                 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11721                         mtx_unlock(&lun->lun_lock);
11722                         ctl_set_invalid_opcode(ctsio);
11723                         ctl_done((union ctl_io *)ctsio);
11724                         return (retval);
11725                 }
11726         }
11727
11728         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11729
11730 #ifdef CTL_WITH_CA
11731         /*
11732          * If we've got a request sense, it'll clear the contingent
11733          * allegiance condition.  Otherwise, if we have a CA condition for
11734          * this initiator, clear it, because it sent down a command other
11735          * than request sense.
11736          */
11737         if ((ctsio->cdb[0] != REQUEST_SENSE)
11738          && (ctl_is_set(lun->have_ca, initidx)))
11739                 ctl_clear_mask(lun->have_ca, initidx);
11740 #endif
11741
11742         /*
11743          * If the command has this flag set, it handles its own unit
11744          * attention reporting, so we shouldn't do anything.  Otherwise we
11745          * check for any pending unit attentions, and send them back to the
11746          * initiator.  We only do this when a command initially comes in,
11747          * not when we pull it off the blocked queue.
11748          *
11749          * According to SAM-3, section 5.3.2, the order that things get
11750          * presented back to the host is basically unit attentions caused
11751          * by some sort of reset event, busy status, reservation conflicts
11752          * or task set full, and finally any other status.
11753          *
11754          * One issue here is that some of the unit attentions we report
11755          * don't fall into the "reset" category (e.g. "reported luns data
11756          * has changed").  So reporting it here, before the reservation
11757          * check, may be technically wrong.  I guess the only thing to do
11758          * would be to check for and report the reset events here, and then
11759          * check for the other unit attention types after we check for a
11760          * reservation conflict.
11761          *
11762          * XXX KDM need to fix this
11763          */
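        /*
         * A sketch of the fix proposed above, in a disabled fragment.
         * CTL_UA_RESET_CLASS is a hypothetical mask that does not exist
         * in CTL today; the idea is to report reset-style unit attentions
         * first and defer the rest until after the reservation check.
         */
#if 0
        if (lun->pending_ua[initidx] & CTL_UA_RESET_CLASS) {
                /* Build and report reset-class sense data here. */
        }
        /* ... reservation conflict check goes here ... */
        if (lun->pending_ua[initidx] & ~CTL_UA_RESET_CLASS) {
                /* Report the remaining unit attention types here. */
        }
#endif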
11764         if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
11765                 ctl_ua_type ua_type;
11766
11767                 if (lun->pending_ua[initidx] != CTL_UA_NONE) {
11768                         scsi_sense_data_type sense_format;
11769
11770                         /*
11771                          * lun is known to be non-NULL here; the
11772                          * lun == NULL case returned above.
11773                          */
11774                         sense_format = (lun->flags & CTL_LUN_SENSE_DESC) ?
11775                             SSD_TYPE_DESC : SSD_TYPE_FIXED;
11776
11777                         ua_type = ctl_build_ua(&lun->pending_ua[initidx],
11778                             &ctsio->sense_data, sense_format);
11779                         if (ua_type != CTL_UA_NONE) {
11780                                 ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
11781                                 ctsio->io_hdr.status = CTL_SCSI_ERROR |
11782                                                        CTL_AUTOSENSE;
11783                                 ctsio->sense_len = SSD_FULL_SIZE;
11784                                 mtx_unlock(&lun->lun_lock);
11785                                 ctl_done((union ctl_io *)ctsio);
11786                                 return (retval);
11787                         }
11788                 }
11789         }
11790
11791
11792         if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
11793                 mtx_unlock(&lun->lun_lock);
11794                 ctl_done((union ctl_io *)ctsio);
11795                 return (retval);
11796         }
11797
11798         /*
11799          * XXX CHD this is where we want to send IO to other side if
11800          * this LUN is secondary on this SC. We will need to make a copy
11801          * of the IO and flag the IO on this side as SENT_2OTHER and the flag
11802          * the copy we send as FROM_OTHER.
11803          * We also need to stuff the address of the original IO so we can
11804          * find it easily. Something similar will need be done on the other
11805          * side so when we are done we can find the copy.
11806          */
11807         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
11808                 union ctl_ha_msg msg_info;
11809                 int isc_retval;
11810
11811                 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11812
11813                 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
11814                 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
11815 #if 0
11816                 printf("1. ctsio %p\n", ctsio);
11817 #endif
11818                 msg_info.hdr.serializing_sc = NULL;
11819                 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
11820                 msg_info.scsi.tag_num = ctsio->tag_num;
11821                 msg_info.scsi.tag_type = ctsio->tag_type;
11822                 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
11823
11824                 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11825
11826                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11827                     (void *)&msg_info, sizeof(msg_info), 0)) >
11828                     CTL_HA_STATUS_SUCCESS) {
11829                         printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
11830                                isc_retval);
11831                         printf("CTL:opcode is %x\n", ctsio->cdb[0]);
11832                 } else {
11833 #if 0
11834                         printf("CTL:Precheck sent msg, opcode is %x\n", ctsio->cdb[0]);
11835 #endif
11836                 }
11837
11838                 /*
11839                  * XXX KDM this I/O is off the incoming queue, but hasn't
11840                  * been inserted on any other queue.  We may need to come
11841                  * up with a holding queue while we wait for serialization
11842                  * so that we have an idea of what we're waiting for from
11843                  * the other side.
11844                  */
11845                 mtx_unlock(&lun->lun_lock);
11846                 return (retval);
11847         }
11848
11849         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
11850                               (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
11851                               ctl_ooaq, ooa_links))) {
11852         case CTL_ACTION_BLOCK:
11853                 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
11854                 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
11855                                   blocked_links);
11856                 mtx_unlock(&lun->lun_lock);
11857                 return (retval);
11858         case CTL_ACTION_PASS:
11859         case CTL_ACTION_SKIP:
11860                 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11861                 mtx_unlock(&lun->lun_lock);
11862                 ctl_enqueue_rtr((union ctl_io *)ctsio);
11863                 break;
11864         case CTL_ACTION_OVERLAP:
11865                 mtx_unlock(&lun->lun_lock);
11866                 ctl_set_overlapped_cmd(ctsio);
11867                 ctl_done((union ctl_io *)ctsio);
11868                 break;
11869         case CTL_ACTION_OVERLAP_TAG:
11870                 mtx_unlock(&lun->lun_lock);
11871                 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
11872                 ctl_done((union ctl_io *)ctsio);
11873                 break;
11874         case CTL_ACTION_ERROR:
11875         default:
11876                 mtx_unlock(&lun->lun_lock);
11877                 ctl_set_internal_failure(ctsio,
11878                                          /*sks_valid*/ 0,
11879                                          /*retry_count*/ 0);
11880                 ctl_done((union ctl_io *)ctsio);
11881                 break;
11882         }
11883         return (retval);
11884 }
11885
11886 const struct ctl_cmd_entry *
11887 ctl_get_cmd_entry(struct ctl_scsiio *ctsio)
11888 {
11889         const struct ctl_cmd_entry *entry;
11890         int service_action;
11891
11892         entry = &ctl_cmd_table[ctsio->cdb[0]];
11893         if (entry->flags & CTL_CMD_FLAG_SA5) {
11894                 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
11895                 entry = &((const struct ctl_cmd_entry *)
11896                     entry->execute)[service_action];
11897         }
11898         return (entry);
11899 }
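
/*
 * Example of the lookup above (opcode values per SPC/SBC; whether a given
 * opcode is SA5-flagged depends on ctl_cmd_table): a plain opcode such as
 * TEST UNIT READY (0x00) resolves directly to ctl_cmd_table[0x00].  For an
 * opcode whose entry carries CTL_CMD_FLAG_SA5, e.g. SERVICE ACTION IN(16)
 * (0x9e), the "execute" member is really a pointer to a second-level table
 * indexed by the service action in cdb[1], so READ CAPACITY(16) (service
 * action 0x10) resolves to entry 0x10 of that sub-table.
 */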
11900
11901 const struct ctl_cmd_entry *
11902 ctl_validate_command(struct ctl_scsiio *ctsio)
11903 {
11904         const struct ctl_cmd_entry *entry;
11905         int i;
11906         uint8_t diff;
11907
11908         entry = ctl_get_cmd_entry(ctsio);
11909         if (entry->execute == NULL) {
11910                 ctl_set_invalid_opcode(ctsio);
11911                 ctl_done((union ctl_io *)ctsio);
11912                 return (NULL);
11913         }
11914         KASSERT(entry->length > 0,
11915             ("Length not defined for command 0x%02x/0x%02x",
11916              ctsio->cdb[0], ctsio->cdb[1]));
11917         for (i = 1; i < entry->length; i++) {
11918                 diff = ctsio->cdb[i] & ~entry->usage[i - 1];
11919                 if (diff == 0)
11920                         continue;
11921                 ctl_set_invalid_field(ctsio,
11922                                       /*sks_valid*/ 1,
11923                                       /*command*/ 1,
11924                                       /*field*/ i,
11925                                       /*bit_valid*/ 1,
11926                                       /*bit*/ fls(diff) - 1);
11927                 ctl_done((union ctl_io *)ctsio);
11928                 return (NULL);
11929         }
11930         return (entry);
11931 }
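
/*
 * Worked example of the usage-mask check above (illustrative values): if
 * entry->usage[i - 1] is 0x1f, i.e. only the low five bits of CDB byte i
 * are defined, and the initiator sends 0x3f, then
 * diff = 0x3f & ~0x1f = 0x20 and fls(0x20) - 1 = 5, so the sense data
 * flags bit 5 of CDB byte i as the invalid field.
 */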
11932
11933 static int
11934 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
11935 {
11936
11937         switch (lun_type) {
11938         case T_PROCESSOR:
11939                 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
11940                     ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
11941                         return (0);
11942                 break;
11943         case T_DIRECT:
11944                 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
11945                     ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
11946                         return (0);
11947                 break;
11948         default:
11949                 return (0);
11950         }
11951         return (1);
11952 }
11953
11954 static int
11955 ctl_scsiio(struct ctl_scsiio *ctsio)
11956 {
11957         int retval;
11958         const struct ctl_cmd_entry *entry;
11959
11960         retval = CTL_RETVAL_COMPLETE;
11961
11962         CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
11963
11964         entry = ctl_get_cmd_entry(ctsio);
11965
11966         /*
11967          * If this I/O has been aborted, just send it straight to
11968          * ctl_done() without executing it.
11969          */
11970         if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
11971                 ctl_done((union ctl_io *)ctsio);
11972                 goto bailout;
11973         }
11974
11975         /*
11976          * All the checks should have been handled by ctl_scsiio_precheck().
11977          * We should be clear now to just execute the I/O.
11978          */
11979         retval = entry->execute(ctsio);
11980
11981 bailout:
11982         return (retval);
11983 }
11984
11985 /*
11986  * Since we only implement one target right now, a bus reset simply resets
11987  * our single target.
11988  */
11989 static int
11990 ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io)
11991 {
11992         return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET));
11993 }
11994
11995 static int
11996 ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
11997                  ctl_ua_type ua_type)
11998 {
11999         struct ctl_lun *lun;
12000         int retval;
12001
12002         if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12003                 union ctl_ha_msg msg_info;
12004
12005                 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
12006                 msg_info.hdr.nexus = io->io_hdr.nexus;
12007                 if (ua_type == CTL_UA_TARG_RESET)
12008                         msg_info.task.task_action = CTL_TASK_TARGET_RESET;
12009                 else
12010                         msg_info.task.task_action = CTL_TASK_BUS_RESET;
12011                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12012                 msg_info.hdr.original_sc = NULL;
12013                 msg_info.hdr.serializing_sc = NULL;
12014                 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12015                     (void *)&msg_info, sizeof(msg_info), 0)) {
12016                 }
12017         }
12018         retval = 0;
12019
12020         mtx_lock(&ctl_softc->ctl_lock);
12021         STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
12022                 retval += ctl_lun_reset(lun, io, ua_type);
12023         mtx_unlock(&ctl_softc->ctl_lock);
12024
12025         return (retval);
12026 }
12027
12028 /*
12029  * The LUN should always be set.  The I/O is optional, and is used to
12030  * distinguish between I/Os sent by this initiator, and by other
12031  * initiators.  We set unit attention for initiators other than this one.
12032  * SAM-3 is vague on this point.  It does say that a unit attention should
12033  * be established for other initiators when a LUN is reset (see section
12034  * 5.7.3), but it doesn't specifically say that the unit attention should
12035  * be established for this particular initiator when a LUN is reset.  Here
12036  * is the relevant text, from SAM-3 rev 8:
12037  *
12038  * 5.7.2 When a SCSI initiator port aborts its own tasks
12039  *
12040  * When a SCSI initiator port causes its own task(s) to be aborted, no
12041  * notification that the task(s) have been aborted shall be returned to
12042  * the SCSI initiator port other than the completion response for the
12043  * command or task management function action that caused the task(s) to
12044  * be aborted and notification(s) associated with related effects of the
12045  * action (e.g., a reset unit attention condition).
12046  *
12047  * XXX KDM for now, we're setting unit attention for all initiators.
12048  */
12049 static int
12050 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
12051 {
12052         union ctl_io *xio;
12053 #if 0
12054         uint32_t initindex;
12055 #endif
12056         int i;
12057
12058         mtx_lock(&lun->lun_lock);
12059         /*
12060          * Run through the OOA queue and abort each I/O.
12061          */
12062 #if 0
12063         TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
12064 #endif
12065         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12066              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12067                 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
12068         }
12069
12070         /*
12071          * This version sets unit attention for every initiator but the requester.
12072          */
12073 #if 0
12074         initindex = ctl_get_initindex(&io->io_hdr.nexus);
12075         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
12076                 if (initindex == i)
12077                         continue;
12078                 lun->pending_ua[i] |= ua_type;
12079         }
12080 #endif
12081
12082         /*
12083          * A reset (any kind, really) clears reservations established with
12084          * RESERVE/RELEASE.  It does not clear reservations established
12085          * with PERSISTENT RESERVE OUT, but we don't support that at the
12086          * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
12087          * reservations made with the RESERVE/RELEASE commands, because
12088          * those commands are obsolete in SPC-3.
12089          */
12090         lun->flags &= ~CTL_LUN_RESERVED;
12091
12092         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
12093 #ifdef CTL_WITH_CA
12094                 ctl_clear_mask(lun->have_ca, i);
12095 #endif
12096                 lun->pending_ua[i] |= ua_type;
12097         }
12098         mtx_unlock(&lun->lun_lock);
12099
12100         return (0);
12101 }
12102
12103 static void
12104 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
12105     int other_sc)
12106 {
12107         union ctl_io *xio;
12108
12109         mtx_assert(&lun->lun_lock, MA_OWNED);
12110
12111         /*
12112          * Run through the OOA queue and attempt to find the given I/O.
12113          * The target port, initiator ID, tag type and tag number have to
12114          * match the values that we got from the initiator.  If we have an
12115          * untagged command to abort, simply abort the first untagged command
12116          * we come to.  We only allow one untagged command at a time of course.
12117          */
12118         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12119              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12120
12121                 if ((targ_port == UINT32_MAX ||
12122                      targ_port == xio->io_hdr.nexus.targ_port) &&
12123                     (init_id == UINT32_MAX ||
12124                      init_id == xio->io_hdr.nexus.initid.id)) {
12125                         if (targ_port != xio->io_hdr.nexus.targ_port ||
12126                             init_id != xio->io_hdr.nexus.initid.id)
12127                                 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
12128                         xio->io_hdr.flags |= CTL_FLAG_ABORT;
12129                         if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12130                                 union ctl_ha_msg msg_info;
12131
12132                                 msg_info.hdr.nexus = xio->io_hdr.nexus;
12133                                 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12134                                 msg_info.task.tag_num = xio->scsiio.tag_num;
12135                                 msg_info.task.tag_type = xio->scsiio.tag_type;
12136                                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12137                                 msg_info.hdr.original_sc = NULL;
12138                                 msg_info.hdr.serializing_sc = NULL;
12139                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12140                                     (void *)&msg_info, sizeof(msg_info), 0);
12141                         }
12142                 }
12143         }
12144 }
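
/*
 * Call patterns for the wildcard matching above, mirroring the uses in
 * ctl_abort_task_set() below.  UINT32_MAX matches any value, so
 *
 *	ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 0);
 *
 * sweeps every I/O on the LUN (CLEAR TASK SET), while
 *
 *	ctl_abort_tasks_lun(lun, targ_port, init_id, 0);
 *
 * limits the sweep to a single I_T nexus (ABORT TASK SET).  I/Os matched
 * only through a wildcard also get CTL_FLAG_ABORT_STATUS, so they are
 * failed back with an explicit aborted status.
 */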
12145
12146 static int
12147 ctl_abort_task_set(union ctl_io *io)
12148 {
12149         struct ctl_softc *softc = control_softc;
12150         struct ctl_lun *lun;
12151         uint32_t targ_lun;
12152
12153         /*
12154          * Look up the LUN.
12155          */
12156         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12157         mtx_lock(&softc->ctl_lock);
12158         if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
12159                 lun = softc->ctl_luns[targ_lun];
12160         else {
12161                 mtx_unlock(&softc->ctl_lock);
12162                 return (1);
12163         }
12164
12165         mtx_lock(&lun->lun_lock);
12166         mtx_unlock(&softc->ctl_lock);
12167         if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
12168                 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12169                     io->io_hdr.nexus.initid.id,
12170                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12171         } else { /* CTL_TASK_CLEAR_TASK_SET */
12172                 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
12173                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12174         }
12175         mtx_unlock(&lun->lun_lock);
12176         return (0);
12177 }
12178
12179 static int
12180 ctl_i_t_nexus_reset(union ctl_io *io)
12181 {
12182         struct ctl_softc *softc = control_softc;
12183         struct ctl_lun *lun;
12184         uint32_t initindex;
12185
12186         initindex = ctl_get_initindex(&io->io_hdr.nexus);
12187         mtx_lock(&softc->ctl_lock);
12188         STAILQ_FOREACH(lun, &softc->lun_list, links) {
12189                 mtx_lock(&lun->lun_lock);
12190                 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12191                     io->io_hdr.nexus.initid.id,
12192                     (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12193 #ifdef CTL_WITH_CA
12194                 ctl_clear_mask(lun->have_ca, initindex);
12195 #endif
12196                 lun->pending_ua[initindex] |= CTL_UA_I_T_NEXUS_LOSS;
12197                 mtx_unlock(&lun->lun_lock);
12198         }
12199         mtx_unlock(&softc->ctl_lock);
12200         return (0);
12201 }
12202
12203 static int
12204 ctl_abort_task(union ctl_io *io)
12205 {
12206         union ctl_io *xio;
12207         struct ctl_lun *lun;
12208         struct ctl_softc *ctl_softc;
12209 #if 0
12210         struct sbuf sb;
12211         char printbuf[128];
12212 #endif
12213         int found;
12214         uint32_t targ_lun;
12215
12216         ctl_softc = control_softc;
12217         found = 0;
12218
12219         /*
12220          * Look up the LUN.
12221          */
12222         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12223         mtx_lock(&ctl_softc->ctl_lock);
12224         if ((targ_lun < CTL_MAX_LUNS)
12225          && (ctl_softc->ctl_luns[targ_lun] != NULL))
12226                 lun = ctl_softc->ctl_luns[targ_lun];
12227         else {
12228                 mtx_unlock(&ctl_softc->ctl_lock);
12229                 return (1);
12230         }
12231
12232 #if 0
12233         printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
12234                lun->lun, io->taskio.tag_num, io->taskio.tag_type);
12235 #endif
12236
12237         mtx_lock(&lun->lun_lock);
12238         mtx_unlock(&ctl_softc->ctl_lock);
12239         /*
12240          * Run through the OOA queue and attempt to find the given I/O.
12241          * The target port, initiator ID, tag type and tag number have to
12242          * match the values that we got from the initiator.  If we have an
12243          * untagged command to abort, simply abort the first untagged command
12244          * we come to.  We only allow one untagged command at a time of course.
12245          */
12246 #if 0
12247         TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
12248 #endif
12249         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12250              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12251 #if 0
12252                 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
12253
12254                 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
12255                             lun->lun, xio->scsiio.tag_num,
12256                             xio->scsiio.tag_type,
12257                             (xio->io_hdr.blocked_links.tqe_prev
12258                             == NULL) ? "" : " BLOCKED",
12259                             (xio->io_hdr.flags &
12260                             CTL_FLAG_DMA_INPROG) ? " DMA" : "",
12261                             (xio->io_hdr.flags &
12262                             CTL_FLAG_ABORT) ? " ABORT" : "",
12263                             (xio->io_hdr.flags &
12264                             CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
12265                 ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
12266                 sbuf_finish(&sb);
12267                 printf("%s\n", sbuf_data(&sb));
12268 #endif
12269
12270                 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
12271                  && (xio->io_hdr.nexus.initid.id ==
12272                      io->io_hdr.nexus.initid.id)) {
12273                         /*
12274                          * If the abort says that the task is untagged, the
12275                          * task in the queue must be untagged.  Otherwise,
12276                          * we just check to see whether the tag numbers
12277                          * match.  This is because the QLogic firmware
12278                          * doesn't pass back the tag type in an abort
12279                          * request.
12280                          */
12281 #if 0
12282                         if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
12283                           && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
12284                          || (xio->scsiio.tag_num == io->taskio.tag_num)) {
12285 #endif
12286                         /*
12287                          * XXX KDM we've got problems with FC, because it
12288                          * doesn't send down a tag type with aborts.  So we
12289                          * can only really go by the tag number...
12290                          * This may cause problems with parallel SCSI.
12291                          * Need to figure that out!!
12292                          */
12293                         if (xio->scsiio.tag_num == io->taskio.tag_num) {
12294                                 xio->io_hdr.flags |= CTL_FLAG_ABORT;
12295                                 found = 1;
12296                                 if ((io->io_hdr.flags &
12297                                      CTL_FLAG_FROM_OTHER_SC) == 0 &&
12298                                     !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12299                                         union ctl_ha_msg msg_info;
12300
12301                                         io->io_hdr.flags |=
12302                                                         CTL_FLAG_SENT_2OTHER_SC;
12303                                         msg_info.hdr.nexus = io->io_hdr.nexus;
12304                                         msg_info.task.task_action =
12305                                                 CTL_TASK_ABORT_TASK;
12306                                         msg_info.task.tag_num =
12307                                                 io->taskio.tag_num;
12308                                         msg_info.task.tag_type =
12309                                                 io->taskio.tag_type;
12310                                         msg_info.hdr.msg_type =
12311                                                 CTL_MSG_MANAGE_TASKS;
12312                                         msg_info.hdr.original_sc = NULL;
12313                                         msg_info.hdr.serializing_sc = NULL;
12314 #if 0
12315                                         printf("Sent Abort to other side\n");
12316 #endif
12317                                         if (CTL_HA_STATUS_SUCCESS !=
12318                                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12319                                                 (void *)&msg_info,
12320                                                 sizeof(msg_info), 0)) {
12321                                         }
12322                                 }
12323 #if 0
12324                                 printf("ctl_abort_task: found I/O to abort\n");
12325 #endif
12326                                 break;
12327                         }
12328                 }
12329         }
12330         mtx_unlock(&lun->lun_lock);
12331
12332         if (found == 0) {
12333                 /*
12334                  * This isn't really an error.  It's entirely possible for
12335                  * the abort and command completion to cross on the wire.
12336                  * This is more of an informative/diagnostic message.
12337                  */
12338 #if 0
12339                 printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
12340                        "%d:%d:%d:%d tag %d type %d\n",
12341                        io->io_hdr.nexus.initid.id,
12342                        io->io_hdr.nexus.targ_port,
12343                        io->io_hdr.nexus.targ_target.id,
12344                        io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
12345                        io->taskio.tag_type);
12346 #endif
12347         }
12348         return (0);
12349 }
12350
12351 static void
12352 ctl_run_task(union ctl_io *io)
12353 {
12354         struct ctl_softc *ctl_softc = control_softc;
12355         int retval = 1;
12356         const char *task_desc;
12357
12358         CTL_DEBUG_PRINT(("ctl_run_task\n"));
12359
12360         KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12361             ("ctl_run_task: Unexpected io_type %d\n",
12362              io->io_hdr.io_type));
12363
12364         task_desc = ctl_scsi_task_string(&io->taskio);
12365         if (task_desc != NULL) {
12366 #ifdef NEEDTOPORT
12367                 csevent_log(CSC_CTL | CSC_SHELF_SW |
12368                             CTL_TASK_REPORT,
12369                             csevent_LogType_Trace,
12370                             csevent_Severity_Information,
12371                             csevent_AlertLevel_Green,
12372                             csevent_FRU_Firmware,
12373                             csevent_FRU_Unknown,
12374                             "CTL: received task: %s",task_desc);
12375 #endif
12376         } else {
12377 #ifdef NEEDTOPORT
12378                 csevent_log(CSC_CTL | CSC_SHELF_SW |
12379                             CTL_TASK_REPORT,
12380                             csevent_LogType_Trace,
12381                             csevent_Severity_Information,
12382                             csevent_AlertLevel_Green,
12383                             csevent_FRU_Firmware,
12384                             csevent_FRU_Unknown,
12385                             "CTL: received unknown task "
12386                             "type: %d (%#x)",
12387                             io->taskio.task_action,
12388                             io->taskio.task_action);
12389 #endif
12390         }
12391         switch (io->taskio.task_action) {
12392         case CTL_TASK_ABORT_TASK:
12393                 retval = ctl_abort_task(io);
12394                 break;
12395         case CTL_TASK_ABORT_TASK_SET:
12396         case CTL_TASK_CLEAR_TASK_SET:
12397                 retval = ctl_abort_task_set(io);
12398                 break;
12399         case CTL_TASK_CLEAR_ACA:
12400                 break;
12401         case CTL_TASK_I_T_NEXUS_RESET:
12402                 retval = ctl_i_t_nexus_reset(io);
12403                 break;
12404         case CTL_TASK_LUN_RESET: {
12405                 struct ctl_lun *lun;
12406                 uint32_t targ_lun;
12407
12408                 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12409                 mtx_lock(&ctl_softc->ctl_lock);
12410                 if ((targ_lun < CTL_MAX_LUNS)
12411                  && (ctl_softc->ctl_luns[targ_lun] != NULL))
12412                         lun = ctl_softc->ctl_luns[targ_lun];
12413                 else {
12414                         mtx_unlock(&ctl_softc->ctl_lock);
12415                         retval = 1;
12416                         break;
12417                 }
12418
12419                 if (!(io->io_hdr.flags &
12420                     CTL_FLAG_FROM_OTHER_SC)) {
12421                         union ctl_ha_msg msg_info;
12422
12423                         io->io_hdr.flags |=
12424                                 CTL_FLAG_SENT_2OTHER_SC;
12425                         msg_info.hdr.msg_type =
12426                                 CTL_MSG_MANAGE_TASKS;
12427                         msg_info.hdr.nexus = io->io_hdr.nexus;
12428                         msg_info.task.task_action =
12429                                 CTL_TASK_LUN_RESET;
12430                         msg_info.hdr.original_sc = NULL;
12431                         msg_info.hdr.serializing_sc = NULL;
12432                         if (CTL_HA_STATUS_SUCCESS !=
12433                             ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12434                             (void *)&msg_info,
12435                             sizeof(msg_info), 0)) {
12436                         }
12437                 }
12438
12439                 retval = ctl_lun_reset(lun, io,
12440                                        CTL_UA_LUN_RESET);
12441                 mtx_unlock(&ctl_softc->ctl_lock);
12442                 break;
12443         }
12444         case CTL_TASK_TARGET_RESET:
12445                 retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET);
12446                 break;
12447         case CTL_TASK_BUS_RESET:
12448                 retval = ctl_bus_reset(ctl_softc, io);
12449                 break;
12450         case CTL_TASK_PORT_LOGIN:
12451                 break;
12452         case CTL_TASK_PORT_LOGOUT:
12453                 break;
12454         default:
12455                 printf("ctl_run_task: got unknown task management event %d\n",
12456                        io->taskio.task_action);
12457                 break;
12458         }
12459         if (retval == 0)
12460                 io->io_hdr.status = CTL_SUCCESS;
12461         else
12462                 io->io_hdr.status = CTL_ERROR;
12463         ctl_done(io);
12464 }
12465
12466 /*
12467  * For HA operation.  Handle commands that come in from the other
12468  * controller.
12469  */
12470 static void
12471 ctl_handle_isc(union ctl_io *io)
12472 {
12473         int free_io;
12474         struct ctl_lun *lun;
12475         struct ctl_softc *ctl_softc;
12476         uint32_t targ_lun;
12477
12478         ctl_softc = control_softc;
12479
12480         targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12481         lun = ctl_softc->ctl_luns[targ_lun];
12482
12483         switch (io->io_hdr.msg_type) {
12484         case CTL_MSG_SERIALIZE:
12485                 free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
12486                 break;
12487         case CTL_MSG_R2R: {
12488                 const struct ctl_cmd_entry *entry;
12489
12490                 /*
12491                  * This is only used in SER_ONLY mode.
12492                  */
12493                 free_io = 0;
12494                 entry = ctl_get_cmd_entry(&io->scsiio);
12495                 mtx_lock(&lun->lun_lock);
12496                 if (ctl_scsiio_lun_check(ctl_softc, lun,
12497                     entry, (struct ctl_scsiio *)io) != 0) {
12498                         mtx_unlock(&lun->lun_lock);
12499                         ctl_done(io);
12500                         break;
12501                 }
12502                 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
12503                 mtx_unlock(&lun->lun_lock);
12504                 ctl_enqueue_rtr(io);
12505                 break;
12506         }
12507         case CTL_MSG_FINISH_IO:
12508                 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
12509                         free_io = 0;
12510                         ctl_done(io);
12511                 } else {
12512                         free_io = 1;
12513                         mtx_lock(&lun->lun_lock);
12514                         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
12515                                      ooa_links);
12516                         ctl_check_blocked(lun);
12517                         mtx_unlock(&lun->lun_lock);
12518                 }
12519                 break;
12520         case CTL_MSG_PERS_ACTION:
12521                 ctl_hndl_per_res_out_on_other_sc(
12522                         (union ctl_ha_msg *)&io->presio.pr_msg);
12523                 free_io = 1;
12524                 break;
12525         case CTL_MSG_BAD_JUJU:
12526                 free_io = 0;
12527                 ctl_done(io);
12528                 break;
12529         case CTL_MSG_DATAMOVE:
12530                 /* Only used in XFER mode */
12531                 free_io = 0;
12532                 ctl_datamove_remote(io);
12533                 break;
12534         case CTL_MSG_DATAMOVE_DONE:
12535                 /* Only used in XFER mode */
12536                 free_io = 0;
12537                 io->scsiio.be_move_done(io);
12538                 break;
12539         default:
12540                 free_io = 1;
12541                 printf("%s: Invalid message type %d\n",
12542                        __func__, io->io_hdr.msg_type);
12543                 break;
12544         }
12545         if (free_io)
12546                 ctl_free_io(io);
12547
12548 }
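
/*
 * A simplified sketch of the SER_ONLY message flow handled above: the SC
 * that receives a command for a LUN it is not primary for sends
 * CTL_MSG_SERIALIZE to its peer; the peer queues a shadow copy on its OOA
 * queue and answers CTL_MSG_R2R once the command is clear to run; when the
 * originating SC completes the command it sends CTL_MSG_FINISH_IO so the
 * peer can drop the shadow copy and unblock anything serialized behind it.
 */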
12549
12550
12551 /*
12552  * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
12553  * there is no match.
12554  */
12555 static ctl_lun_error_pattern
12556 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
12557 {
12558         const struct ctl_cmd_entry *entry;
12559         ctl_lun_error_pattern filtered_pattern, pattern;
12560
12561         pattern = desc->error_pattern;
12562
12563         /*
12564          * XXX KDM we need more data passed into this function to match a
12565          * custom pattern, and we actually need to implement custom pattern
12566          * matching.
12567          */
12568         if (pattern & CTL_LUN_PAT_CMD)
12569                 return (CTL_LUN_PAT_CMD);
12570
12571         if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
12572                 return (CTL_LUN_PAT_ANY);
12573
12574         entry = ctl_get_cmd_entry(ctsio);
12575
12576         filtered_pattern = entry->pattern & pattern;
12577
12578         /*
12579          * If the user requested specific flags in the pattern (e.g.
12580          * CTL_LUN_PAT_RANGE), make sure the command supports all of those
12581          * flags.
12582          *
12583          * If the user did not specify any flags, it doesn't matter whether
12584          * or not the command supports the flags.
12585          */
12586         if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
12587              (pattern & ~CTL_LUN_PAT_MASK))
12588                 return (CTL_LUN_PAT_NONE);
12589
12590         /*
12591          * If the user asked for a range check, see if the requested LBA
12592          * range overlaps with this command's LBA range.
12593          */
12594         if (filtered_pattern & CTL_LUN_PAT_RANGE) {
12595                 uint64_t lba1;
12596                 uint64_t len1;
12597                 ctl_action action;
12598                 int retval;
12599
12600                 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
12601                 if (retval != 0)
12602                         return (CTL_LUN_PAT_NONE);
12603
12604                 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
12605                                               desc->lba_range.len);
12606                 /*
12607                  * A "pass" means that the LBA ranges don't overlap, so
12608                  * this doesn't match the user's range criteria.
12609                  */
12610                 if (action == CTL_ACTION_PASS)
12611                         return (CTL_LUN_PAT_NONE);
12612         }
12613
12614         return (filtered_pattern);
12615 }
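
/*
 * Worked example (illustrative, assuming the command's entry advertises
 * range matching): a descriptor with desc->error_pattern = CTL_LUN_PAT_ANY
 * matches every command.  With CTL_LUN_PAT_RANGE set and desc->lba_range
 * covering LBAs 200-299, a READ spanning LBAs 100-199 does not match:
 * ctl_extent_check_lba() returns CTL_ACTION_PASS (no overlap), so this
 * function returns CTL_LUN_PAT_NONE and no error is injected.
 */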
12616
12617 static void
12618 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
12619 {
12620         struct ctl_error_desc *desc, *desc2;
12621
12622         mtx_assert(&lun->lun_lock, MA_OWNED);
12623
12624         STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
12625                 ctl_lun_error_pattern pattern;
12626                 /*
12627                  * Check to see whether this particular command matches
12628                  * the pattern in the descriptor.
12629                  */
12630                 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
12631                 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
12632                         continue;
12633
12634                 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
12635                 case CTL_LUN_INJ_ABORTED:
12636                         ctl_set_aborted(&io->scsiio);
12637                         break;
12638                 case CTL_LUN_INJ_MEDIUM_ERR:
12639                         ctl_set_medium_error(&io->scsiio);
12640                         break;
12641                 case CTL_LUN_INJ_UA:
12642                         /* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
12643                          * OCCURRED */
12644                         ctl_set_ua(&io->scsiio, 0x29, 0x00);
12645                         break;
12646                 case CTL_LUN_INJ_CUSTOM:
12647                         /*
12648                          * We're assuming the user knows what he is doing.
12649                          * Just copy the sense information without doing
12650                          * checks.
12651                          */
12652                         bcopy(&desc->custom_sense, &io->scsiio.sense_data,
12653                               ctl_min(sizeof(desc->custom_sense),
12654                                       sizeof(io->scsiio.sense_data)));
12655                         io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
12656                         io->scsiio.sense_len = SSD_FULL_SIZE;
12657                         io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
12658                         break;
12659                 case CTL_LUN_INJ_NONE:
12660                 default:
12661                         /*
12662                          * If this is an error injection type we don't know
12663                          * about, clear the continuous flag (if it is set)
12664                          * so it will get deleted below.
12665                          */
12666                         desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
12667                         break;
12668                 }
12669                 /*
12670                  * By default, each error injection action is a one-shot
12671                  */
12672                 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
12673                         continue;
12674
12675                 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
12676
12677                 free(desc, M_CTL);
12678         }
12679 }
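
/*
 * Sketch of a descriptor the loop above would act on, as a disabled
 * fragment (how descriptors reach lun->error_list -- normally via CTL's
 * error injection ioctl -- is outside this function):
 */
#if 0
        desc->error_pattern = CTL_LUN_PAT_ANY;
        desc->lun_error = CTL_LUN_INJ_MEDIUM_ERR | CTL_LUN_INJ_CONTINUOUS;
        /*
         * With CTL_LUN_INJ_CONTINUOUS set, ctl_inject_error() skips the
         * STAILQ_REMOVE()/free() step above, so this descriptor fires on
         * every matching command until it is explicitly removed.
         */
#endif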
12680
12681 #ifdef CTL_IO_DELAY
12682 static void
12683 ctl_datamove_timer_wakeup(void *arg)
12684 {
12685         union ctl_io *io;
12686
12687         io = (union ctl_io *)arg;
12688
12689         ctl_datamove(io);
12690 }
12691 #endif /* CTL_IO_DELAY */
12692
12693 void
12694 ctl_datamove(union ctl_io *io)
12695 {
12696         void (*fe_datamove)(union ctl_io *io);
12697
12698         mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
12699
12700         CTL_DEBUG_PRINT(("ctl_datamove\n"));
12701
12702 #ifdef CTL_TIME_IO
12703         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12704                 char str[256];
12705                 char path_str[64];
12706                 struct sbuf sb;
12707
12708                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12709                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12710
12711                 sbuf_cat(&sb, path_str);
12712                 switch (io->io_hdr.io_type) {
12713                 case CTL_IO_SCSI:
12714                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12715                         sbuf_printf(&sb, "\n");
12716                         sbuf_cat(&sb, path_str);
12717                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12718                                     io->scsiio.tag_num, io->scsiio.tag_type);
12719                         break;
12720                 case CTL_IO_TASK:
12721                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12722                                     "Tag Type: %d\n", io->taskio.task_action,
12723                                     io->taskio.tag_num, io->taskio.tag_type);
12724                         break;
12725                 default:
12726                         printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12727                         panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12728                         break;
12729                 }
12730                 sbuf_cat(&sb, path_str);
12731                 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
12732                             (intmax_t)time_uptime - io->io_hdr.start_time);
12733                 sbuf_finish(&sb);
12734                 printf("%s", sbuf_data(&sb));
12735         }
12736 #endif /* CTL_TIME_IO */
12737
12738 #ifdef CTL_IO_DELAY
12739         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12740                 /*
12741                  * The delay has already fired for this I/O; just clear
12742                  * the flag and continue with the normal datamove path.
12743                  */
12744                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12745         } else {
12746                 struct ctl_lun *lun;
12747
12748                 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12749                 if ((lun != NULL)
12750                  && (lun->delay_info.datamove_delay > 0)) {
12751                         struct callout *callout;
12752
12753                         callout = (struct callout *)&io->io_hdr.timer_bytes;
12754                         callout_init(callout, /*mpsafe*/ 1);
12755                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12756                         callout_reset(callout,
12757                                       lun->delay_info.datamove_delay * hz,
12758                                       ctl_datamove_timer_wakeup, io);
12759                         if (lun->delay_info.datamove_type ==
12760                             CTL_DELAY_TYPE_ONESHOT)
12761                                 lun->delay_info.datamove_delay = 0;
12762                         return;
12763                 }
12764         }
12765 #endif
12766
12767         /*
12768          * This command has been aborted.  Set the port status, so we fail
12769          * the data move.
12770          */
12771         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12772                 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
12773                        io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
12774                        io->io_hdr.nexus.targ_port,
12775                        (uintmax_t)io->io_hdr.nexus.targ_target.id,
12776                        io->io_hdr.nexus.targ_lun);
12777                 io->io_hdr.port_status = 31337;
12778                 /*
12779                  * Note that the backend, in this case, will get the
12780                  * callback in its context.  In other cases it may get
12781                  * called in the frontend's interrupt thread context.
12782                  */
12783                 io->scsiio.be_move_done(io);
12784                 return;
12785         }
12786
12787         /*
12788          * If we're in XFER mode and this I/O is from the other shelf
12789          * controller, we need to send the DMA to the other side to
12790          * actually transfer the data to/from the host.  In serialize only
12791          * mode the transfer happens below CTL and ctl_datamove() is only
12792          * called on the machine that originally received the I/O.
12793          */
12794         if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
12795          && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12796                 union ctl_ha_msg msg;
12797                 uint32_t sg_entries_sent;
12798                 int do_sg_copy;
12799                 int i;
12800
12801                 memset(&msg, 0, sizeof(msg));
12802                 msg.hdr.msg_type = CTL_MSG_DATAMOVE;
12803                 msg.hdr.original_sc = io->io_hdr.original_sc;
12804                 msg.hdr.serializing_sc = io;
12805                 msg.hdr.nexus = io->io_hdr.nexus;
12806                 msg.dt.flags = io->io_hdr.flags;
12807                 /*
12808                  * We convert everything into a S/G list here.  We can't
12809                  * pass by reference, only by value between controllers.
12810                  * So we can't pass a pointer to the S/G list, only as many
12811                  * S/G entries as we can fit in here.  If it's possible for
12812                  * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
12813                  * then we need to break this up into multiple transfers.
12814                  */
12815                 if (io->scsiio.kern_sg_entries == 0) {
12816                         msg.dt.kern_sg_entries = 1;
12817                         /*
12818                          * If this is in cached memory, flush the cache
12819                          * before we send the DMA request to the other
12820                          * controller.  We want to do this in either the
12821                          * read or the write case.  The read case is
12822                          * straightforward.  In the write case, we want to
12823                          * make sure nothing is in the local cache that
12824                          * could overwrite the DMAed data.
12825                          */
12826                         if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12827                                 /*
12828                                  * XXX KDM use bus_dmamap_sync() here.
12829                                  */
12830                         }
12831
12832                         /*
12833                          * Convert to a physical address if this is a
12834                          * virtual address.
12835                          */
12836                         if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
12837                                 msg.dt.sg_list[0].addr =
12838                                         io->scsiio.kern_data_ptr;
12839                         } else {
12840                                 /*
12841                                  * XXX KDM use busdma here!
12842                                  */
12843 #if 0
12844                                 msg.dt.sg_list[0].addr = (void *)
12845                                         vtophys(io->scsiio.kern_data_ptr);
12846 #endif
12847                         }
12848
12849                         msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
12850                         do_sg_copy = 0;
12851                 } else {
12852                         struct ctl_sg_entry *sgl;
12853
12854                         do_sg_copy = 1;
12855                         msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
12856                         sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
12857                         if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12858                                 /*
12859                                  * XXX KDM use bus_dmamap_sync() here.
12860                                  */
12861                         }
12862                 }
12863
12864                 msg.dt.kern_data_len = io->scsiio.kern_data_len;
12865                 msg.dt.kern_total_len = io->scsiio.kern_total_len;
12866                 msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
12867                 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
12868                 msg.dt.sg_sequence = 0;
12869
12870                 /*
12871                  * Loop until we've sent all of the S/G entries.  On the
12872                  * other end, we'll recompose these S/G entries into one
12873                  * contiguous list before the data is actually moved.
12874                  */
12875                 for (sg_entries_sent = 0; sg_entries_sent <
12876                      msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
12877                         msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
12878                                 sizeof(msg.dt.sg_list[0])),
12879                                 msg.dt.kern_sg_entries - sg_entries_sent);
12880
12881                         if (do_sg_copy != 0) {
12882                                 struct ctl_sg_entry *sgl;
12883                                 int j;
12884
12885                                 sgl = (struct ctl_sg_entry *)
12886                                         io->scsiio.kern_data_ptr;
12887                                 /*
12888                                  * If this is in cached memory, flush the cache
12889                                  * before we send the DMA request to the other
12890                                  * controller.  We want to do this in either
12891                  * the read or the write case.  The read
12892                                  * case is straightforward.  In the write
12893                                  * case, we want to make sure nothing is
12894                                  * in the local cache that could overwrite
12895                                  * the DMAed data.
12896                                  */
12897
12898                                 for (i = sg_entries_sent, j = 0;
12899                                      j < msg.dt.cur_sg_entries; i++, j++) {
12900                                         if ((io->io_hdr.flags &
12901                                              CTL_FLAG_NO_DATASYNC) == 0) {
12902                                                 /*
12903                                                  * XXX KDM use bus_dmamap_sync()
12904                                                  */
12905                                         }
12906                                         if ((io->io_hdr.flags &
12907                                              CTL_FLAG_BUS_ADDR) == 0) {
12908                                                 /*
12909                                                  * XXX KDM use busdma.
12910                                                  */
12911 #if 0
12912                                                 msg.dt.sg_list[j].addr =(void *)
12913                                                        vtophys(sgl[i].addr);
12914 #endif
12915                                         } else {
12916                                                 msg.dt.sg_list[j].addr =
12917                                                         sgl[i].addr;
12918                                         }
12919                                         msg.dt.sg_list[j].len = sgl[i].len;
12920                                 }
12921                         }
12922
12923                         sg_entries_sent += msg.dt.cur_sg_entries;
12924                         if (sg_entries_sent >= msg.dt.kern_sg_entries)
12925                                 msg.dt.sg_last = 1;
12926                         else
12927                                 msg.dt.sg_last = 0;
12928
12929                         /*
12930                          * XXX KDM drop and reacquire the lock here?
12931                          */
12932                         if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12933                             sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
12934                                 /*
12935                                  * XXX do something here.
12936                                  */
12937                         }
12938
12939                         msg.dt.sent_sg_entries = sg_entries_sent;
12940                 }
12941                 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12942                 if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
12943                         ctl_failover_io(io, /*have_lock*/ 0);
12944
12945         } else {
12946
12947                 /*
12948                  * Lookup the fe_datamove() function for this particular
12949                  * front end.
12950                  */
12951                 fe_datamove =
12952                     control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
12953
12954                 fe_datamove(io);
12955         }
12956 }
12957
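/*
 * Illustrative sketch (disabled, not part of the driver): the S/G
 * batching arithmetic used in the XFER-mode branch above, shown in
 * isolation.  "capacity" stands in for
 * sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0]).  For example,
 * with a capacity of 16 and 37 kernel S/G entries, three messages
 * carrying 16, 16 and 5 entries go out, and sg_last is set on the
 * third.
 */
#if 0
static void
ctl_sg_batch_sketch(int kern_sg_entries, int capacity)
{
        int sent, cur, seq;

        for (sent = 0, seq = 0; sent < kern_sg_entries; seq++) {
                cur = ctl_min(capacity, kern_sg_entries - sent);
                sent += cur;
                printf("msg %d: %d entries, sg_last=%d\n", seq, cur,
                       sent >= kern_sg_entries);
        }
}
#endif
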
12958 static void
12959 ctl_send_datamove_done(union ctl_io *io, int have_lock)
12960 {
12961         union ctl_ha_msg msg;
12962         int isc_status;
12963
12964         memset(&msg, 0, sizeof(msg));
12965
12966         msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12967         msg.hdr.original_sc = io;
12968         msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12969         msg.hdr.nexus = io->io_hdr.nexus;
12970         msg.hdr.status = io->io_hdr.status;
12971         msg.scsi.tag_num = io->scsiio.tag_num;
12972         msg.scsi.tag_type = io->scsiio.tag_type;
12973         msg.scsi.scsi_status = io->scsiio.scsi_status;
12974         memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12975                sizeof(io->scsiio.sense_data));
12976         msg.scsi.sense_len = io->scsiio.sense_len;
12977         msg.scsi.sense_residual = io->scsiio.sense_residual;
12978         msg.scsi.fetd_status = io->io_hdr.port_status;
12979         msg.scsi.residual = io->scsiio.residual;
12980         io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12981
12982         if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12983                 ctl_failover_io(io, /*have_lock*/ have_lock);
12984                 return;
12985         }
12986
12987         isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
12988         if (isc_status > CTL_HA_STATUS_SUCCESS) {
12989                 /* XXX do something if this fails */
12990         }
12991
12992 }
12993
12994 /*
12995  * The DMA to the remote side is done, now we need to tell the other side
12996  * we're done so it can continue with its data movement.
12997  */
12998 static void
12999 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
13000 {
13001         union ctl_io *io;
13002
13003         io = rq->context;
13004
13005         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
13006                 printf("%s: ISC DMA write failed with error %d", __func__,
13007                        rq->ret);
13008                 ctl_set_internal_failure(&io->scsiio,
13009                                          /*sks_valid*/ 1,
13010                                          /*retry_count*/ rq->ret);
13011         }
13012
13013         ctl_dt_req_free(rq);
13014
13015         /*
13016          * In this case, we had to malloc the memory locally.  Free it.
13017          */
13018         if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
13019                 int i;
13020                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13021                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
13022         }
13023         /*
13024          * The data is in local and remote memory, so now we need to send
13025          * status (good or bad) back to the other side.
13026          */
13027         ctl_send_datamove_done(io, /*have_lock*/ 0);
13028 }
13029
13030 /*
13031  * We've moved the data from the host/controller into local memory.  Now we
13032  * need to push it over to the remote controller's memory.
13033  */
13034 static int
13035 ctl_datamove_remote_dm_write_cb(union ctl_io *io)
13036 {
13037         int retval;
13038
13039         retval = 0;
13040
13041         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
13042                                           ctl_datamove_remote_write_cb);
13043
13044         return (retval);
13045 }
13046
13047 static void
13048 ctl_datamove_remote_write(union ctl_io *io)
13049 {
13050         int retval;
13051         void (*fe_datamove)(union ctl_io *io);
13052
13053         /*
13054          * - Get the data from the host/HBA into local memory.
13055          * - DMA memory from the local controller to the remote controller.
13056          * - Send status back to the remote controller.
13057          */
13058
13059         retval = ctl_datamove_remote_sgl_setup(io);
13060         if (retval != 0)
13061                 return;
13062
13063         /* Switch the pointer over so the FETD knows what to do */
13064         io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
13065
13066         /*
13067          * Use a custom move done callback, since we need to send completion
13068          * back to the other controller, not to the backend on this side.
13069          */
13070         io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
13071
13072         fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
13073
13074         fe_datamove(io);
13075
13076         return;
13077
13078 }
13079
13080 static int
13081 ctl_datamove_remote_dm_read_cb(union ctl_io *io)
13082 {
13083 #if 0
13084         char str[256];
13085         char path_str[64];
13086         struct sbuf sb;
13087 #endif
13088
13089         /*
13090          * In this case, we had to malloc the memory locally.  Free it.
13091          */
13092         if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
13093                 int i;
13094                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13095                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
13096         }
13097
13098 #if 0
13099         scsi_path_string(io, path_str, sizeof(path_str));
13100         sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
13101         sbuf_cat(&sb, path_str);
13102         scsi_command_string(&io->scsiio, NULL, &sb);
13103         sbuf_printf(&sb, "\n");
13104         sbuf_cat(&sb, path_str);
13105         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
13106                     io->scsiio.tag_num, io->scsiio.tag_type);
13107         sbuf_cat(&sb, path_str);
13108         sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
13109                     io->io_hdr.flags, io->io_hdr.status);
13110         sbuf_finish(&sb);
13111         printf("%s", sbuf_data(&sb));
13112 #endif
13113
13114
13115         /*
13116          * The read is done, now we need to send status (good or bad) back
13117          * to the other side.
13118          */
13119         ctl_send_datamove_done(io, /*have_lock*/ 0);
13120
13121         return (0);
13122 }
13123
13124 static void
13125 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
13126 {
13127         union ctl_io *io;
13128         void (*fe_datamove)(union ctl_io *io);
13129
13130         io = rq->context;
13131
13132         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
13133                 printf("%s: ISC DMA read failed with error %d", __func__,
13134                        rq->ret);
13135                 ctl_set_internal_failure(&io->scsiio,
13136                                          /*sks_valid*/ 1,
13137                                          /*retry_count*/ rq->ret);
13138         }
13139
13140         ctl_dt_req_free(rq);
13141
13142         /* Switch the pointer over so the FETD knows what to do */
13143         io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
13144
13145         /*
13146          * Use a custom move done callback, since we need to send completion
13147          * back to the other controller, not to the backend on this side.
13148          */
13149         io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
13150
13151         /* XXX KDM add checks like the ones in ctl_datamove? */
13152
13153         fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
13154
13155         fe_datamove(io);
13156 }
13157
13158 static int
13159 ctl_datamove_remote_sgl_setup(union ctl_io *io)
13160 {
13161         struct ctl_sg_entry *local_sglist, *remote_sglist;
13162         struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
13163         struct ctl_softc *softc;
13164         int retval;
13165         int i;
13166
13167         retval = 0;
13168         softc = control_softc;
13169
13170         local_sglist = io->io_hdr.local_sglist;
13171         local_dma_sglist = io->io_hdr.local_dma_sglist;
13172         remote_sglist = io->io_hdr.remote_sglist;
13173         remote_dma_sglist = io->io_hdr.remote_dma_sglist;
13174
13175         if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
13176                 for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
13177                         local_sglist[i].len = remote_sglist[i].len;
13178
13179                         /*
13180                          * XXX Detect the situation where the RS-level I/O
13181                          * redirector on the other side has already read the
13182                          * data off of the AOR RS on this side, and
13183                          * transferred it to remote (mirror) memory on the
13184                          * other side.  Since we already have the data in
13185                          * memory here, we just need to use it.
13186                          *
13187                          * XXX KDM this can probably be removed once we
13188                          * get the cache device code in and take the
13189                          * current AOR implementation out.
13190                          */
13191 #ifdef NEEDTOPORT
13192                         if ((remote_sglist[i].addr >=
13193                              (void *)vtophys(softc->mirr->addr))
13194                          && (remote_sglist[i].addr <
13195                              ((void *)vtophys(softc->mirr->addr) +
13196                              CacheMirrorOffset))) {
13197                                 local_sglist[i].addr = remote_sglist[i].addr -
13198                                         CacheMirrorOffset;
13199                                 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13200                                      CTL_FLAG_DATA_IN)
13201                                         io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
13202                         } else {
13203                                 local_sglist[i].addr = remote_sglist[i].addr +
13204                                         CacheMirrorOffset;
13205                         }
13206 #endif
13207 #if 0
13208                         printf("%s: local %p, remote %p, len %d\n",
13209                                __func__, local_sglist[i].addr,
13210                                remote_sglist[i].addr, local_sglist[i].len);
13211 #endif
13212                 }
13213         } else {
13214                 uint32_t len_to_go;
13215
13216                 /*
13217                  * In this case, we don't have automatically allocated
13218                  * memory for this I/O on this controller.  This typically
13219                  * happens with internal CTL I/O -- e.g. inquiry, mode
13220                  * sense, etc.  Anything coming from RAIDCore will have
13221                  * a mirror area available.
13222                  */
13223                 len_to_go = io->scsiio.kern_data_len;
13224
13225                 /*
13226                  * Clear the no datasync flag, we have to use malloced
13227                  * buffers.
13228                  */
13229                 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
13230
13231                 /*
13232                  * The difficult thing here is that the size of the various
13233                  * S/G segments may be different than the size from the
13234                  * remote controller.  That'll make it harder when DMAing
13235                  * the data back to the other side.
13236                  */
13237                 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
13238                      sizeof(io->io_hdr.remote_sglist[0])) &&
13239                      (len_to_go > 0); i++) {
13240                         local_sglist[i].len = ctl_min(len_to_go, 131072);
13241                         CTL_SIZE_8B(local_dma_sglist[i].len,
13242                                     local_sglist[i].len);
13243                         local_sglist[i].addr =
13244                                 malloc(local_dma_sglist[i].len, M_CTL, M_WAITOK);
13245
13246                         local_dma_sglist[i].addr = local_sglist[i].addr;
13247
13248                         if (local_sglist[i].addr == NULL) {
13249                                 int j;
13250
13251                                 printf("malloc failed for %zu bytes!",
13252                                        local_dma_sglist[i].len);
13253                                 for (j = 0; j < i; j++) {
13254                                         free(local_sglist[j].addr, M_CTL);
13255                                 }
13256                                 ctl_set_internal_failure(&io->scsiio,
13257                                                          /*sks_valid*/ 1,
13258                                                          /*retry_count*/ 4857);
13259                                 retval = 1;
13260                                 goto bailout_error;
13261                                 
13262                         }
13263                         /* XXX KDM do we need a sync here? */
13264
13265                         len_to_go -= local_sglist[i].len;
13266                 }
13267                 /*
13268                  * Reset the number of S/G entries accordingly.  The
13269                  * original number of S/G entries is available in
13270                  * rem_sg_entries.
13271                  */
13272                 io->scsiio.kern_sg_entries = i;
13273
13274 #if 0
13275                 printf("%s: kern_sg_entries = %d\n", __func__,
13276                        io->scsiio.kern_sg_entries);
13277                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13278                         printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
13279                                local_sglist[i].addr, local_sglist[i].len,
13280                                local_dma_sglist[i].len);
13281 #endif
13282         }
13283
13284
13285         return (retval);
13286
13287 bailout_error:
13288
13289         ctl_send_datamove_done(io, /*have_lock*/ 0);
13290
13291         return (retval);
13292 }
13293
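/*
 * Illustrative sketch (disabled): the segment-carving policy from the
 * malloc path above, shown in isolation.  It assumes CTL_SIZE_8B
 * rounds a length up to the next 8-byte multiple, which is what the
 * DMA lengths computed above rely on.  E.g. a 300000-byte transfer is
 * carved into 131072 + 131072 + 37856 bytes, with each allocation
 * padded to a multiple of 8.
 */
#if 0
static void
ctl_carve_sketch(uint32_t len_to_go)
{
        uint32_t seg_len, dma_len;

        while (len_to_go > 0) {
                seg_len = ctl_min(len_to_go, 131072);   /* 128KB cap */
                CTL_SIZE_8B(dma_len, seg_len);          /* pad for DMA */
                printf("segment %u bytes, DMA %u bytes\n", seg_len,
                       dma_len);
                len_to_go -= seg_len;
        }
}
#endif
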
13294 static int
13295 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
13296                          ctl_ha_dt_cb callback)
13297 {
13298         struct ctl_ha_dt_req *rq;
13299         struct ctl_sg_entry *remote_sglist, *local_sglist;
13300         struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
13301         uint32_t local_used, remote_used, total_used;
13302         int retval;
13303         int i, j;
13304
13305         retval = 0;
13306
13307         rq = ctl_dt_req_alloc();
13308
13309         /*
13310          * If we failed to allocate the request, and if the DMA didn't fail
13311          * anyway, set busy status.  This is just a resource allocation
13312          * failure.
13313          */
13314         if ((rq == NULL)
13315          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
13316                 ctl_set_busy(&io->scsiio);
13317
13318         if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
13319
13320                 if (rq != NULL)
13321                         ctl_dt_req_free(rq);
13322
13323                 /*
13324                  * The data move failed.  We need to return status back
13325                  * to the other controller.  No point in trying to DMA
13326                  * data to the remote controller.
13327                  */
13328
13329                 ctl_send_datamove_done(io, /*have_lock*/ 0);
13330
13331                 retval = 1;
13332
13333                 goto bailout;
13334         }
13335
13336         local_sglist = io->io_hdr.local_sglist;
13337         local_dma_sglist = io->io_hdr.local_dma_sglist;
13338         remote_sglist = io->io_hdr.remote_sglist;
13339         remote_dma_sglist = io->io_hdr.remote_dma_sglist;
13340         local_used = 0;
13341         remote_used = 0;
13342         total_used = 0;
13343
13344         if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
13345                 rq->ret = CTL_HA_STATUS_SUCCESS;
13346                 rq->context = io;
13347                 callback(rq);
13348                 goto bailout;
13349         }
13350
13351         /*
13352          * Pull/push the data over the wire from/to the other controller.
13353          * This takes into account the possibility that the local and
13354          * remote sglists may not be identical in terms of the size of
13355          * the elements and the number of elements.
13356          *
13357          * One fundamental assumption here is that the length allocated for
13358          * both the local and remote sglists is identical.  Otherwise, we've
13359          * essentially got a coding error of some sort.
13360          */
13361         for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
13362                 int isc_ret;
13363                 uint32_t cur_len, dma_length;
13364                 uint8_t *tmp_ptr;
13365
13366                 rq->id = CTL_HA_DATA_CTL;
13367                 rq->command = command;
13368                 rq->context = io;
13369
13370                 /*
13371                  * Both pointers should be aligned.  But it is possible
13372                  * that the allocation length is not.  They should both
13373                  * also have enough slack left over at the end, though,
13374                  * to round up to the next 8 byte boundary.
13375                  */
13376                 cur_len = ctl_min(local_sglist[i].len - local_used,
13377                                   remote_sglist[j].len - remote_used);
13378
13379                 /*
13380                  * In this case, we have a size issue and need to decrease
13381                  * the size, except in the case where we actually have less
13382                  * than 8 bytes left.  In that case, we need to increase
13383                  * the DMA length to get the last bit.
13384                  */
13385                 if ((cur_len & 0x7) != 0) {
13386                         if (cur_len > 0x7) {
13387                                 cur_len = cur_len - (cur_len & 0x7);
13388                                 dma_length = cur_len;
13389                         } else {
13390                                 CTL_SIZE_8B(dma_length, cur_len);
13391                         }
13392
13393                 } else
13394                         dma_length = cur_len;
13395
13396                 /*
13397                  * If we had to allocate memory for this I/O, instead of using
13398                  * the non-cached mirror memory, we'll need to flush the cache
13399                  * before trying to DMA to the other controller.
13400                  *
13401                  * We could end up doing this multiple times for the same
13402                  * segment if we have a larger local segment than remote
13403                  * segment.  That shouldn't be an issue.
13404                  */
13405                 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
13406                         /*
13407                          * XXX KDM use bus_dmamap_sync() here.
13408                          */
13409                 }
13410
13411                 rq->size = dma_length;
13412
13413                 tmp_ptr = (uint8_t *)local_sglist[i].addr;
13414                 tmp_ptr += local_used;
13415
13416                 /* Use physical addresses when talking to ISC hardware */
13417                 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
13418                         /* XXX KDM use busdma */
13419 #if 0
13420                         rq->local = vtophys(tmp_ptr);
13421 #endif
13422                 } else
13423                         rq->local = tmp_ptr;
13424
13425                 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
13426                 tmp_ptr += remote_used;
13427                 rq->remote = tmp_ptr;
13428
13429                 rq->callback = NULL;
13430
13431                 local_used += cur_len;
13432                 if (local_used >= local_sglist[i].len) {
13433                         i++;
13434                         local_used = 0;
13435                 }
13436
13437                 remote_used += cur_len;
13438                 if (remote_used >= remote_sglist[j].len) {
13439                         j++;
13440                         remote_used = 0;
13441                 }
13442                 total_used += cur_len;
13443
13444                 if (total_used >= io->scsiio.kern_data_len)
13445                         rq->callback = callback;
13446
13447                 if ((rq->size & 0x7) != 0) {
13448                         printf("%s: warning: size %d is not on 8b boundary\n",
13449                                __func__, rq->size);
13450                 }
13451                 if (((uintptr_t)rq->local & 0x7) != 0) {
13452                         printf("%s: warning: local %p not on 8b boundary\n",
13453                                __func__, rq->local);
13454                 }
13455                 if (((uintptr_t)rq->remote & 0x7) != 0) {
13456                         printf("%s: warning: remote %p not on 8b boundary\n",
13457                                __func__, rq->remote);
13458                 }
13459 #if 0
13460                 printf("%s: %s: local %p remote %p size %d\n", __func__,
13461                        (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
13462                        rq->local, rq->remote, rq->size);
13463 #endif
13464
13465                 isc_ret = ctl_dt_single(rq);
13466                 if (isc_ret == CTL_HA_STATUS_WAIT)
13467                         continue;
13468
13469                 if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
13470                         rq->ret = CTL_HA_STATUS_SUCCESS;
13471                 } else {
13472                         rq->ret = isc_ret;
13473                 }
13474                 callback(rq);
13475                 goto bailout;
13476         }
13477
13478 bailout:
13479         return (retval);
13480
13481 }
13482
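/*
 * Illustrative sketch (disabled): the chunk-length rule from the
 * transfer loop above, shown in isolation.  Take the smaller of the
 * two segment remainders; if that is not a multiple of 8, trim it
 * down when more than 8 bytes remain, otherwise keep the short tail
 * and round only the DMA length up to 8 bytes.
 */
#if 0
static uint32_t
ctl_chunk_len_sketch(uint32_t local_left, uint32_t remote_left,
                     uint32_t *dma_length)
{
        uint32_t cur_len;

        cur_len = ctl_min(local_left, remote_left);
        if ((cur_len & 0x7) != 0) {
                if (cur_len > 0x7) {
                        /* Trim to an 8-byte multiple. */
                        cur_len -= (cur_len & 0x7);
                        *dma_length = cur_len;
                } else {
                        /* Sub-8-byte tail: pad the DMA out instead. */
                        CTL_SIZE_8B(*dma_length, cur_len);
                }
        } else
                *dma_length = cur_len;
        return (cur_len);
}
#endif
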
13483 static void
13484 ctl_datamove_remote_read(union ctl_io *io)
13485 {
13486         int retval;
13487         int i;
13488
13489         /*
13490          * This will send an error to the other controller in the case of a
13491          * failure.
13492          */
13493         retval = ctl_datamove_remote_sgl_setup(io);
13494         if (retval != 0)
13495                 return;
13496
13497         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
13498                                           ctl_datamove_remote_read_cb);
13499         if ((retval != 0)
13500          && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
13501                 /*
13502                  * Make sure we free memory if there was an error..  The
13503                  * ctl_datamove_remote_xfer() function will send the
13504                  * datamove done message, or call the callback with an
13505                  * error if there is a problem.
13506                  */
13507                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13508                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
13509         }
13510
13511         return;
13512 }
13513
13514 /*
13515  * Process a datamove request from the other controller.  This is used for
13516  * XFER mode only, not SER_ONLY mode.  For writes, we DMA into local memory
13517  * first.  Once that is complete, the data gets DMAed into the remote
13518  * controller's memory.  For reads, we DMA from the remote controller's
13519  * memory into our memory first, and then move it out to the FETD.
13520  */
13521 static void
13522 ctl_datamove_remote(union ctl_io *io)
13523 {
13524         struct ctl_softc *softc;
13525
13526         softc = control_softc;
13527
13528         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
13529
13530         /*
13531          * Note that we look for an aborted I/O here, but don't do some of
13532          * the other checks that ctl_datamove() normally does.
13533          * We don't need to run the datamove delay code, since that should
13534          * have been done if need be on the other controller.
13535          */
13536         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13537                 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
13538                        io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
13539                        io->io_hdr.nexus.targ_port,
13540                        io->io_hdr.nexus.targ_target.id,
13541                        io->io_hdr.nexus.targ_lun);
13542                 io->io_hdr.port_status = 31338;
13543                 ctl_send_datamove_done(io, /*have_lock*/ 0);
13544                 return;
13545         }
13546
13547         if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
13548                 ctl_datamove_remote_write(io);
13549         } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
13550                 ctl_datamove_remote_read(io);
13551         } else {
13552                 union ctl_ha_msg msg;
13553                 struct scsi_sense_data *sense;
13554                 uint8_t sks[3];
13555                 int retry_count;
13556
13557                 memset(&msg, 0, sizeof(msg));
13558
13559                 msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
13560                 msg.hdr.status = CTL_SCSI_ERROR;
13561                 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
13562
13563                 retry_count = 4243;
13564
13565                 sense = &msg.scsi.sense_data;
13566                 sks[0] = SSD_SCS_VALID;
13567                 sks[1] = (retry_count >> 8) & 0xff;
13568                 sks[2] = retry_count & 0xff;
13569
13570                 /* "Internal target failure" */
13571                 scsi_set_sense_data(sense,
13572                                     /*sense_format*/ SSD_TYPE_NONE,
13573                                     /*current_error*/ 1,
13574                                     /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
13575                                     /*asc*/ 0x44,
13576                                     /*ascq*/ 0x00,
13577                                     /*type*/ SSD_ELEM_SKS,
13578                                     /*size*/ sizeof(sks),
13579                                     /*data*/ sks,
13580                                     SSD_ELEM_NONE);
13581
13582                 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
13583                 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13584                         ctl_failover_io(io, /*have_lock*/ 1);
13585                         return;
13586                 }
13587
13588                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
13589                     CTL_HA_STATUS_SUCCESS) {
13590                         /* XXX KDM what to do if this fails? */
13591                 }
13592                 return;
13593         }
13594         
13595 }
13596
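/*
 * Illustrative sketch (disabled): the sense-key-specific (SKS)
 * encoding used in the error path above.  Byte 0 marks the field
 * valid, and bytes 1-2 carry a 16-bit diagnostic/retry count in
 * big-endian order; this matches the sks[] construction above.
 */
#if 0
static void
ctl_fill_sks_sketch(uint8_t sks[3], int retry_count)
{
        sks[0] = SSD_SCS_VALID;
        sks[1] = (retry_count >> 8) & 0xff;
        sks[2] = retry_count & 0xff;
}
#endif
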
13597 static int
13598 ctl_process_done(union ctl_io *io)
13599 {
13600         struct ctl_lun *lun;
13601         struct ctl_softc *ctl_softc;
13602         void (*fe_done)(union ctl_io *io);
13603         uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
13604
13605         CTL_DEBUG_PRINT(("ctl_process_done\n"));
13606
13607         fe_done =
13608             control_softc->ctl_ports[targ_port]->fe_done;
13609
13610 #ifdef CTL_TIME_IO
13611         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
13612                 char str[256];
13613                 char path_str[64];
13614                 struct sbuf sb;
13615
13616                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
13617                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
13618
13619                 sbuf_cat(&sb, path_str);
13620                 switch (io->io_hdr.io_type) {
13621                 case CTL_IO_SCSI:
13622                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
13623                         sbuf_printf(&sb, "\n");
13624                         sbuf_cat(&sb, path_str);
13625                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
13626                                     io->scsiio.tag_num, io->scsiio.tag_type);
13627                         break;
13628                 case CTL_IO_TASK:
13629                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
13630                                     "Tag Type: %d\n", io->taskio.task_action,
13631                                     io->taskio.tag_num, io->taskio.tag_type);
13632                         break;
13633                 default:
13634                         printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
13635                         panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
13636                         break;
13637                 }
13638                 sbuf_cat(&sb, path_str);
13639                 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
13640                             (intmax_t)time_uptime - io->io_hdr.start_time);
13641                 sbuf_finish(&sb);
13642                 printf("%s", sbuf_data(&sb));
13643         }
13644 #endif /* CTL_TIME_IO */
13645
13646         switch (io->io_hdr.io_type) {
13647         case CTL_IO_SCSI:
13648                 break;
13649         case CTL_IO_TASK:
13650                 if (bootverbose || verbose > 0)
13651                         ctl_io_error_print(io, NULL);
13652                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
13653                         ctl_free_io(io);
13654                 else
13655                         fe_done(io);
13656                 return (CTL_RETVAL_COMPLETE);
13657                 break;
13658         default:
13659                 printf("ctl_process_done: invalid io type %d\n",
13660                        io->io_hdr.io_type);
13661                 panic("ctl_process_done: invalid io type %d\n",
13662                       io->io_hdr.io_type);
13663                 break; /* NOTREACHED */
13664         }
13665
13666         lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13667         if (lun == NULL) {
13668                 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
13669                                  io->io_hdr.nexus.targ_mapped_lun));
13670                 fe_done(io);
13671                 goto bailout;
13672         }
13673         ctl_softc = lun->ctl_softc;
13674
13675         mtx_lock(&lun->lun_lock);
13676
13677         /*
13678          * Check to see if we have any errors to inject here.  We only
13679          * inject errors for commands that don't already have errors set.
13680          */
13681         if ((STAILQ_FIRST(&lun->error_list) != NULL)
13682          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
13683                 ctl_inject_error(lun, io);
13684
13685         /*
13686          * XXX KDM how do we treat commands that aren't completed
13687          * successfully?
13688          *
13689          * XXX KDM should we also track I/O latency?
13690          */
13691         if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
13692             io->io_hdr.io_type == CTL_IO_SCSI) {
13693 #ifdef CTL_TIME_IO
13694                 struct bintime cur_bt;
13695 #endif
13696                 int type;
13697
13698                 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13699                     CTL_FLAG_DATA_IN)
13700                         type = CTL_STATS_READ;
13701                 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13702                     CTL_FLAG_DATA_OUT)
13703                         type = CTL_STATS_WRITE;
13704                 else
13705                         type = CTL_STATS_NO_IO;
13706
13707                 lun->stats.ports[targ_port].bytes[type] +=
13708                     io->scsiio.kern_total_len;
13709                 lun->stats.ports[targ_port].operations[type]++;
13710 #ifdef CTL_TIME_IO
13711                 bintime_add(&lun->stats.ports[targ_port].dma_time[type],
13712                    &io->io_hdr.dma_bt);
13713                 lun->stats.ports[targ_port].num_dmas[type] +=
13714                     io->io_hdr.num_dmas;
13715                 getbintime(&cur_bt);
13716                 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
13717                 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
13718 #endif
13719         }
13720
13721         /*
13722          * Remove this from the OOA queue.
13723          */
13724         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
13725
13726         /*
13727          * Run through the blocked queue on this LUN and see if anything
13728          * has become unblocked, now that this transaction is done.
13729          */
13730         ctl_check_blocked(lun);
13731
13732         /*
13733          * If the LUN has been invalidated, free it if there is nothing
13734          * left on its OOA queue.
13735          */
13736         if ((lun->flags & CTL_LUN_INVALID)
13737          && TAILQ_EMPTY(&lun->ooa_queue)) {
13738                 mtx_unlock(&lun->lun_lock);
13739                 mtx_lock(&ctl_softc->ctl_lock);
13740                 ctl_free_lun(lun);
13741                 mtx_unlock(&ctl_softc->ctl_lock);
13742         } else
13743                 mtx_unlock(&lun->lun_lock);
13744
13745         /*
13746          * If this command has been aborted, make sure we set the status
13747          * properly.  The FETD is responsible for freeing the I/O and doing
13748          * whatever it needs to do to clean up its state.
13749          */
13750         if (io->io_hdr.flags & CTL_FLAG_ABORT)
13751                 ctl_set_task_aborted(&io->scsiio);
13752
13753         /*
13754          * We print out status for every task management command; that
13755          * happens above, since task management commands should never
13756          * even make it down here.  For SCSI commands, we filter out
13757          * unit attention errors, since they happen on every boot and
13758          * would clutter up the log.
13759          */
13760         switch (io->io_hdr.io_type) {
13761         case CTL_IO_SCSI: {
13762                 int error_code, sense_key, asc, ascq;
13763
13764                 sense_key = 0;
13765
13766                 if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
13767                  && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
13768                         /*
13769                          * Since this is just for printing, no need to
13770                          * show errors here.
13771                          */
13772                         scsi_extract_sense_len(&io->scsiio.sense_data,
13773                                                io->scsiio.sense_len,
13774                                                &error_code,
13775                                                &sense_key,
13776                                                &asc,
13777                                                &ascq,
13778                                                /*show_errors*/ 0);
13779                 }
13780
13781                 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
13782                  && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR)
13783                   || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND)
13784                   || (sense_key != SSD_KEY_UNIT_ATTENTION))) {
13785
13786                         if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){
13787                                 ctl_softc->skipped_prints++;
13788                         } else {
13789                                 uint32_t skipped_prints;
13790
13791                                 skipped_prints = ctl_softc->skipped_prints;
13792
13793                                 ctl_softc->skipped_prints = 0;
13794                                 ctl_softc->last_print_jiffies = time_uptime;
13795
13796                                 if (skipped_prints > 0) {
13797 #ifdef NEEDTOPORT
13798                                         csevent_log(CSC_CTL | CSC_SHELF_SW |
13799                                             CTL_ERROR_REPORT,
13800                                             csevent_LogType_Trace,
13801                                             csevent_Severity_Information,
13802                                             csevent_AlertLevel_Green,
13803                                             csevent_FRU_Firmware,
13804                                             csevent_FRU_Unknown,
13805                                             "High CTL error volume, %d prints "
13806                                             "skipped", skipped_prints);
13807 #endif
13808                                 }
13809                                 if (bootverbose || verbose > 0)
13810                                         ctl_io_error_print(io, NULL);
13811                         }
13812                 }
13813                 break;
13814         }
13815         case CTL_IO_TASK:
13816                 if (bootverbose || verbose > 0)
13817                         ctl_io_error_print(io, NULL);
13818                 break;
13819         default:
13820                 break;
13821         }
13822
13823         /*
13824          * Tell the FETD or the other shelf controller we're done with this
13825          * command.  Note that only SCSI commands get to this point.  Task
13826          * management commands are completed above.
13827          *
13828          * We only send status to the other controller if we're in XFER
13829          * mode.  In SER_ONLY mode, the I/O is done on the controller that
13830          * received the I/O (from CTL's perspective), and so the status is
13831          * generated there.
13832          * 
13833          * XXX KDM if we hold the lock here, we could cause a deadlock
13834          * if the frontend comes back in in this context to queue
13835          * something.
13836          */
13837         if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
13838          && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
13839                 union ctl_ha_msg msg;
13840
13841                 memset(&msg, 0, sizeof(msg));
13842                 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
13843                 msg.hdr.original_sc = io->io_hdr.original_sc;
13844                 msg.hdr.nexus = io->io_hdr.nexus;
13845                 msg.hdr.status = io->io_hdr.status;
13846                 msg.scsi.scsi_status = io->scsiio.scsi_status;
13847                 msg.scsi.tag_num = io->scsiio.tag_num;
13848                 msg.scsi.tag_type = io->scsiio.tag_type;
13849                 msg.scsi.sense_len = io->scsiio.sense_len;
13850                 msg.scsi.sense_residual = io->scsiio.sense_residual;
13851                 msg.scsi.residual = io->scsiio.residual;
13852                 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
13853                        sizeof(io->scsiio.sense_data));
13854                 /*
13855                  * We copy this whether or not this is an I/O-related
13856                  * command.  Otherwise, we'd have to go and check to see
13857                  * whether it's a read/write command, and it really isn't
13858                  * worth it.
13859                  */
13860                 memcpy(&msg.scsi.lbalen,
13861                        &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
13862                        sizeof(msg.scsi.lbalen));
13863
13864                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13865                                 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
13866                         /* XXX do something here */
13867                 }
13868
13869                 ctl_free_io(io);
13870         } else 
13871                 fe_done(io);
13872
13873 bailout:
13874
13875         return (CTL_RETVAL_COMPLETE);
13876 }
13877
13878 #ifdef CTL_WITH_CA
13879 /*
13880  * Front end should call this if it doesn't do autosense.  When the request
13881  * sense comes back in from the initiator, we'll dequeue this and send it.
13882  */
13883 int
13884 ctl_queue_sense(union ctl_io *io)
13885 {
13886         struct ctl_lun *lun;
13887         struct ctl_softc *ctl_softc;
13888         uint32_t initidx, targ_lun;
13889
13890         ctl_softc = control_softc;
13891
13892         CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13893
13894         /*
13895          * LUN lookup will likely move to the ctl_work_thread() once we
13896          * have our new queueing infrastructure (that doesn't put things on
13897          * a per-LUN queue initially).  That is so that we can handle
13898          * things like an INQUIRY to a LUN that we don't have enabled.  We
13899          * can't deal with that right now.
13900          */
13901         mtx_lock(&ctl_softc->ctl_lock);
13902
13903         /*
13904          * If we don't have a LUN for this, just toss the sense
13905          * information.
13906          */
13907         targ_lun = io->io_hdr.nexus.targ_lun;
13908         targ_lun = ctl_map_lun(io->io_hdr.nexus.targ_port, targ_lun);
13909         if ((targ_lun < CTL_MAX_LUNS)
13910          && (ctl_softc->ctl_luns[targ_lun] != NULL))
13911                 lun = ctl_softc->ctl_luns[targ_lun];
13912         else
13913                 goto bailout;
13914
13915         initidx = ctl_get_initindex(&io->io_hdr.nexus);
13916
13917         mtx_lock(&lun->lun_lock);
13918         /*
13919          * Already have CA set for this LUN...toss the sense information.
13920          */
13921         if (ctl_is_set(lun->have_ca, initidx)) {
13922                 mtx_unlock(&lun->lun_lock);
13923                 goto bailout;
13924         }
13925
13926         memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
13927                ctl_min(sizeof(lun->pending_sense[initidx]),
13928                sizeof(io->scsiio.sense_data)));
13929         ctl_set_mask(lun->have_ca, initidx);
13930         mtx_unlock(&lun->lun_lock);
13931
13932 bailout:
13933         mtx_unlock(&ctl_softc->ctl_lock);
13934
13935         ctl_free_io(io);
13936
13937         return (CTL_RETVAL_COMPLETE);
13938 }
13939 #endif
13940
13941 /*
13942  * Primary command inlet from frontend ports.  All SCSI and task I/O
13943  * requests must go through this function.
13944  */
13945 int
13946 ctl_queue(union ctl_io *io)
13947 {
13948         struct ctl_softc *ctl_softc;
13949
13950         CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
13951
13952         ctl_softc = control_softc;
13953
13954 #ifdef CTL_TIME_IO
13955         io->io_hdr.start_time = time_uptime;
13956         getbintime(&io->io_hdr.start_bt);
13957 #endif /* CTL_TIME_IO */
13958
13959         /* Map FE-specific LUN ID into global one. */
13960         io->io_hdr.nexus.targ_mapped_lun =
13961             ctl_map_lun(io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun);
13962
13963         switch (io->io_hdr.io_type) {
13964         case CTL_IO_SCSI:
13965         case CTL_IO_TASK:
13966                 ctl_enqueue_incoming(io);
13967                 break;
13968         default:
13969                 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
13970                 return (EINVAL);
13971         }
13972
13973         return (CTL_RETVAL_COMPLETE);
13974 }
13975
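/*
 * Illustrative sketch (disabled): the minimal sequence a frontend
 * port driver would use to feed a SCSI command into ctl_queue().
 * The pool reference and nexus values shown are assumptions for the
 * sketch; a real frontend fills in the full nexus and CDB from its
 * transport.  0x12 is the SCSI INQUIRY opcode.
 */
#if 0
static void
ctl_queue_usage_sketch(struct ctl_port *port)
{
        union ctl_io *io;

        io = ctl_alloc_io(port->ctl_pool_ref);  /* assumed pool hookup */
        io->io_hdr.io_type = CTL_IO_SCSI;
        io->io_hdr.nexus.targ_port = port->targ_port;
        io->io_hdr.nexus.initid.id = 0;
        io->io_hdr.nexus.targ_lun = 0;
        io->scsiio.cdb_len = 6;
        io->scsiio.cdb[0] = 0x12;               /* INQUIRY */
        if (ctl_queue(io) != CTL_RETVAL_COMPLETE)
                ctl_free_io(io);
}
#endif
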
13976 #ifdef CTL_IO_DELAY
13977 static void
13978 ctl_done_timer_wakeup(void *arg)
13979 {
13980         union ctl_io *io;
13981
13982         io = (union ctl_io *)arg;
13983         ctl_done(io);
13984 }
13985 #endif /* CTL_IO_DELAY */
13986
13987 void
13988 ctl_done(union ctl_io *io)
13989 {
13990         struct ctl_softc *ctl_softc;
13991
13992         ctl_softc = control_softc;
13993
13994         /*
13995          * Enable this to catch duplicate completion issues.
13996          */
13997 #if 0
13998         if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
13999                 printf("%s: type %d msg %d cdb %x iptl: "
14000                        "%d:%d:%d:%d tag 0x%04x "
14001                        "flag %#x status %x\n",
14002                         __func__,
14003                         io->io_hdr.io_type,
14004                         io->io_hdr.msg_type,
14005                         io->scsiio.cdb[0],
14006                         io->io_hdr.nexus.initid.id,
14007                         io->io_hdr.nexus.targ_port,
14008                         io->io_hdr.nexus.targ_target.id,
14009                         io->io_hdr.nexus.targ_lun,
14010                         (io->io_hdr.io_type ==
14011                         CTL_IO_TASK) ?
14012                         io->taskio.tag_num :
14013                         io->scsiio.tag_num,
14014                         io->io_hdr.flags,
14015                         io->io_hdr.status);
14016         } else
14017                 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
14018 #endif
14019
14020         /*
14021          * This is an internal copy of an I/O, and should not go through
14022          * the normal done processing logic.
14023          */
14024         if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
14025                 return;
14026
14027         /*
14028          * We need to send a msg to the serializing shelf to finish the IO
14029          * as well.  We don't send a finish message to the other shelf if
14030          * this is a task management command.  Task management commands
14031          * aren't serialized in the OOA queue, but rather just executed on
14032          * both shelf controllers for commands that originated on that
14033          * controller.
14034          */
14035         if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
14036          && (io->io_hdr.io_type != CTL_IO_TASK)) {
14037                 union ctl_ha_msg msg_io;
14038
14039                 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
14040                 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
14041                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
14042                     sizeof(msg_io), 0) != CTL_HA_STATUS_SUCCESS) {
14043                 }
14044                 /* continue on to finish IO */
14045         }
14046 #ifdef CTL_IO_DELAY
14047         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
14048                 struct ctl_lun *lun;
14049
14050                 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
14051
14052                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
14053         } else {
14054                 struct ctl_lun *lun;
14055
14056                 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
14057
14058                 if ((lun != NULL)
14059                  && (lun->delay_info.done_delay > 0)) {
14060                         struct callout *callout;
14061
14062                         callout = (struct callout *)&io->io_hdr.timer_bytes;
14063                         callout_init(callout, /*mpsafe*/ 1);
14064                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
14065                         callout_reset(callout,
14066                                       lun->delay_info.done_delay * hz,
14067                                       ctl_done_timer_wakeup, io);
14068                         if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
14069                                 lun->delay_info.done_delay = 0;
14070                         return;
14071                 }
14072         }
14073 #endif /* CTL_IO_DELAY */
14074
14075         ctl_enqueue_done(io);
14076 }
14077
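/*
 * Illustrative sketch (disabled): arming the CTL_IO_DELAY completion
 * delay that ctl_done() honors above.  In the real driver these
 * fields are set through the error-injection/delay ioctl path; the
 * direct assignments here are just for illustration, and the
 * five-second value is an arbitrary example.
 */
#if 0
static void
ctl_arm_done_delay_sketch(struct ctl_lun *lun)
{
        mtx_lock(&lun->lun_lock);
        lun->delay_info.done_delay = 5;         /* seconds */
        lun->delay_info.done_type = CTL_DELAY_TYPE_ONESHOT;
        mtx_unlock(&lun->lun_lock);
}
#endif
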
14078 int
14079 ctl_isc(struct ctl_scsiio *ctsio)
14080 {
14081         struct ctl_lun *lun;
14082         int retval;
14083
14084         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
14085
14086         CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
14087
14088         CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
14089
14090         retval = lun->backend->data_submit((union ctl_io *)ctsio);
14091
14092         return (retval);
14093 }
14094
14095
14096 static void
14097 ctl_work_thread(void *arg)
14098 {
14099         struct ctl_thread *thr = (struct ctl_thread *)arg;
14100         struct ctl_softc *softc = thr->ctl_softc;
14101         union ctl_io *io;
14102         int retval;
14103
14104         CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
14105
14106         for (;;) {
14107                 retval = 0;
14108
14109                 /*
14110                  * We handle the queues in this order:
14111                  * - ISC
14112                  * - done queue (to free up resources, unblock other commands)
14113                  * - RtR queue
14114                  * - incoming queue
14115                  *
14116                  * If those queues are empty, we break out of the loop and
14117                  * go to sleep.
14118                  */
14119                 mtx_lock(&thr->queue_lock);
14120                 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
14121                 if (io != NULL) {
14122                         STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
14123                         mtx_unlock(&thr->queue_lock);
14124                         ctl_handle_isc(io);
14125                         continue;
14126                 }
14127                 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
14128                 if (io != NULL) {
14129                         STAILQ_REMOVE_HEAD(&thr->done_queue, links);
14130                         /* clear any blocked commands, call fe_done */
14131                         mtx_unlock(&thr->queue_lock);
14132                         retval = ctl_process_done(io);
14133                         continue;
14134                 }
14135                 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
14136                 if (io != NULL) {
14137                         STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
14138                         mtx_unlock(&thr->queue_lock);
14139                         if (io->io_hdr.io_type == CTL_IO_TASK)
14140                                 ctl_run_task(io);
14141                         else
14142                                 ctl_scsiio_precheck(softc, &io->scsiio);
14143                         continue;
14144                 }
14145                 if (!ctl_pause_rtr) {
14146                         io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
14147                         if (io != NULL) {
14148                                 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
14149                                 mtx_unlock(&thr->queue_lock);
14150                                 retval = ctl_scsiio(&io->scsiio);
14151                                 if (retval != CTL_RETVAL_COMPLETE)
14152                                         CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
14153                                 continue;
14154                         }
14155                 }
14156
14157                 /* Sleep until we have something to do. */
14158                 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
14159         }
14160 }
14161
14162 static void
14163 ctl_lun_thread(void *arg)
14164 {
14165         struct ctl_softc *softc = (struct ctl_softc *)arg;
14166         struct ctl_be_lun *be_lun;
14167         int retval;
14168
14169         CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
14170
14171         for (;;) {
14172                 retval = 0;
14173                 mtx_lock(&softc->ctl_lock);
14174                 be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
14175                 if (be_lun != NULL) {
14176                         STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
14177                         mtx_unlock(&softc->ctl_lock);
14178                         ctl_create_lun(be_lun);
14179                         continue;
14180                 }
14181
14182                 /* Sleep until we have something to do. */
14183                 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
14184                     PDROP | PRIBIO, "-", 0);
14185         }
14186 }
14187
14188 static void
14189 ctl_enqueue_incoming(union ctl_io *io)
14190 {
14191         struct ctl_softc *softc = control_softc;
14192         struct ctl_thread *thr;
14193         u_int idx;
14194
14195         idx = (io->io_hdr.nexus.targ_port * 127 +
14196                io->io_hdr.nexus.initid.id) % worker_threads;
14197         thr = &softc->threads[idx];
14198         mtx_lock(&thr->queue_lock);
14199         STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
14200         mtx_unlock(&thr->queue_lock);
14201         wakeup(thr);
14202 }
14203
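/*
 * Illustrative sketch (disabled): the worker selection above, shown
 * in isolation.  Keying on (targ_port * 127 + initiator id) pins all
 * traffic from one initiator/port pair to a single worker thread,
 * preserving per-nexus ordering, while the LUN-keyed variants below
 * do the same per LUN for the RtR, done and ISC queues.
 */
#if 0
static u_int
ctl_pick_worker_sketch(u_int targ_port, u_int initid, u_int nworkers)
{
        return ((targ_port * 127 + initid) % nworkers);
}
#endif
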
static void
ctl_enqueue_rtr(union ctl_io *io)
{
        struct ctl_softc *softc = control_softc;
        struct ctl_thread *thr;

        thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
        mtx_lock(&thr->queue_lock);
        STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
        mtx_unlock(&thr->queue_lock);
        wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
        struct ctl_softc *softc = control_softc;
        struct ctl_thread *thr;

        thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
        mtx_lock(&thr->queue_lock);
        STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
        mtx_unlock(&thr->queue_lock);
        wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
        struct ctl_softc *softc = control_softc;
        struct ctl_thread *thr;

        thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
        mtx_lock(&thr->queue_lock);
        STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
        mtx_unlock(&thr->queue_lock);
        wakeup(thr);
}

/* Initialization and failover */

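/*
 * Legacy hook kept from the older HA code; it does nothing beyond
 * reporting that it is still being called.
 */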
void
ctl_init_isc_msg(void)
{
        printf("CTL: Still calling this thing\n");
}

/*
 * Init component
 *      Initializes the component into the configuration defined by bootMode
 *      (see hasc-sv.c)
 *      returns ctl_ha_comp_status:
 *              OK
 *              ERROR - fatal error
 */
static ctl_ha_comp_status
ctl_isc_init(struct ctl_ha_component *c)
{
        ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

        c->status = ret;
        return ret;
}

/* Start component
 *      Starts the component in the requested state.  If the component
 *      starts successfully, it must set its own state to the requested
 *      state.
 *      When the requested state is HASC_STATE_HA, the component may refine
 *      it by adding the _SLAVE or _MASTER flags.
 *      Currently allowed state transitions are:
 *      UNKNOWN->HA     - initial startup
 *      UNKNOWN->SINGLE - initial startup when no partner detected
 *      HA->SINGLE      - failover
 *      (An illustrative call sequence appears after the component
 *      definition at the end of this file.)
 * returns ctl_ha_comp_status:
 *              OK      - component successfully started in requested state
 *              FAILED  - could not start the requested state, failover may
 *                        be possible
 *              ERROR   - fatal error detected, no future startup possible
 */
static ctl_ha_comp_status
ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
{
        ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

        printf("%s: go\n", __func__);

        /* UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) */
        if (c->state == CTL_HA_STATE_UNKNOWN) {
                ctl_is_single = 0;
                if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
                    != CTL_HA_STATUS_SUCCESS) {
                        printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
                        ret = CTL_HA_COMP_STATUS_ERROR;
                }
        } else if (CTL_HA_STATE_IS_HA(c->state)
                && CTL_HA_STATE_IS_SINGLE(state)) {
                /* HA->SINGLE transition */
                ctl_failover();
                ctl_is_single = 1;
        } else {
                printf("ctl_isc_start: Invalid state transition %X->%X\n",
                       c->state, state);
                ret = CTL_HA_COMP_STATUS_ERROR;
        }
        if (CTL_HA_STATE_IS_SINGLE(state))
                ctl_is_single = 1;

        c->state = state;
        c->status = ret;
        return ret;
}

/*
 * Quiesce component
 * The component must clear any error conditions (set status to OK) and
 * prepare itself for another Start call.
 * returns ctl_ha_comp_status:
 *      OK
 *      ERROR
 */
static ctl_ha_comp_status
ctl_isc_quiesce(struct ctl_ha_component *c)
{
        ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

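        /*
         * Pausing RTR processing makes the worker threads stop pulling
         * new I/O off their RTR queues (see the ctl_pause_rtr check in
         * the worker loop above) until the flag is cleared again.
         */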
        ctl_pause_rtr = 1;
        c->status = ret;
        return ret;
}

struct ctl_ha_component ctl_ha_component_ctlisc =
{
        .name = "CTL ISC",
        .state = CTL_HA_STATE_UNKNOWN,
        .init = ctl_isc_init,
        .start = ctl_isc_start,
        .quiesce = ctl_isc_quiesce
};

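/*
 * Illustrative call sequence only (a sketch; the real supervisor lives
 * in hasc-sv.c and may differ).  A caller driving this component through
 * its vtable would look roughly like:
 *
 *      struct ctl_ha_component *c = &ctl_ha_component_ctlisc;
 *
 *      if (c->init(c) != CTL_HA_COMP_STATUS_OK)
 *              return;                 (fatal, no future startup)
 *      if (c->start(c, CTL_HA_STATE_SINGLE) != CTL_HA_COMP_STATUS_OK)
 *              c->quiesce(c);          (clear errors before another Start)
 */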
/*
 *  vim: ts=8
 */