/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * The default is to run with CTL_DONE_THREAD turned on.  Completed
 * transactions are queued for processing by the CTL work thread.  When
 * CTL_DONE_THREAD is not defined, completed transactions are processed in
 * the caller's context.
 */
#define CTL_DONE_THREAD

/*
 * Use the serial number and device ID provided by the backend, rather than
 * making up our own.
 */
#define CTL_USE_BACKEND_SN

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)                              \
        if (((uint32_t)source & 0x7) != 0)                              \
                target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
        else                                                            \
                target = (type)source;

#define CTL_SIZE_8B(target, size)                                       \
        if ((size & 0x7) != 0)                                          \
                target = size + (0x8 - (size & 0x7));                   \
        else                                                            \
                target = size;

#define CTL_ALIGN_8B_MARGIN     16

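/*
 * A quick illustration of the rounding behavior, using made-up values:
 * with source == 0x1003, CTL_ALIGN_8B(target, source, uint8_t *) yields
 * target == 0x1008, and CTL_SIZE_8B(target, 13) yields target == 16.
 * Values that are already 8-byte aligned pass through unchanged.
 */
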
/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
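/*
 * Each page below comes in a "default" and a "changeable" flavor.  The
 * default copy holds the initial values reported by MODE SENSE; in the
 * changeable copy, a non-zero field marks the bits that a MODE SELECT is
 * permitted to modify (the standard SCSI convention for the "changeable
 * values" page control).
 */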
static struct copan_power_subpage power_page_default = {
        /*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
        /*subpage*/ PWR_SUBPAGE_CODE,
        /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) >> 8,
                         (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
        /*page_version*/ PWR_VERSION,
        /*total_luns*/ 26,
        /*max_active_luns*/ PWR_DFLT_MAX_LUNS,
        /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0}
};

static struct copan_power_subpage power_page_changeable = {
        /*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
        /*subpage*/ PWR_SUBPAGE_CODE,
        /*page_length*/ {(sizeof(struct copan_power_subpage) - 4) >> 8,
                         (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
        /*page_version*/ 0,
        /*total_luns*/ 0,
        /*max_active_luns*/ 0,
        /*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 0, 0}
};

static struct copan_aps_subpage aps_page_default = {
        APS_PAGE_CODE | SMPH_SPF, //page_code
        APS_SUBPAGE_CODE, //subpage
        {(sizeof(struct copan_aps_subpage) - 4) >> 8,
         (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
        APS_VERSION, //page_version
        0, //lock_active
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0} //reserved
};

static struct copan_aps_subpage aps_page_changeable = {
        APS_PAGE_CODE | SMPH_SPF, //page_code
        APS_SUBPAGE_CODE, //subpage
        {(sizeof(struct copan_aps_subpage) - 4) >> 8,
         (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
        0, //page_version
        0, //lock_active
        {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0} //reserved
};

static struct copan_debugconf_subpage debugconf_page_default = {
        DBGCNF_PAGE_CODE | SMPH_SPF,    /* page_code */
        DBGCNF_SUBPAGE_CODE,            /* subpage */
        {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
         (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
        DBGCNF_VERSION,                 /* page_version */
        {CTL_TIME_IO_DEFAULT_SECS>>8,
         CTL_TIME_IO_DEFAULT_SECS>>0},  /* ctl_time_io_secs */
};

static struct copan_debugconf_subpage debugconf_page_changeable = {
        DBGCNF_PAGE_CODE | SMPH_SPF,    /* page_code */
        DBGCNF_SUBPAGE_CODE,            /* subpage */
        {(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
         (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
        0,                              /* page_version */
        {0xff,0xff},                    /* ctl_time_io_secs */
};

static struct scsi_format_page format_page_default = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
                                CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ SFP_HSEC,
        /*reserved*/ {0, 0, 0}
};

static struct scsi_format_page format_page_changeable = {
        /*page_code*/SMS_FORMAT_DEVICE_PAGE,
        /*page_length*/sizeof(struct scsi_format_page) - 2,
        /*tracks_per_zone*/ {0, 0},
        /*alt_sectors_per_zone*/ {0, 0},
        /*alt_tracks_per_zone*/ {0, 0},
        /*alt_tracks_per_lun*/ {0, 0},
        /*sectors_per_track*/ {0, 0},
        /*bytes_per_sector*/ {0, 0},
        /*interleave*/ {0, 0},
        /*track_skew*/ {0, 0},
        /*cylinder_skew*/ {0, 0},
        /*flags*/ 0,
        /*reserved*/ {0, 0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_default = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ CTL_DEFAULT_HEADS,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ SRDP_RPL_DISABLED,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
                           CTL_DEFAULT_ROTATION_RATE & 0xff},
        /*reserved2*/ {0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
        /*page_code*/SMS_RIGID_DISK_PAGE,
        /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
        /*cylinders*/ {0, 0, 0},
        /*heads*/ 0,
        /*start_write_precomp*/ {0, 0, 0},
        /*start_reduced_current*/ {0, 0, 0},
        /*step_rate*/ {0, 0},
        /*landing_zone_cylinder*/ {0, 0, 0},
        /*rpl*/ 0,
        /*rotational_offset*/ 0,
        /*reserved1*/ 0,
        /*rotation_rate*/ {0, 0},
        /*reserved2*/ {0, 0}
};

static struct scsi_caching_page caching_page_default = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ SCP_DISC | SCP_WCE,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0xff, 0xff},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0xff, 0xff},
        /*max_pf_ceiling*/ {0xff, 0xff},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_caching_page caching_page_changeable = {
        /*page_code*/SMS_CACHING_PAGE,
        /*page_length*/sizeof(struct scsi_caching_page) - 2,
        /*flags1*/ 0,
        /*ret_priority*/ 0,
        /*disable_pf_transfer_len*/ {0, 0},
        /*min_prefetch*/ {0, 0},
        /*max_prefetch*/ {0, 0},
        /*max_pf_ceiling*/ {0, 0},
        /*flags2*/ 0,
        /*cache_segments*/ 0,
        /*cache_seg_size*/ {0, 0},
        /*reserved*/ 0,
        /*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_control_page control_page_default = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/0,
        /*queue_flags*/0,
        /*eca_and_aen*/0,
        /*reserved*/0,
        /*aen_holdoff_period*/{0, 0}
};

static struct scsi_control_page control_page_changeable = {
        /*page_code*/SMS_CONTROL_MODE_PAGE,
        /*page_length*/sizeof(struct scsi_control_page) - 2,
        /*rlec*/SCP_DSENSE,
        /*queue_flags*/0,
        /*eca_and_aen*/0,
        /*reserved*/0,
        /*aen_holdoff_period*/{0, 0}
};


/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int     ctl_is_single = 1;
static int     index_to_aps_page;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");

/*
 * Serial number (0x80), device id (0x83), and supported pages (0x00)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES   3

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
                                  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
                                              struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
                              struct ctl_ooa *ooa_hdr,
                              struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
                     struct thread *td);
uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
uint32_t ctl_port_idx(int port_num);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
                                   uint32_t targ_target, uint32_t targ_lun,
                                   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
                         struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
                                      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
                           uint64_t res_key, uint64_t sa_res_key,
                           uint8_t type, uint32_t residx,
                           struct ctl_scsiio *ctsio,
                           struct scsi_per_res_out *cdb,
                           struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
                                  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
                                         union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
                                union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
                                struct ctl_lun *lun,
                                struct ctl_cmd_entry *entry,
                                struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
                               struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
                            ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
                         ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static void ctl_run_task_queue(struct ctl_softc *ctl_softc);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
                                    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io, int have_lock);
static void ctl_work_thread(void *arg);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_open =       ctl_open,
        .d_close =      ctl_close,
        .d_ioctl =      ctl_ioctl,
        .d_name =       "ctl",
};
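
/*
 * The corresponding device node is created as /dev/cam/ctl by the
 * make_dev() call in ctl_init() below.
 */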


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
        "ctl",
        ctl_module_event_handler,
        NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

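/*
 * Called on the Originating SC when the remote side reports, in XFER mode,
 * that an I/O has finished: copy the final status and sense data back into
 * the original ctl_io and queue it for completion processing.
 */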
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
                            union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.original_sc == NULL) {
                printf("%s: original_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.original_sc->scsiio;
        ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        ctsio->io_hdr.status = msg_info->hdr.status;
        ctsio->scsi_status = msg_info->scsi.scsi_status;
        ctsio->sense_len = msg_info->scsi.sense_len;
        ctsio->sense_residual = msg_info->scsi.sense_residual;
        ctsio->residual = msg_info->scsi.residual;
        memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
               sizeof(ctsio->sense_data));
        memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
               &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
        ctl_wakeup_thread();
}

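/*
 * Serialization-only counterpart of the above: the I/O completed on the
 * other SC, so just mark the local shadow copy finished and queue it; no
 * data or sense needs to be copied back.
 */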
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
                                union ctl_ha_msg *msg_info)
{
        struct ctl_scsiio *ctsio;

        if (msg_info->hdr.serializing_sc == NULL) {
                printf("%s: serializing_sc == NULL!\n", __func__);
                /* XXX KDM now what? */
                return;
        }

        ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
        /*
         * Attempt to catch the situation where an I/O has
         * been freed, and we're using it again.
         */
        if (ctsio->io_hdr.io_type == 0xff) {
                union ctl_io *tmp_io;
                tmp_io = (union ctl_io *)ctsio;
                printf("%s: %p use after free!\n", __func__,
                       ctsio);
                printf("%s: type %d msg %d cdb %x iptl: "
                       "%d:%d:%d:%d tag 0x%04x "
                       "flag %#x status %x\n",
                        __func__,
                        tmp_io->io_hdr.io_type,
                        tmp_io->io_hdr.msg_type,
                        tmp_io->scsiio.cdb[0],
                        tmp_io->io_hdr.nexus.initid.id,
                        tmp_io->io_hdr.nexus.targ_port,
                        tmp_io->io_hdr.nexus.targ_target.id,
                        tmp_io->io_hdr.nexus.targ_lun,
                        (tmp_io->io_hdr.io_type ==
                        CTL_IO_TASK) ?
                        tmp_io->taskio.tag_num :
                        tmp_io->scsiio.tag_num,
                        tmp_io->io_hdr.flags,
                        tmp_io->io_hdr.status);
        }
#endif
        ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue, &ctsio->io_hdr, links);
        ctl_wakeup_thread();
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
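/*
 * Three event classes arrive here: CTL_HA_EVT_MSG_RECV (decode the message
 * and dispatch on its type), CTL_HA_EVT_MSG_SENT (complain if the send
 * failed) and CTL_HA_EVT_DISCONNECT.
 */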
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
        struct ctl_softc *ctl_softc;
        union ctl_io *io;
        struct ctl_prio *presio;
        ctl_ha_status isc_status;

        ctl_softc = control_softc;
        io = NULL;


#if 0
        printf("CTL: Isc Msg event %d\n", event);
#endif
        if (event == CTL_HA_EVT_MSG_RECV) {
                union ctl_ha_msg msg_info;

                isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
                                             sizeof(msg_info), /*wait*/ 0);
#if 0
                printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
                if (isc_status != 0) {
                        printf("Error receiving message, status = %d\n",
                               isc_status);
                        return;
                }
                mtx_lock(&ctl_softc->ctl_lock);

                switch (msg_info.hdr.msg_type) {
                case CTL_MSG_SERIALIZE:
#if 0
                        printf("Serialize\n");
#endif
                        io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
                        if (io == NULL) {
                                printf("ctl_isc_event_handler: can't allocate "
                                       "ctl_io!\n");
                                /* Bad Juju */
                                /* Need to set busy and send msg back */
                                mtx_unlock(&ctl_softc->ctl_lock);
                                msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
                                msg_info.hdr.status = CTL_SCSI_ERROR;
                                msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
                                msg_info.scsi.sense_len = 0;
                                if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                                    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
                                }
                                goto bailout;
                        }
                        ctl_zero_io(io);
                        // populate ctsio from msg_info
                        io->io_hdr.io_type = CTL_IO_SCSI;
                        io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
                        io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
                        printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
                        io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
                                            CTL_FLAG_IO_ACTIVE;
                        /*
                         * If we're in serialization-only mode, we don't
                         * want to go through full done processing.  Thus
                         * the COPY flag.
                         *
                         * XXX KDM add another flag that is more specific.
                         */
                        if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
                                io->io_hdr.flags |= CTL_FLAG_INT_COPY;
                        io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
                        printf("targ %d, port %d, iid %d, lun %d\n",
                               io->io_hdr.nexus.targ_target.id,
                               io->io_hdr.nexus.targ_port,
                               io->io_hdr.nexus.initid.id,
                               io->io_hdr.nexus.targ_lun);
#endif
                        io->scsiio.tag_num = msg_info.scsi.tag_num;
                        io->scsiio.tag_type = msg_info.scsi.tag_type;
                        memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
                               CTL_MAX_CDBLEN);
                        if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
                                struct ctl_cmd_entry *entry;
                                uint8_t opcode;

                                opcode = io->scsiio.cdb[0];
                                entry = &ctl_cmd_table[opcode];
                                io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
                                io->io_hdr.flags |=
                                        entry->flags & CTL_FLAG_DATA_MASK;
                        }
                        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
                                           &io->io_hdr, links);
                        ctl_wakeup_thread();
                        break;

                /* Performed on the Originating SC, XFER mode only */
                case CTL_MSG_DATAMOVE: {
                        struct ctl_sg_entry *sgl;
                        int i, j;

                        io = msg_info.hdr.original_sc;
                        if (io == NULL) {
                                printf("%s: original_sc == NULL!\n", __func__);
                                /* XXX KDM do something here */
                                break;
                        }
                        io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
                        io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
                        /*
                         * Keep track of this, we need to send it back over
                         * when the datamove is complete.
                         */
                        io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

                        if (msg_info.dt.sg_sequence == 0) {
                                /*
                                 * XXX KDM we use the preallocated S/G list
                                 * here, but we'll need to change this to
                                 * dynamic allocation if we need larger S/G
                                 * lists.
                                 */
                                if (msg_info.dt.kern_sg_entries >
                                    sizeof(io->io_hdr.remote_sglist) /
                                    sizeof(io->io_hdr.remote_sglist[0])) {
                                        printf("%s: number of S/G entries "
                                            "needed %u > allocated num %zd\n",
                                            __func__,
                                            msg_info.dt.kern_sg_entries,
                                            sizeof(io->io_hdr.remote_sglist)/
                                            sizeof(io->io_hdr.remote_sglist[0]));

                                        /*
                                         * XXX KDM send a message back to
                                         * the other side to shut down the
                                         * DMA.  The error will come back
                                         * through via the normal channel.
                                         */
                                        break;
                                }
                                sgl = io->io_hdr.remote_sglist;
                                memset(sgl, 0,
                                       sizeof(io->io_hdr.remote_sglist));

                                io->scsiio.kern_data_ptr = (uint8_t *)sgl;

                                io->scsiio.kern_sg_entries =
                                        msg_info.dt.kern_sg_entries;
                                io->scsiio.rem_sg_entries =
                                        msg_info.dt.kern_sg_entries;
                                io->scsiio.kern_data_len =
                                        msg_info.dt.kern_data_len;
                                io->scsiio.kern_total_len =
                                        msg_info.dt.kern_total_len;
                                io->scsiio.kern_data_resid =
                                        msg_info.dt.kern_data_resid;
                                io->scsiio.kern_rel_offset =
                                        msg_info.dt.kern_rel_offset;
                                /*
                                 * Clear out per-DMA flags.
                                 */
                                io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
                                /*
                                 * Add per-DMA flags that are set for this
                                 * particular DMA request.
                                 */
                                io->io_hdr.flags |= msg_info.dt.flags &
                                                    CTL_FLAG_RDMA_MASK;
                        } else
                                sgl = (struct ctl_sg_entry *)
                                        io->scsiio.kern_data_ptr;

                        for (i = msg_info.dt.sent_sg_entries, j = 0;
                             i < (msg_info.dt.sent_sg_entries +
                             msg_info.dt.cur_sg_entries); i++, j++) {
                                sgl[i].addr = msg_info.dt.sg_list[j].addr;
                                sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
                                printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
                                       __func__,
                                       msg_info.dt.sg_list[j].addr,
                                       msg_info.dt.sg_list[j].len,
                                       sgl[i].addr, sgl[i].len, j, i);
#endif
                        }
#if 0
                        memcpy(&sgl[msg_info.dt.sent_sg_entries],
                               msg_info.dt.sg_list,
                               sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

                        /*
                         * If this is the last piece of the I/O, we've got
                         * the full S/G list.  Queue processing in the thread.
                         * Otherwise wait for the next piece.
                         */
                        if (msg_info.dt.sg_last != 0) {
                                STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
                                                   &io->io_hdr, links);
                                ctl_wakeup_thread();
                        }
                        break;
                }
                /* Performed on the Serializing (primary) SC, XFER mode only */
                case CTL_MSG_DATAMOVE_DONE: {
                        if (msg_info.hdr.serializing_sc == NULL) {
                                printf("%s: serializing_sc == NULL!\n",
                                       __func__);
                                /* XXX KDM now what? */
                                break;
                        }
                        /*
                         * We grab the sense information here in case
                         * there was a failure, so we can return status
                         * back to the initiator.
                         */
                        io = msg_info.hdr.serializing_sc;
                        io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
                        io->io_hdr.status = msg_info.hdr.status;
                        io->scsiio.scsi_status = msg_info.scsi.scsi_status;
                        io->scsiio.sense_len = msg_info.scsi.sense_len;
                        io->scsiio.sense_residual = msg_info.scsi.sense_residual;
                        io->io_hdr.port_status = msg_info.scsi.fetd_status;
                        io->scsiio.residual = msg_info.scsi.residual;
                        memcpy(&io->scsiio.sense_data, &msg_info.scsi.sense_data,
                               sizeof(io->scsiio.sense_data));

                        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
                                           &io->io_hdr, links);
                        ctl_wakeup_thread();
                        break;
                }

                /* Performed on the Originating SC, SER_ONLY mode */
                case CTL_MSG_R2R:
                        io = msg_info.hdr.original_sc;
                        if (io == NULL) {
                                printf("%s: Major Bummer\n", __func__);
                                mtx_unlock(&ctl_softc->ctl_lock);
                                return;
                        } else {
#if 0
                                printf("pOrig %x\n",(int) ctsio);
#endif
                        }
                        io->io_hdr.msg_type = CTL_MSG_R2R;
                        io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
                        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
                                           &io->io_hdr, links);
                        ctl_wakeup_thread();
                        break;

                /*
                 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
                 * mode, and on the Originating (i.e. secondary) SC in XFER
                 * mode.
                 */
                case CTL_MSG_FINISH_IO:
                        if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
                                ctl_isc_handler_finish_xfer(ctl_softc,
                                                            &msg_info);
                        else
                                ctl_isc_handler_finish_ser_only(ctl_softc,
                                                                &msg_info);
                        break;

                /* Performed on the Originating SC */
                case CTL_MSG_BAD_JUJU:
                        io = msg_info.hdr.original_sc;
                        if (io == NULL) {
                                printf("%s: Bad JUJU!, original_sc is NULL!\n",
                                       __func__);
                                break;
                        }
                        ctl_copy_sense_data(&msg_info, io);
                        /*
                         * IO should have already been cleaned up on other
                         * SC so clear this flag so we won't send a message
                         * back to finish the IO there.
                         */
                        io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
                        io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

                        /* io = msg_info.hdr.serializing_sc; */
                        io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
                        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
                                           &io->io_hdr, links);
                        ctl_wakeup_thread();
                        break;

                /* Handle resets sent from the other side */
                case CTL_MSG_MANAGE_TASKS: {
                        struct ctl_taskio *taskio;
                        taskio = (struct ctl_taskio *)ctl_alloc_io(
                                (void *)ctl_softc->othersc_pool);
                        if (taskio == NULL) {
                                printf("ctl_isc_event_handler: can't allocate "
                                       "ctl_io!\n");
                                /* Bad Juju */
                                /* should I just call the proper reset func
                                   here??? */
                                mtx_unlock(&ctl_softc->ctl_lock);
                                goto bailout;
                        }
                        ctl_zero_io((union ctl_io *)taskio);
                        taskio->io_hdr.io_type = CTL_IO_TASK;
                        taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
                        taskio->io_hdr.nexus = msg_info.hdr.nexus;
                        taskio->task_action = msg_info.task.task_action;
                        taskio->tag_num = msg_info.task.tag_num;
                        taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
                        taskio->io_hdr.start_time = time_uptime;
                        getbintime(&taskio->io_hdr.start_bt);
#if 0
                        cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
                        STAILQ_INSERT_TAIL(&ctl_softc->task_queue,
                                           &taskio->io_hdr, links);
                        ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
                        ctl_wakeup_thread();
                        break;
                }
                /* Persistent Reserve action which needs attention */
                case CTL_MSG_PERS_ACTION:
                        presio = (struct ctl_prio *)ctl_alloc_io(
                                (void *)ctl_softc->othersc_pool);
                        if (presio == NULL) {
                                printf("ctl_isc_event_handler: can't allocate "
                                       "ctl_io!\n");
                                /* Bad Juju */
                                /* Need to set busy and send msg back */
                                mtx_unlock(&ctl_softc->ctl_lock);
                                goto bailout;
                        }
                        ctl_zero_io((union ctl_io *)presio);
                        presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
                        presio->pr_msg = msg_info.pr;
                        STAILQ_INSERT_TAIL(&ctl_softc->isc_queue,
                                           &presio->io_hdr, links);
                        ctl_wakeup_thread();
                        break;
                case CTL_MSG_SYNC_FE:
                        rcv_sync_msg = 1;
                        break;
                case CTL_MSG_APS_LOCK: {
                        // It's quicker to execute this than to
                        // queue it.
                        struct ctl_lun *lun;
                        struct ctl_page_index *page_index;
                        struct copan_aps_subpage *current_sp;
                        uint32_t targ_lun;

                        targ_lun = msg_info.hdr.nexus.targ_lun;
                        if (msg_info.hdr.nexus.lun_map_fn != NULL)
                                targ_lun = msg_info.hdr.nexus.lun_map_fn(msg_info.hdr.nexus.lun_map_arg, targ_lun);

                        lun = ctl_softc->ctl_luns[targ_lun];
                        page_index = &lun->mode_pages.index[index_to_aps_page];
                        current_sp = (struct copan_aps_subpage *)
                                     (page_index->page_data +
                                     (page_index->page_len * CTL_PAGE_CURRENT));

                        current_sp->lock_active = msg_info.aps.lock_flag;
                        break;
                }
                default:
                        printf("%s: unknown message type %d\n",
                               __func__, msg_info.hdr.msg_type);
                }
                mtx_unlock(&ctl_softc->ctl_lock);
        } else if (event == CTL_HA_EVT_MSG_SENT) {
                if (param != CTL_HA_STATUS_SUCCESS) {
                        printf("Bad status from ctl_ha_msg_send status %d\n",
                               param);
                }
                return;
        } else if (event == CTL_HA_EVT_DISCONNECT) {
                printf("CTL: Got a disconnect from Isc\n");
                return;
        } else {
                printf("ctl_isc_event_handler: Unknown event %d\n", event);
                return;
        }

bailout:
        return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
        struct scsi_sense_data *sense;

        sense = &dest->scsiio.sense_data;
        bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
        dest->scsiio.scsi_status = src->scsi.scsi_status;
        dest->scsiio.sense_len = src->scsi.sense_len;
        dest->io_hdr.status = src->hdr.status;
}

static int
ctl_init(void)
{
        struct ctl_softc *softc;
        struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
        struct ctl_frontend *fe;
        struct ctl_lun *lun;
        uint8_t sc_id = 0;
#if 0
        int i;
#endif
        int error, retval;
        //int isc_retval;

        retval = 0;
        ctl_pause_rtr = 0;
        rcv_sync_msg = 0;

        control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
                               M_WAITOK | M_ZERO);
        softc = control_softc;

        softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
                              "cam/ctl");

        softc->dev->si_drv1 = softc;

        /*
         * By default, return a "bad LUN" peripheral qualifier for unknown
         * LUNs.  The user can override this default using the tunable or
         * sysctl.  See the comment in ctl_inquiry_std() for more details.
         */
        softc->inquiry_pq_no_lun = 1;
        TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
                          &softc->inquiry_pq_no_lun);
        sysctl_ctx_init(&softc->sysctl_ctx);
        softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
                SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
                CTLFLAG_RD, 0, "CAM Target Layer");

        if (softc->sysctl_tree == NULL) {
                printf("%s: unable to allocate sysctl tree\n", __func__);
                destroy_dev(softc->dev);
                free(control_softc, M_DEVBUF);
                control_softc = NULL;
                return (ENOMEM);
        }

        SYSCTL_ADD_INT(&softc->sysctl_ctx,
                       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
                       "inquiry_pq_no_lun", CTLFLAG_RW,
                       &softc->inquiry_pq_no_lun, 0,
                       "Report no lun possible for invalid LUNs");
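        /*
         * For example, the default can be overridden at boot with a
         * loader.conf entry (kern.cam.ctl.inquiry_pq_no_lun=0) or at
         * runtime via sysctl(8): sysctl kern.cam.ctl.inquiry_pq_no_lun=0.
         */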

        mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
        mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
        softc->open_count = 0;

        /*
         * Default to actually sending a SYNCHRONIZE CACHE command down to
         * the drive.
         */
        softc->flags = CTL_FLAG_REAL_SYNC;

        /*
         * In Copan's HA scheme, the "master" and "slave" roles are
         * figured out through the slot the controller is in.  Although it
         * is an active/active system, someone has to be in charge.
         */
#ifdef NEEDTOPORT
        scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

        if (sc_id == 0) {
                softc->flags |= CTL_FLAG_MASTER_SHELF;
                persis_offset = 0;
        } else
                persis_offset = CTL_MAX_INITIATORS;

        /*
         * XXX KDM need to figure out where we want to get our target ID
         * and WWID.  Is it different on each port?
         */
        softc->target.id = 0;
        softc->target.wwid[0] = 0x12345678;
        softc->target.wwid[1] = 0x87654321;
        STAILQ_INIT(&softc->lun_list);
        STAILQ_INIT(&softc->pending_lun_queue);
        STAILQ_INIT(&softc->task_queue);
        STAILQ_INIT(&softc->incoming_queue);
        STAILQ_INIT(&softc->rtr_queue);
        STAILQ_INIT(&softc->done_queue);
        STAILQ_INIT(&softc->isc_queue);
        STAILQ_INIT(&softc->fe_list);
        STAILQ_INIT(&softc->be_list);
        STAILQ_INIT(&softc->io_pools);

        lun = &softc->lun;

        /*
         * We don't bother calling these with ctl_lock held here, because,
         * in theory, no one else can try to do anything while we're in our
         * module init routine.
         */
        if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
                            &internal_pool) != 0) {
                printf("ctl: can't allocate %d entry internal pool, "
                       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
                return (ENOMEM);
        }

        if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
                            CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
                printf("ctl: can't allocate %d entry emergency pool, "
                       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
                ctl_pool_free(internal_pool);
                return (ENOMEM);
        }

        if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
                            &other_pool) != 0) {
                printf("ctl: can't allocate %d entry other SC pool, "
                       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
                ctl_pool_free(internal_pool);
                ctl_pool_free(emergency_pool);
                return (ENOMEM);
        }

        softc->internal_pool = internal_pool;
        softc->emergency_pool = emergency_pool;
        softc->othersc_pool = other_pool;

        /*
         * We used to allocate a processor LUN here.  The new scheme is to
         * just let the user allocate LUNs as he sees fit.
         */
#if 0
        mtx_lock(&softc->ctl_lock);
        ctl_alloc_lun(softc, lun, /*be_lun*/NULL, /*target*/softc->target);
        mtx_unlock(&softc->ctl_lock);
#endif

        error = kproc_create(ctl_work_thread, softc, &softc->work_thread, 0, 0,
                         "ctl_thrd");
        if (error != 0) {
                printf("error creating CTL work thread!\n");
                mtx_lock(&softc->ctl_lock);
                ctl_free_lun(lun);
                mtx_unlock(&softc->ctl_lock);
                ctl_pool_free(internal_pool);
                ctl_pool_free(emergency_pool);
                ctl_pool_free(other_pool);
                return (error);
        }
        if (bootverbose)
                printf("ctl: CAM Target Layer loaded\n");

        /*
         * Initialize the initiator and portname mappings
         */
        memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));

        /*
         * Initialize the ioctl front end.
         */
        fe = &softc->ioctl_info.fe;
        sprintf(softc->ioctl_info.port_name, "CTL ioctl");
        fe->port_type = CTL_PORT_IOCTL;
        fe->num_requested_ctl_io = 100;
        fe->port_name = softc->ioctl_info.port_name;
        fe->port_online = ctl_ioctl_online;
        fe->port_offline = ctl_ioctl_offline;
        fe->onoff_arg = &softc->ioctl_info;
        fe->targ_enable = ctl_ioctl_targ_enable;
        fe->targ_disable = ctl_ioctl_targ_disable;
        fe->lun_enable = ctl_ioctl_lun_enable;
        fe->lun_disable = ctl_ioctl_lun_disable;
        fe->targ_lun_arg = &softc->ioctl_info;
        fe->fe_datamove = ctl_ioctl_datamove;
        fe->fe_done = ctl_ioctl_done;
        fe->max_targets = 15;
        fe->max_target_id = 15;

        if (ctl_frontend_register(&softc->ioctl_info.fe,
                          (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
                printf("ctl: ioctl front end registration failed, will "
                       "continue anyway\n");
        }

#ifdef CTL_IO_DELAY
        if (sizeof(struct callout) > CTL_TIMER_BYTES) {
                printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
                       sizeof(struct callout), CTL_TIMER_BYTES);
                return (EINVAL);
        }
#endif /* CTL_IO_DELAY */

        return (0);
}

void
ctl_shutdown(void)
{
        struct ctl_softc *softc;
        struct ctl_lun *lun, *next_lun;
        struct ctl_io_pool *pool;

        softc = (struct ctl_softc *)control_softc;

        if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
                printf("ctl: ioctl front end deregistration failed\n");

        mtx_lock(&softc->ctl_lock);

        /*
         * Free up each LUN.
         */
        for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
                next_lun = STAILQ_NEXT(lun, links);
                ctl_free_lun(lun);
        }

        mtx_unlock(&softc->ctl_lock);

        /*
         * This will rip the rug out from under any FETDs or anyone else
         * that has a pool allocated.  Since we increment our module
         * refcount any time someone outside the main CTL module allocates
         * a pool, we shouldn't have any problems here.  The user won't be
         * able to unload the CTL module until client modules have
         * successfully unloaded.
         */
        while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
                ctl_pool_free(pool);

#if 0
        ctl_shutdown_thread(softc->work_thread);
#endif

        mtx_destroy(&softc->pool_lock);
        mtx_destroy(&softc->ctl_lock);

        destroy_dev(softc->dev);

        sysctl_ctx_free(&softc->sysctl_ctx);

        free(control_softc, M_DEVBUF);
        control_softc = NULL;

        if (bootverbose)
                printf("ctl: CAM Target Layer unloaded\n");
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

        switch (what) {
        case MOD_LOAD:
                return (ctl_init());
        case MOD_UNLOAD:
                return (EBUSY);
        default:
                return (EOPNOTSUPP);
        }
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
        struct ctl_softc *softc;
        struct ctl_frontend *fe;

        if (ctl_is_single == 0) {
                union ctl_ha_msg msg_info;
                int isc_retval;

#if 0
                printf("%s: HA mode, synchronizing frontend enable\n",
                        __func__);
#endif
                msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
                if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
                        sizeof(msg_info), 1)) > CTL_HA_STATUS_SUCCESS) {
                        printf("Sync msg send error retval %d\n", isc_retval);
                }
                if (!rcv_sync_msg) {
                        isc_retval = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
                                sizeof(msg_info), 1);
                }
#if 0
                printf("CTL:Frontend Enable\n");
        } else {
                printf("%s: single mode, skipping frontend synchronization\n",
                        __func__);
#endif
        }

        softc = control_softc;

        STAILQ_FOREACH(fe, &softc->fe_list, links) {
                if (port_type & fe->port_type) {
#if 0
                        printf("port %d\n", fe->targ_port);
#endif
                        ctl_frontend_online(fe);
                }
        }

        return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
        struct ctl_softc *softc;
        struct ctl_frontend *fe;

        softc = control_softc;

        STAILQ_FOREACH(fe, &softc->fe_list, links) {
                if (port_type & fe->port_type)
                        ctl_frontend_offline(fe);
        }

        return (0);
}

1291 /*
1292  * Returns 0 for success, 1 for failure.
1293  * Currently the only failure mode is if there aren't enough entries
1294  * allocated.  So, in case of a failure, look at num_entries_dropped,
1295  * reallocate and try again.
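 *
 * A minimal caller sketch; M_TEMP and the CTL_PORT_ALL type mask are
 * illustrative choices here, not requirements:
 *
 *	struct ctl_port_entry *entries;
 *	int num = 8, filled, dropped;
 *
 *	entries = malloc(num * sizeof(*entries), M_TEMP, M_WAITOK);
 *	if (ctl_port_list(entries, num, &filled, &dropped,
 *	    CTL_PORT_ALL, 0) != 0) {
 *		free(entries, M_TEMP);
 *		num = filled + dropped;
 *		entries = malloc(num * sizeof(*entries), M_TEMP, M_WAITOK);
 *		(void)ctl_port_list(entries, num, &filled, &dropped,
 *		    CTL_PORT_ALL, 0);
 *	}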
1296  */
1297 int
1298 ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
1299               int *num_entries_filled, int *num_entries_dropped,
1300               ctl_port_type port_type, int no_virtual)
1301 {
1302         struct ctl_softc *softc;
1303         struct ctl_frontend *fe;
1304         int entries_dropped, entries_filled;
1305         int retval;
1306         int i;
1307
1308         softc = control_softc;
1309
1310         retval = 0;
1311         entries_filled = 0;
1312         entries_dropped = 0;
1313
1314         i = 0;
1315         mtx_lock(&softc->ctl_lock);
1316         STAILQ_FOREACH(fe, &softc->fe_list, links) {
1317                 struct ctl_port_entry *entry;
1318
1319                 if ((fe->port_type & port_type) == 0)
1320                         continue;
1321
1322                 if ((no_virtual != 0)
1323                  && (fe->virtual_port != 0))
1324                         continue;
1325
1326                 if (entries_filled >= num_entries_alloced) {
1327                         entries_dropped++;
1328                         continue;
1329                 }
1330                 entry = &entries[i];
1331
1332                 entry->port_type = fe->port_type;
1333                 strlcpy(entry->port_name, fe->port_name,
1334                         sizeof(entry->port_name));
1335                 entry->physical_port = fe->physical_port;
1336                 entry->virtual_port = fe->virtual_port;
1337                 entry->wwnn = fe->wwnn;
1338                 entry->wwpn = fe->wwpn;
1339
1340                 i++;
1341                 entries_filled++;
1342         }
1343
1344         mtx_unlock(&softc->ctl_lock);
1345
1346         if (entries_dropped > 0)
1347                 retval = 1;
1348
1349         *num_entries_dropped = entries_dropped;
1350         *num_entries_filled = entries_filled;
1351
1352         return (retval);
1353 }
1354
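/*
 * Port online/offline callbacks for the ioctl frontend.  The ENABLED flag
 * toggled here is what gates CTL_IO submissions in ctl_ioctl() below.
 */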
1355 static void
1356 ctl_ioctl_online(void *arg)
1357 {
1358         struct ctl_ioctl_info *ioctl_info;
1359
1360         ioctl_info = (struct ctl_ioctl_info *)arg;
1361
1362         ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
1363 }
1364
1365 static void
1366 ctl_ioctl_offline(void *arg)
1367 {
1368         struct ctl_ioctl_info *ioctl_info;
1369
1370         ioctl_info = (struct ctl_ioctl_info *)arg;
1371
1372         ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
1373 }
1374
1375 /*
1376  * Remove an initiator by port number and initiator ID.
1377  * Returns 0 for success, 1 for failure.
1378  */
1379 int
1380 ctl_remove_initiator(int32_t targ_port, uint32_t iid)
1381 {
1382         struct ctl_softc *softc;
1383
1384         softc = control_softc;
1385
1386         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
1387
1388         if ((targ_port < 0)
1389          || (targ_port >= CTL_MAX_PORTS)) {
1390                 printf("%s: invalid port number %d\n", __func__, targ_port);
1391                 return (1);
1392         }
1393         if (iid >= CTL_MAX_INIT_PER_PORT) {
1394                 printf("%s: initiator ID %u >= maximum %u!\n",
1395                        __func__, iid, CTL_MAX_INIT_PER_PORT);
1396                 return (1);
1397         }
1398
1399         mtx_lock(&softc->ctl_lock);
1400
1401         softc->wwpn_iid[targ_port][iid].in_use = 0;
1402
1403         mtx_unlock(&softc->ctl_lock);
1404
1405         return (0);
1406 }
1407
1408 /*
1409  * Add an initiator to the initiator map.
1410  * Returns 0 for success, 1 for failure.
1411  */
1412 int
1413 ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
1414 {
1415         struct ctl_softc *softc;
1416         int retval;
1417
1418         softc = control_softc;
1419
1420         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
1421
1422         retval = 0;
1423
1424         if ((targ_port < 0)
1425          || (targ_port >= CTL_MAX_PORTS)) {
1426                 printf("%s: invalid port number %d\n", __func__, targ_port);
1427                 return (1);
1428         }
1429         if (iid >= CTL_MAX_INIT_PER_PORT) {
1430                 printf("%s: WWPN %#jx initiator ID %u >= maximum %u!\n",
1431                        __func__, (uintmax_t)wwpn, iid, CTL_MAX_INIT_PER_PORT);
1432                 return (1);
1433         }
1434
1435         mtx_lock(&softc->ctl_lock);
1436
1437         if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
1438                 /*
1439                  * We don't treat this as an error.
1440                  */
1441                 if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
1442                         printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
1443                                __func__, targ_port, iid, (uintmax_t)wwpn);
1444                         goto bailout;
1445                 }
1446
1447                 /*
1448                  * This is an error, but what do we do about it?  The
1449                  * driver is telling us we have a new WWPN for this
1450                  * initiator ID, so we pretty much need to use it.
1451                  */
1452                 printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
1453                        "still at that address\n", __func__, targ_port, iid,
1454                        (uintmax_t)wwpn,
1455                        (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);
1456
1457                 /*
1458                  * XXX KDM clear have_ca and ua_pending on each LUN for
1459                  * this initiator.
1460                  */
1461         }
1462         softc->wwpn_iid[targ_port][iid].in_use = 1;
1463         softc->wwpn_iid[targ_port][iid].iid = iid;
1464         softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
1465         softc->wwpn_iid[targ_port][iid].port = targ_port;
1466
1467 bailout:
1468
1469         mtx_unlock(&softc->ctl_lock);
1470
1471         return (retval);
1472 }
1473
1474 /*
1475  * XXX KDM should we pretend to do something in the target/lun
1476  * enable/disable functions?
1477  */
1478 static int
1479 ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
1480 {
1481         return (0);
1482 }
1483
1484 static int
1485 ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
1486 {
1487         return (0);
1488 }
1489
1490 static int
1491 ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
1492 {
1493         return (0);
1494 }
1495
1496 static int
1497 ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
1498 {
1499         return (0);
1500 }
1501
1502 /*
1503  * Data movement routine for the CTL ioctl frontend port.
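 * Copies between the user buffer (or user-supplied S/G list) and the
 * kernel S/G list, resuming at ext_data_filled so that a single transfer
 * can be completed across several calls.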
1504  */
1505 static int
1506 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
1507 {
1508         struct ctl_sg_entry *ext_sglist, *kern_sglist;
1509         struct ctl_sg_entry ext_entry, kern_entry;
1510         int ext_sglen, ext_sg_entries, kern_sg_entries;
1511         int ext_sg_start, ext_offset;
1512         int len_to_copy, len_copied;
1513         int kern_watermark, ext_watermark;
1514         int ext_sglist_malloced;
1515         int i, j;
1516
1517         ext_sglist_malloced = 0;
1518         ext_sg_start = 0;
1519         ext_offset = 0;
1520
1521         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
1522
1523         /*
1524          * If this flag is set, fake the data transfer.
1525          */
1526         if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
1527                 ctsio->ext_data_filled = ctsio->ext_data_len;
1528                 goto bailout;
1529         }
1530
1531         /*
1532          * To simplify things here, if we have a single buffer, stick it in
1533          * a S/G entry and just make it a single entry S/G list.
1534          */
1535         if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
1536                 int len_seen;
1537
1538                 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
1539
1540                 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
1541                                                            M_WAITOK);
1542                 ext_sglist_malloced = 1;
1543                 if (copyin(ctsio->ext_data_ptr, ext_sglist,
1544                                    ext_sglen) != 0) {
1545                         ctl_set_internal_failure(ctsio,
1546                                                  /*sks_valid*/ 0,
1547                                                  /*retry_count*/ 0);
1548                         goto bailout;
1549                 }
1550                 ext_sg_entries = ctsio->ext_sg_entries;
1551                 len_seen = 0;
1552                 for (i = 0; i < ext_sg_entries; i++) {
1553                         if ((len_seen + ext_sglist[i].len) >=
1554                              ctsio->ext_data_filled) {
1555                                 ext_sg_start = i;
1556                                 ext_offset = ctsio->ext_data_filled - len_seen;
1557                                 break;
1558                         }
1559                         len_seen += ext_sglist[i].len;
1560                 }
1561         } else {
1562                 ext_sglist = &ext_entry;
1563                 ext_sglist->addr = ctsio->ext_data_ptr;
1564                 ext_sglist->len = ctsio->ext_data_len;
1565                 ext_sg_entries = 1;
1566                 ext_sg_start = 0;
1567                 ext_offset = ctsio->ext_data_filled;
1568         }
1569
1570         if (ctsio->kern_sg_entries > 0) {
1571                 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
1572                 kern_sg_entries = ctsio->kern_sg_entries;
1573         } else {
1574                 kern_sglist = &kern_entry;
1575                 kern_sglist->addr = ctsio->kern_data_ptr;
1576                 kern_sglist->len = ctsio->kern_data_len;
1577                 kern_sg_entries = 1;
1578         }
1579
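        /*
         * Walk the external and kernel S/G lists in lockstep.  Each
         * watermark counts the bytes already consumed from the current
         * entry on that side; when it reaches the entry's length, we
         * advance to the next entry and reset the watermark to zero.
         */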
1581         kern_watermark = 0;
1582         ext_watermark = ext_offset;
1583         len_copied = 0;
1584         for (i = ext_sg_start, j = 0;
1585              i < ext_sg_entries && j < kern_sg_entries;) {
1586                 uint8_t *ext_ptr, *kern_ptr;
1587
1588                 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
1589                                       kern_sglist[j].len - kern_watermark);
1590
1591                 ext_ptr = (uint8_t *)ext_sglist[i].addr;
1592                 ext_ptr = ext_ptr + ext_watermark;
1593                 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
1594                         /*
1595                          * XXX KDM fix this!
1596                          */
1597                         panic("need to implement bus address support");
1598 #if 0
1599                         kern_ptr = bus_to_virt(kern_sglist[j].addr);
1600 #endif
1601                 } else
1602                         kern_ptr = (uint8_t *)kern_sglist[j].addr;
1603                 kern_ptr = kern_ptr + kern_watermark;
1604
1605                 kern_watermark += len_to_copy;
1606                 ext_watermark += len_to_copy;
1607
1608                 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
1609                      CTL_FLAG_DATA_IN) {
1610                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
1611                                          "bytes to user\n", len_to_copy));
1612                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
1613                                          "to %p\n", kern_ptr, ext_ptr));
1614                         if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
1615                                 ctl_set_internal_failure(ctsio,
1616                                                          /*sks_valid*/ 0,
1617                                                          /*retry_count*/ 0);
1618                                 goto bailout;
1619                         }
1620                 } else {
1621                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
1622                                          "bytes from user\n", len_to_copy));
1623                         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
1624                                          "to %p\n", ext_ptr, kern_ptr));
1625                         if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
1626                                 ctl_set_internal_failure(ctsio,
1627                                                          /*sks_valid*/ 0,
1628                                                          /*retry_count*/0);
1629                                 goto bailout;
1630                         }
1631                 }
1632
1633                 len_copied += len_to_copy;
1634
1635                 if (ext_sglist[i].len == ext_watermark) {
1636                         i++;
1637                         ext_watermark = 0;
1638                 }
1639
1640                 if (kern_sglist[j].len == kern_watermark) {
1641                         j++;
1642                         kern_watermark = 0;
1643                 }
1644         }
1645
1646         ctsio->ext_data_filled += len_copied;
1647
1648         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
1649                          "kern_sg_entries: %d\n", ext_sg_entries,
1650                          kern_sg_entries));
1651         CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
1652                          "kern_data_len = %d\n", ctsio->ext_data_len,
1653                          ctsio->kern_data_len));
1654
1656         /* XXX KDM set residual?? */
1657 bailout:
1658
1659         if (ext_sglist_malloced != 0)
1660                 free(ext_sglist, M_CTL);
1661
1662         return (CTL_RETVAL_COMPLETE);
1663 }
1664
1665 /*
1666  * Serialize a command that went down the "wrong" side, and so was sent to
1667  * this controller for execution.  The logic is a little different than the
1668  * standard case in ctl_scsiio_precheck().  Errors in this case need to get
1669  * sent back to the other side, but in the success case, we execute the
1670  * command on this side (XFER mode) or tell the other side to execute it
1671  * (SER_ONLY mode).
1672  */
1673 static int
1674 ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio, int have_lock)
1675 {
1676         struct ctl_softc *ctl_softc;
1677         union ctl_ha_msg msg_info;
1678         struct ctl_lun *lun;
1679         int retval = 0;
1680         uint32_t targ_lun;
1681
1682         ctl_softc = control_softc;
1683         if (have_lock == 0)
1684                 mtx_lock(&ctl_softc->ctl_lock);
1685
1686         targ_lun = ctsio->io_hdr.nexus.targ_lun;
1687         if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
1688                 targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
1689         lun = ctl_softc->ctl_luns[targ_lun];
1690         if (lun == NULL) {
1692                 /*
1693                  * Why isn't LUN defined? The other side wouldn't
1694                  * send a cmd if the LUN is undefined.
1695                  */
1696                 printf("%s: Bad JUJU! LUN is NULL!\n", __func__);
1697
1698                 /* "Logical unit not supported" */
1699                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1700                                    lun,
1701                                    /*sense_format*/SSD_TYPE_NONE,
1702                                    /*current_error*/ 1,
1703                                    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1704                                    /*asc*/ 0x25,
1705                                    /*ascq*/ 0x00,
1706                                    SSD_ELEM_NONE);
1707
1708                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1709                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1710                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1711                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1712                 msg_info.hdr.serializing_sc = NULL;
1713                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1714                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1715                                 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1716                 }
1717                 if (have_lock == 0)
1718                         mtx_unlock(&ctl_softc->ctl_lock);
1719                 return (1);
1720
1721         }
1722
1723         TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1724
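        /*
         * Check this command for conflicts against everything else
         * outstanding on the LUN.  Every failure case below builds sense
         * data and bounces the command back to the other controller as a
         * CTL_MSG_BAD_JUJU.
         */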
1725         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
1726                 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
1727                  ooa_links))) {
1728         case CTL_ACTION_BLOCK:
1729                 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
1730                 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
1731                                   blocked_links);
1732                 break;
1733         case CTL_ACTION_PASS:
1734         case CTL_ACTION_SKIP:
1735                 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
1736                         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
1737                         STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
1738                                            &ctsio->io_hdr, links);
1739                 } else {
1740
1741                         /* send msg back to other side */
1742                         msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1743                         msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
1744                         msg_info.hdr.msg_type = CTL_MSG_R2R;
1745 #if 0
1746                         printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
1747 #endif
1748                         if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1749                             sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1750                         }
1751                 }
1752                 break;
1753         case CTL_ACTION_OVERLAP:
1754                 /* OVERLAPPED COMMANDS ATTEMPTED */
1755                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1756                                    lun,
1757                                    /*sense_format*/SSD_TYPE_NONE,
1758                                    /*current_error*/ 1,
1759                                    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1760                                    /*asc*/ 0x4E,
1761                                    /*ascq*/ 0x00,
1762                                    SSD_ELEM_NONE);
1763
1764                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1765                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1766                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1767                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1768                 msg_info.hdr.serializing_sc = NULL;
1769                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1770 #if 0
1771                 printf("BAD JUJU:Major Bummer Overlap\n");
1772 #endif
1773                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1774                 retval = 1;
1775                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1776                     sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1777                 }
1778                 break;
1779         case CTL_ACTION_OVERLAP_TAG:
1780                 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
1781                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1782                                    lun,
1783                                    /*sense_format*/SSD_TYPE_NONE,
1784                                    /*current_error*/ 1,
1785                                    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1786                                    /*asc*/ 0x4D,
1787                                    /*ascq*/ ctsio->tag_num & 0xff,
1788                                    SSD_ELEM_NONE);
1789
1790                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1791                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1792                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1793                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1794                 msg_info.hdr.serializing_sc = NULL;
1795                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1796 #if 0
1797                 printf("BAD JUJU:Major Bummer Overlap Tag\n");
1798 #endif
1799                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1800                 retval = 1;
1801                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1802                     sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1803                 }
1804                 break;
1805         case CTL_ACTION_ERROR:
1806         default:
1807                 /* "Internal target failure" */
1808                 ctl_set_sense_data(&msg_info.scsi.sense_data,
1809                                    lun,
1810                                    /*sense_format*/SSD_TYPE_NONE,
1811                                    /*current_error*/ 1,
1812                                    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
1813                                    /*asc*/ 0x44,
1814                                    /*ascq*/ 0x00,
1815                                    SSD_ELEM_NONE);
1816
1817                 msg_info.scsi.sense_len = SSD_FULL_SIZE;
1818                 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
1819                 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
1820                 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
1821                 msg_info.hdr.serializing_sc = NULL;
1822                 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
1823 #if 0
1824                 printf("BAD JUJU:Major Bummer HW Error\n");
1825 #endif
1826                 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
1827                 retval = 1;
1828                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
1829                     sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
1830                 }
1831                 break;
1832         }
1833         if (have_lock == 0)
1834                 mtx_unlock(&ctl_softc->ctl_lock);
1835         return (retval);
1836 }
1837
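/*
 * Submit an I/O to CTL and sleep until it completes, servicing any
 * datamove requests from the caller's own context.  This is the
 * synchronous core of the CTL_IO ioctl below.
 */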
1838 static int
1839 ctl_ioctl_submit_wait(union ctl_io *io)
1840 {
1841         struct ctl_fe_ioctl_params params;
1842         ctl_fe_ioctl_state last_state;
1843         int done, retval;
1844
1845         retval = 0;
1846
1847         bzero(&params, sizeof(params));
1848
1849         mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
1850         cv_init(&params.sem, "ctlioccv");
1851         params.state = CTL_IOCTL_INPROG;
1852         last_state = params.state;
1853
1854         io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;
1855
1856         CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));
1857
1858         /* This shouldn't happen, but clean up if it does. */
1859         if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) {
                 mtx_destroy(&params.ioctl_mtx);
                 cv_destroy(&params.sem);
1860                 return (retval);
             }
1861
1862         done = 0;
1863
1864         do {
1865                 mtx_lock(&params.ioctl_mtx);
1866                 /*
1867                  * Check the state here, and don't sleep if the state has
1868                  * already changed (i.e. wakeup has already occurred, but we
1869                  * weren't waiting yet).
1870                  */
1871                 if (params.state == last_state) {
1872                         /* XXX KDM cv_wait_sig instead? */
1873                         cv_wait(&params.sem, &params.ioctl_mtx);
1874                 }
1875                 last_state = params.state;
1876
1877                 switch (params.state) {
1878                 case CTL_IOCTL_INPROG:
1879                         /* Why did we wake up? */
1880                         /* XXX KDM error here? */
1881                         mtx_unlock(&params.ioctl_mtx);
1882                         break;
1883                 case CTL_IOCTL_DATAMOVE:
1884                         CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
1885
1886                         /*
1887                          * change last_state back to INPROG to avoid
1888                          * deadlock on subsequent data moves.
1889                          */
1890                         params.state = last_state = CTL_IOCTL_INPROG;
1891
1892                         mtx_unlock(&params.ioctl_mtx);
1893                         ctl_ioctl_do_datamove(&io->scsiio);
1894                         /*
1895                          * Note that in some cases, most notably writes,
1896                          * this will queue the I/O and call us back later.
1897                          * In other cases, generally reads, this routine
1898                          * will immediately call back and wake us up,
1899                          * probably using our own context.
1900                          */
1901                         io->scsiio.be_move_done(io);
1902                         break;
1903                 case CTL_IOCTL_DONE:
1904                         mtx_unlock(&params.ioctl_mtx);
1905                         CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
1906                         done = 1;
1907                         break;
1908                 default:
1909                         mtx_unlock(&params.ioctl_mtx);
1910                         /* XXX KDM error here? */
1911                         break;
1912                 }
1913         } while (done == 0);
1914
1915         mtx_destroy(&params.ioctl_mtx);
1916         cv_destroy(&params.sem);
1917
1918         return (CTL_RETVAL_COMPLETE);
1919 }
1920
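/*
 * Completion-side callbacks for the ioctl frontend: each records the new
 * state under the lock and wakes the thread sleeping in
 * ctl_ioctl_submit_wait().
 */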
1921 static void
1922 ctl_ioctl_datamove(union ctl_io *io)
1923 {
1924         struct ctl_fe_ioctl_params *params;
1925
1926         params = (struct ctl_fe_ioctl_params *)
1927                 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1928
1929         mtx_lock(&params->ioctl_mtx);
1930         params->state = CTL_IOCTL_DATAMOVE;
1931         cv_broadcast(&params->sem);
1932         mtx_unlock(&params->ioctl_mtx);
1933 }
1934
1935 static void
1936 ctl_ioctl_done(union ctl_io *io)
1937 {
1938         struct ctl_fe_ioctl_params *params;
1939
1940         params = (struct ctl_fe_ioctl_params *)
1941                 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1942
1943         mtx_lock(&params->ioctl_mtx);
1944         params->state = CTL_IOCTL_DONE;
1945         cv_broadcast(&params->sem);
1946         mtx_unlock(&params->ioctl_mtx);
1947 }
1948
1949 static void
1950 ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
1951 {
1952         struct ctl_fe_ioctl_startstop_info *sd_info;
1953
1954         sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;
1955
1956         sd_info->hs_info.status = metatask->status;
1957         sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
1958         sd_info->hs_info.luns_complete =
1959                 metatask->taskinfo.startstop.luns_complete;
1960         sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;
1961
1962         cv_broadcast(&sd_info->sem);
1963 }
1964
1965 static void
1966 ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
1967 {
1968         struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;
1969
1970         fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;
1971
1972         mtx_lock(fe_bbr_info->lock);
1973         fe_bbr_info->bbr_info->status = metatask->status;
1974         fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
1975         fe_bbr_info->wakeup_done = 1;
1976         mtx_unlock(fe_bbr_info->lock);
1977
1978         cv_broadcast(&fe_bbr_info->sem);
1979 }
1980
1981 /*
1982  * Returns 0 for success, errno for failure.
1983  */
1984 static int
1985 ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
1986                    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
1987 {
1988         union ctl_io *io;
1989         int retval;
1990
1991         retval = 0;
1992
1993         mtx_assert(&control_softc->ctl_lock, MA_OWNED);
1994
1995         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
1996              (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
1997              ooa_links)) {
1998                 struct ctl_ooa_entry *entry;
1999
2000                 /*
2001                  * If we've got more than we can fit, just count the
2002                  * remaining entries.
2003                  */
2004                 if (*cur_fill_num >= ooa_hdr->alloc_num)
2005                         continue;
2006
2007                 entry = &kern_entries[*cur_fill_num];
2008
2009                 entry->tag_num = io->scsiio.tag_num;
2010                 entry->lun_num = lun->lun;
2011 #ifdef CTL_TIME_IO
2012                 entry->start_bt = io->io_hdr.start_bt;
2013 #endif
2014                 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
2015                 entry->cdb_len = io->scsiio.cdb_len;
2016                 if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
2017                         entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;
2018
2019                 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
2020                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;
2021
2022                 if (io->io_hdr.flags & CTL_FLAG_ABORT)
2023                         entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;
2024
2025                 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
2026                         entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;
2027
2028                 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
2029                         entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
2030         }
2031
2032         return (retval);
2033 }
2034
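/*
 * Copy a user buffer of the given length into a freshly allocated kernel
 * buffer.  Returns the kernel buffer on success; on failure, fills in
 * error_str and returns NULL.  The caller is responsible for freeing the
 * result with free(..., M_CTL).
 */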
2035 static void *
2036 ctl_copyin_alloc(void *user_addr, int len, char *error_str,
2037                  size_t error_str_len)
2038 {
2039         void *kptr;
2040
2041         kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);
2042
2043         if (copyin(user_addr, kptr, len) != 0) {
2044                 snprintf(error_str, error_str_len, "Error copying %d bytes "
2045                          "from user address %p to kernel address %p", len,
2046                          user_addr, kptr);
2047                 free(kptr, M_CTL);
2048                 return (NULL);
2049         }
2050
2051         return (kptr);
2052 }
2053
2054 static void
2055 ctl_free_args(int num_be_args, struct ctl_be_arg *be_args)
2056 {
2057         int i;
2058
2059         if (be_args == NULL)
2060                 return;
2061
2062         for (i = 0; i < num_be_args; i++) {
2063                 free(be_args[i].kname, M_CTL);
2064                 free(be_args[i].kvalue, M_CTL);
2065         }
2066
2067         free(be_args, M_CTL);
2068 }
2069
2070 static struct ctl_be_arg *
2071 ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args,
2072                 char *error_str, size_t error_str_len)
2073 {
2074         struct ctl_be_arg *args;
2075         int i;
2076
2077         args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args),
2078                                 error_str, error_str_len);
2079
2080         if (args == NULL)
2081                 goto bailout;
2082
2083         for (i = 0; i < num_be_args; i++) {
2084                 args[i].kname = NULL;
2085                 args[i].kvalue = NULL;
2086         }
2087
2088         for (i = 0; i < num_be_args; i++) {
2089                 uint8_t *tmpptr;
2090
2091                 args[i].kname = ctl_copyin_alloc(args[i].name,
2092                         args[i].namelen, error_str, error_str_len);
2093                 if (args[i].kname == NULL)
2094                         goto bailout;
2095
2096                 if (args[i].kname[args[i].namelen - 1] != '\0') {
2097                         snprintf(error_str, error_str_len, "Argument %d "
2098                                  "name is not NUL-terminated", i);
2099                         goto bailout;
2100                 }
2101
2102                 args[i].kvalue = NULL;
2103
2104                 tmpptr = ctl_copyin_alloc(args[i].value,
2105                         args[i].vallen, error_str, error_str_len);
2106                 if (tmpptr == NULL)
2107                         goto bailout;
2108
2109                 args[i].kvalue = tmpptr;
2110
2111                 if ((args[i].flags & CTL_BEARG_ASCII)
2112                  && (tmpptr[args[i].vallen - 1] != '\0')) {
2113                         snprintf(error_str, error_str_len, "Argument %d "
2114                                  "value is not NUL-terminated", i);
2115                         goto bailout;
2116                 }
2117         }
2118
2119         return (args);
2120 bailout:
2121
2122         ctl_free_args(num_be_args, args);
2123
2124         return (NULL);
2125 }
2126
2127 /*
2128  * Escape characters that are illegal or not recommended in XML.
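 * Only '&', '<' and '>' are rewritten, which is sufficient for element
 * content but not for quoted attribute values.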
2129  */
2130 int
2131 ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
2132 {
2133         int retval;
2134
2135         retval = 0;
2136
2137         for (; *str; str++) {
2138                 switch (*str) {
2139                 case '&':
2140                         retval = sbuf_printf(sb, "&amp;");
2141                         break;
2142                 case '>':
2143                         retval = sbuf_printf(sb, "&gt;");
2144                         break;
2145                 case '<':
2146                         retval = sbuf_printf(sb, "&lt;");
2147                         break;
2148                 default:
2149                         retval = sbuf_putc(sb, *str);
2150                         break;
2151                 }
2152
2153                 if (retval != 0)
2154                         break;
2155
2156         }
2157
2158         return (retval);
2159 }
2160
2161 static int
2162 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2163           struct thread *td)
2164 {
2165         struct ctl_softc *softc;
2166         int retval;
2167
2168         softc = control_softc;
2169
2170         retval = 0;
2171
2172         switch (cmd) {
2173         case CTL_IO: {
2174                 union ctl_io *io;
2175                 void *pool_tmp;
2176
2177                 /*
2178                  * If we haven't been "enabled", don't allow any SCSI I/O
2179                  * to this FETD.
2180                  */
2181                 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
2182                         retval = EPERM;
2183                         break;
2184                 }
2185
2186                 io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
2187                 if (io == NULL) {
2188                         printf("ctl_ioctl: can't allocate ctl_io!\n");
2189                         retval = ENOSPC;
2190                         break;
2191                 }
2192
2193                 /*
2194                  * Need to save the pool reference so it doesn't get
2195                  * spammed by the user's ctl_io.
2196                  */
2197                 pool_tmp = io->io_hdr.pool;
2198
2199                 memcpy(io, (void *)addr, sizeof(*io));
2200
2201                 io->io_hdr.pool = pool_tmp;
2202                 /*
2203                  * No status yet, so make sure the status is set properly.
2204                  */
2205                 io->io_hdr.status = CTL_STATUS_NONE;
2206
2207                 /*
2208                  * The user sets the initiator ID, target and LUN IDs.
2209                  */
2210                 io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
2211                 io->io_hdr.flags |= CTL_FLAG_USER_REQ;
2212                 if ((io->io_hdr.io_type == CTL_IO_SCSI)
2213                  && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
2214                         io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
2215
2216                 retval = ctl_ioctl_submit_wait(io);
2217
2218                 if (retval != 0) {
2219                         ctl_free_io(io);
2220                         break;
2221                 }
2222
2223                 memcpy((void *)addr, io, sizeof(*io));
2224
2225                 /* return this to our pool */
2226                 ctl_free_io(io);
2227
2228                 break;
2229         }
2230         case CTL_ENABLE_PORT:
2231         case CTL_DISABLE_PORT:
2232         case CTL_SET_PORT_WWNS: {
2233                 struct ctl_frontend *fe;
2234                 struct ctl_port_entry *entry;
2235
2236                 entry = (struct ctl_port_entry *)addr;
2237
2238                 mtx_lock(&softc->ctl_lock);
2239                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2240                         int action, done;
2241
2242                         action = 0;
2243                         done = 0;
2244
2245                         if ((entry->port_type == CTL_PORT_NONE)
2246                          && (entry->targ_port == fe->targ_port)) {
2247                                 /*
2248                                  * If the user only wants to enable or
2249                                  * disable or set WWNs on a specific port,
2250                                  * do the operation and we're done.
2251                                  */
2252                                 action = 1;
2253                                 done = 1;
2254                         } else if (entry->port_type & fe->port_type) {
2255                                 /*
2256                                  * Compare the user's type mask with the
2257                                  * particular frontend type to see if we
2258                                  * have a match.
2259                                  */
2260                                 action = 1;
2261                                 done = 0;
2262
2263                                 /*
2264                                  * Make sure the user isn't trying to set
2265                                  * WWNs on multiple ports at the same time.
2266                                  */
2267                                 if (cmd == CTL_SET_PORT_WWNS) {
2268                                         printf("%s: Can't set WWNs on "
2269                                                "multiple ports\n", __func__);
2270                                         retval = EINVAL;
2271                                         break;
2272                                 }
2273                         }
2274                         if (action != 0) {
2275                                 /*
2276                                  * XXX KDM we have to drop the lock here,
2277                                  * because the online/offline operations
2278                                  * can potentially block.  We need to
2279                                  * reference count the frontends so they
2280                                  * can't go away.
2281                                  */
2282                                 mtx_unlock(&softc->ctl_lock);
2283
2284                                 if (cmd == CTL_ENABLE_PORT) {
2285                                         struct ctl_lun *lun;
2286
2287                                         STAILQ_FOREACH(lun, &softc->lun_list,
2288                                                        links) {
2289                                                 fe->lun_enable(fe->targ_lun_arg,
2290                                                     lun->target,
2291                                                     lun->lun);
2292                                         }
2293
2294                                         ctl_frontend_online(fe);
2295                                 } else if (cmd == CTL_DISABLE_PORT) {
2296                                         struct ctl_lun *lun;
2297
2298                                         ctl_frontend_offline(fe);
2299
2300                                         STAILQ_FOREACH(lun, &softc->lun_list,
2301                                                        links) {
2302                                                 fe->lun_disable(
2303                                                     fe->targ_lun_arg,
2304                                                     lun->target,
2305                                                     lun->lun);
2306                                         }
2307                                 }
2308
2309                                 mtx_lock(&softc->ctl_lock);
2310
2311                                 if (cmd == CTL_SET_PORT_WWNS)
2312                                         ctl_frontend_set_wwns(fe,
2313                                             (entry->flags & CTL_PORT_WWNN_VALID) ?
2314                                             1 : 0, entry->wwnn,
2315                                             (entry->flags & CTL_PORT_WWPN_VALID) ?
2316                                             1 : 0, entry->wwpn);
2317                         }
2318                         if (done != 0)
2319                                 break;
2320                 }
2321                 mtx_unlock(&softc->ctl_lock);
2322                 break;
2323         }
2324         case CTL_GET_PORT_LIST: {
2325                 struct ctl_frontend *fe;
2326                 struct ctl_port_list *list;
2327                 int i;
2328
2329                 list = (struct ctl_port_list *)addr;
2330
2331                 if (list->alloc_len != (list->alloc_num *
2332                     sizeof(struct ctl_port_entry))) {
2333                         printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
2334                                "alloc_num %u * sizeof(struct ctl_port_entry) "
2335                                "%zu\n", __func__, list->alloc_len,
2336                                list->alloc_num, sizeof(struct ctl_port_entry));
2337                         retval = EINVAL;
2338                         break;
2339                 }
2340                 list->fill_len = 0;
2341                 list->fill_num = 0;
2342                 list->dropped_num = 0;
2343                 i = 0;
2344                 mtx_lock(&softc->ctl_lock);
2345                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2346                         struct ctl_port_entry entry, *list_entry;
2347
2348                         if (list->fill_num >= list->alloc_num) {
2349                                 list->dropped_num++;
2350                                 continue;
2351                         }
2352
2353                         entry.port_type = fe->port_type;
2354                         strlcpy(entry.port_name, fe->port_name,
2355                                 sizeof(entry.port_name));
2356                         entry.targ_port = fe->targ_port;
2357                         entry.physical_port = fe->physical_port;
2358                         entry.virtual_port = fe->virtual_port;
2359                         entry.wwnn = fe->wwnn;
2360                         entry.wwpn = fe->wwpn;
2361                         if (fe->status & CTL_PORT_STATUS_ONLINE)
2362                                 entry.online = 1;
2363                         else
2364                                 entry.online = 0;
2365
2366                         list_entry = &list->entries[i];
2367
2368                         retval = copyout(&entry, list_entry, sizeof(entry));
2369                         if (retval != 0) {
2370                                 printf("%s: CTL_GET_PORT_LIST: copyout "
2371                                        "returned %d\n", __func__, retval);
2372                                 break;
2373                         }
2374                         i++;
2375                         list->fill_num++;
2376                         list->fill_len += sizeof(entry);
2377                 }
2378                 mtx_unlock(&softc->ctl_lock);
2379
2380                 /*
2381                  * If this is non-zero, we had a copyout fault, so there's
2382                  * probably no point in attempting to set the status inside
2383                  * the structure.
2384                  */
2385                 if (retval != 0)
2386                         break;
2387
2388                 if (list->dropped_num > 0)
2389                         list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
2390                 else
2391                         list->status = CTL_PORT_LIST_OK;
2392                 break;
2393         }
2394         case CTL_DUMP_OOA: {
2395                 struct ctl_lun *lun;
2396                 union ctl_io *io;
2397                 char printbuf[128];
2398                 struct sbuf sb;
2399
2400                 mtx_lock(&softc->ctl_lock);
2401                 printf("Dumping OOA queues:\n");
2402                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2403                         for (io = (union ctl_io *)TAILQ_FIRST(
2404                              &lun->ooa_queue); io != NULL;
2405                              io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2406                              ooa_links)) {
2407                                 sbuf_new(&sb, printbuf, sizeof(printbuf),
2408                                          SBUF_FIXEDLEN);
2409                                 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
2410                                             (intmax_t)lun->lun,
2411                                             io->scsiio.tag_num,
2412                                             (io->io_hdr.flags &
2413                                             CTL_FLAG_BLOCKED) ? " BLOCKED" : "",
2414                                             (io->io_hdr.flags &
2415                                             CTL_FLAG_DMA_INPROG) ? " DMA" : "",
2416                                             (io->io_hdr.flags &
2417                                             CTL_FLAG_ABORT) ? " ABORT" : "",
2418                                             (io->io_hdr.flags &
2419                                         CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
2420                                 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
2421                                 sbuf_finish(&sb);
2422                                 printf("%s\n", sbuf_data(&sb));
2423                         }
2424                 }
2425                 printf("OOA queues dump done\n");
2426                 mtx_unlock(&softc->ctl_lock);
2427                 break;
2428         }
2429         case CTL_GET_OOA: {
2430                 struct ctl_lun *lun;
2431                 struct ctl_ooa *ooa_hdr;
2432                 struct ctl_ooa_entry *entries;
2433                 uint32_t cur_fill_num;
2434
2435                 ooa_hdr = (struct ctl_ooa *)addr;
2436
2437                 if ((ooa_hdr->alloc_len == 0)
2438                  || (ooa_hdr->alloc_num == 0)) {
2439                         printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2440                                "must be non-zero\n", __func__,
2441                                ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2442                         retval = EINVAL;
2443                         break;
2444                 }
2445
2446                 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2447                     sizeof(struct ctl_ooa_entry))) {
2448                         printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2449                                "num %u * sizeof(struct ctl_ooa_entry) %zu\n",
2450                                __func__, ooa_hdr->alloc_len,
2451                                ooa_hdr->alloc_num, sizeof(struct ctl_ooa_entry));
2452                         retval = EINVAL;
2453                         break;
2454                 }
2455
2456                 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2457                 if (entries == NULL) {
2458                         printf("%s: could not allocate %d bytes for OOA "
2459                                "dump\n", __func__, ooa_hdr->alloc_len);
2460                         retval = ENOMEM;
2461                         break;
2462                 }
2463
2464                 mtx_lock(&softc->ctl_lock);
2465                 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
2466                  && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
2467                   || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
2468                         mtx_unlock(&softc->ctl_lock);
2469                         free(entries, M_CTL);
2470                         printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2471                                __func__, (uintmax_t)ooa_hdr->lun_num);
2472                         retval = EINVAL;
2473                         break;
2474                 }
2475
2476                 cur_fill_num = 0;
2477
2478                 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2479                         STAILQ_FOREACH(lun, &softc->lun_list, links) {
2480                                 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2481                                         ooa_hdr, entries);
2482                                 if (retval != 0)
2483                                         break;
2484                         }
2485                         if (retval != 0) {
2486                                 mtx_unlock(&softc->ctl_lock);
2487                                 free(entries, M_CTL);
2488                                 break;
2489                         }
2490                 } else {
2491                         lun = softc->ctl_luns[ooa_hdr->lun_num];
2492
2493                         retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
2494                                                     entries);
2495                 }
2496                 mtx_unlock(&softc->ctl_lock);
2497
2498                 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2499                 ooa_hdr->fill_len = ooa_hdr->fill_num *
2500                         sizeof(struct ctl_ooa_entry);
2501                 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2502                 if (retval != 0) {
2503                         printf("%s: error copying out %d bytes for OOA dump\n",
2504                                __func__, ooa_hdr->fill_len);
2505                 }
2506
2507                 getbintime(&ooa_hdr->cur_bt);
2508
2509                 if (cur_fill_num > ooa_hdr->alloc_num) {
2510                         ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
2511                         ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2512                 } else {
2513                         ooa_hdr->dropped_num = 0;
2514                         ooa_hdr->status = CTL_OOA_OK;
2515                 }
2516
2517                 free(entries, M_CTL);
2518                 break;
2519         }
2520         case CTL_CHECK_OOA: {
2521                 union ctl_io *io;
2522                 struct ctl_lun *lun;
2523                 struct ctl_ooa_info *ooa_info;
2524
2525
2526                 ooa_info = (struct ctl_ooa_info *)addr;
2527
2528                 if (ooa_info->lun_id >= CTL_MAX_LUNS) {
2529                         ooa_info->status = CTL_OOA_INVALID_LUN;
2530                         break;
2531                 }
2532                 mtx_lock(&softc->ctl_lock);
2533                 lun = softc->ctl_luns[ooa_info->lun_id];
2534                 if (lun == NULL) {
2535                         mtx_unlock(&softc->ctl_lock);
2536                         ooa_info->status = CTL_OOA_INVALID_LUN;
2537                         break;
2538                 }
2539
2540                 ooa_info->num_entries = 0;
2541                 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
2542                      io != NULL; io = (union ctl_io *)TAILQ_NEXT(
2543                      &io->io_hdr, ooa_links)) {
2544                         ooa_info->num_entries++;
2545                 }
2546
2547                 mtx_unlock(&softc->ctl_lock);
2548                 ooa_info->status = CTL_OOA_SUCCESS;
2549
2550                 break;
2551         }
2552         case CTL_HARD_START:
2553         case CTL_HARD_STOP: {
2554                 struct ctl_fe_ioctl_startstop_info ss_info;
2555                 struct cfi_metatask *metatask;
2556                 struct mtx hs_mtx;
2557
2558                 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF);
2559
2560                 cv_init(&ss_info.sem, "hard start/stop cv");
2561
2562                 metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2563                 if (metatask == NULL) {
2564                         retval = ENOMEM;
2565                         mtx_destroy(&hs_mtx);
                         cv_destroy(&ss_info.sem);
2566                         break;
2567                 }
2568
2569                 if (cmd == CTL_HARD_START)
2570                         metatask->tasktype = CFI_TASK_STARTUP;
2571                 else
2572                         metatask->tasktype = CFI_TASK_SHUTDOWN;
2573
2574                 metatask->callback = ctl_ioctl_hard_startstop_callback;
2575                 metatask->callback_arg = &ss_info;
2576
2577                 cfi_action(metatask);
2578
2579                 /* Wait for the callback */
2580                 mtx_lock(&hs_mtx);
2581                 cv_wait_sig(&ss_info.sem, &hs_mtx);
2582                 mtx_unlock(&hs_mtx);
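                /*
                 * XXX If cv_wait_sig() was interrupted by a signal, we
                 * get here before the callback has run, and hs_info may
                 * be stale when we copy it out below.
                 */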
2583
2584                 /*
2585                  * All information has been copied from the metatask by the
2586                  * time cv_broadcast() is called, so we free the metatask here.
2587                  */
2588                 cfi_free_metatask(metatask);
2589
2590                 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info));
2591
2592                 mtx_destroy(&hs_mtx);
                 cv_destroy(&ss_info.sem);
2593                 break;
2594         }
2595         case CTL_BBRREAD: {
2596                 struct ctl_bbrread_info *bbr_info;
2597                 struct ctl_fe_ioctl_bbrread_info fe_bbr_info;
2598                 struct mtx bbr_mtx;
2599                 struct cfi_metatask *metatask;
2600
2601                 bbr_info = (struct ctl_bbrread_info *)addr;
2602
2603                 bzero(&fe_bbr_info, sizeof(fe_bbr_info));
2604
2605                 bzero(&bbr_mtx, sizeof(bbr_mtx));
2606                 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF);
2607
2608                 fe_bbr_info.bbr_info = bbr_info;
2609                 fe_bbr_info.lock = &bbr_mtx;
2610
2611                 cv_init(&fe_bbr_info.sem, "BBR read cv");
2612                 metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2613
2614                 if (metatask == NULL) {
2615                         mtx_destroy(&bbr_mtx);
2616                         cv_destroy(&fe_bbr_info.sem);
2617                         retval = ENOMEM;
2618                         break;
2619                 }
2620                 metatask->tasktype = CFI_TASK_BBRREAD;
2621                 metatask->callback = ctl_ioctl_bbrread_callback;
2622                 metatask->callback_arg = &fe_bbr_info;
2623                 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num;
2624                 metatask->taskinfo.bbrread.lba = bbr_info->lba;
2625                 metatask->taskinfo.bbrread.len = bbr_info->len;
2626
2627                 cfi_action(metatask);
2628
2629                 mtx_lock(&bbr_mtx);
2630                 while (fe_bbr_info.wakeup_done == 0)
2631                         cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx);
2632                 mtx_unlock(&bbr_mtx);
2633
2634                 bbr_info->status = metatask->status;
2635                 bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
2636                 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status;
2637                 memcpy(&bbr_info->sense_data,
2638                        &metatask->taskinfo.bbrread.sense_data,
2639                        ctl_min(sizeof(bbr_info->sense_data),
2640                                sizeof(metatask->taskinfo.bbrread.sense_data)));
2641
2642                 cfi_free_metatask(metatask);
2643
2644                 mtx_destroy(&bbr_mtx);
2645                 cv_destroy(&fe_bbr_info.sem);
2646
2647                 break;
2648         }
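
        /*
         * For reference, the wakeup protocol both metatask cases above rely
         * on: the completion callback takes the caller's lock, sets the
         * wakeup flag, and broadcasts on the condition variable.  A minimal
         * hedged sketch (not the actual ctl_ioctl_bbrread_callback(), which
         * is defined earlier in this file; the signature is assumed from
         * the way callback/callback_arg are used above):
         *
         *      static void
         *      example_bbrread_callback(void *arg, struct cfi_metatask *mt)
         *      {
         *              struct ctl_fe_ioctl_bbrread_info *info = arg;
         *
         *              mtx_lock(info->lock);
         *              info->wakeup_done = 1;
         *              cv_broadcast(&info->sem);
         *              mtx_unlock(info->lock);
         *      }
         */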
2649         case CTL_DELAY_IO: {
2650                 struct ctl_io_delay_info *delay_info;
2651 #ifdef CTL_IO_DELAY
2652                 struct ctl_lun *lun;
2653 #endif /* CTL_IO_DELAY */
2654
2655                 delay_info = (struct ctl_io_delay_info *)addr;
2656
2657 #ifdef CTL_IO_DELAY
2658                 mtx_lock(&softc->ctl_lock);
2659
2660                 if ((delay_info->lun_id >= CTL_MAX_LUNS)
2661                  || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2662                         delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2663                 } else {
2664                         lun = softc->ctl_luns[delay_info->lun_id];
2665
2666                         delay_info->status = CTL_DELAY_STATUS_OK;
2667
2668                         switch (delay_info->delay_type) {
2669                         case CTL_DELAY_TYPE_CONT:
2670                                 break;
2671                         case CTL_DELAY_TYPE_ONESHOT:
2672                                 break;
2673                         default:
2674                                 delay_info->status =
2675                                         CTL_DELAY_STATUS_INVALID_TYPE;
2676                                 break;
2677                         }
2678
2679                         switch (delay_info->delay_loc) {
2680                         case CTL_DELAY_LOC_DATAMOVE:
2681                                 lun->delay_info.datamove_type =
2682                                         delay_info->delay_type;
2683                                 lun->delay_info.datamove_delay =
2684                                         delay_info->delay_secs;
2685                                 break;
2686                         case CTL_DELAY_LOC_DONE:
2687                                 lun->delay_info.done_type =
2688                                         delay_info->delay_type;
2689                                 lun->delay_info.done_delay =
2690                                         delay_info->delay_secs;
2691                                 break;
2692                         default:
2693                                 delay_info->status =
2694                                         CTL_DELAY_STATUS_INVALID_LOC;
2695                                 break;
2696                         }
2697                 }
2698
2699                 mtx_unlock(&softc->ctl_lock);
2700 #else
2701                 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2702 #endif /* CTL_IO_DELAY */
2703                 break;
2704         }
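
        /*
         * A hedged userland sketch of driving CTL_DELAY_IO; it only has an
         * effect on kernels built with CTL_IO_DELAY, and "fd" is assumed to
         * be open on CTL's control device:
         *
         *      struct ctl_io_delay_info delay;
         *
         *      memset(&delay, 0, sizeof(delay));
         *      delay.lun_id = 0;
         *      delay.delay_type = CTL_DELAY_TYPE_ONESHOT;
         *      delay.delay_loc = CTL_DELAY_LOC_DATAMOVE;
         *      delay.delay_secs = 5;
         *      if (ioctl(fd, CTL_DELAY_IO, &delay) == 0
         *       && delay.status != CTL_DELAY_STATUS_OK)
         *              warnx("delay not set, status %d", delay.status);
         */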
2705         case CTL_REALSYNC_SET: {
2706                 int *syncstate;
2707
2708                 syncstate = (int *)addr;
2709
2710                 mtx_lock(&softc->ctl_lock);
2711                 switch (*syncstate) {
2712                 case 0:
2713                         softc->flags &= ~CTL_FLAG_REAL_SYNC;
2714                         break;
2715                 case 1:
2716                         softc->flags |= CTL_FLAG_REAL_SYNC;
2717                         break;
2718                 default:
2719                         retval = EINVAL;
2720                         break;
2721                 }
2722                 mtx_unlock(&softc->ctl_lock);
2723                 break;
2724         }
2725         case CTL_REALSYNC_GET: {
2726                 int *syncstate;
2727
2728                 syncstate = (int*)addr;
2729
2730                 mtx_lock(&softc->ctl_lock);
2731                 if (softc->flags & CTL_FLAG_REAL_SYNC)
2732                         *syncstate = 1;
2733                 else
2734                         *syncstate = 0;
2735                 mtx_unlock(&softc->ctl_lock);
2736
2737                 break;
2738         }
2739         case CTL_SETSYNC:
2740         case CTL_GETSYNC: {
2741                 struct ctl_sync_info *sync_info;
2742                 struct ctl_lun *lun;
2743
2744                 sync_info = (struct ctl_sync_info *)addr;
2745
2746                 mtx_lock(&softc->ctl_lock);
2747                 lun = softc->ctl_luns[sync_info->lun_id];
2748                 if (lun == NULL) {
2749                         mtx_unlock(&softc->ctl_lock);
2750                         sync_info->status = CTL_GS_SYNC_NO_LUN;
                             break;
2751                 }
2752                 /*
2753                  * Get or set the sync interval.  We're not bounds checking
2754                  * in the set case, hopefully the user won't do something
2755                  * silly.
2756                  */
2757                 if (cmd == CTL_GETSYNC)
2758                         sync_info->sync_interval = lun->sync_interval;
2759                 else
2760                         lun->sync_interval = sync_info->sync_interval;
2761
2762                 mtx_unlock(&softc->ctl_lock);
2763
2764                 sync_info->status = CTL_GS_SYNC_OK;
2765
2766                 break;
2767         }
2768         case CTL_GETSTATS: {
2769                 struct ctl_stats *stats;
2770                 struct ctl_lun *lun;
2771                 int i;
2772
2773                 stats = (struct ctl_stats *)addr;
2774
2775                 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2776                      stats->alloc_len) {
2777                         stats->status = CTL_SS_NEED_MORE_SPACE;
2778                         stats->num_luns = softc->num_luns;
2779                         break;
2780                 }
2781                 /*
2782                  * XXX KDM no locking here.  If the LUN list changes,
2783                  * things can blow up.
2784                  */
2785                 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
2786                      i++, lun = STAILQ_NEXT(lun, links)) {
2787                         retval = copyout(&lun->stats, &stats->lun_stats[i],
2788                                          sizeof(lun->stats));
2789                         if (retval != 0)
2790                                 break;
2791                 }
2792                 stats->num_luns = softc->num_luns;
2793                 stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2794                                  softc->num_luns;
2795                 stats->status = CTL_SS_OK;
2796 #ifdef CTL_TIME_IO
2797                 stats->flags = CTL_STATS_FLAG_TIME_VALID;
2798 #else
2799                 stats->flags = CTL_STATS_FLAG_NONE;
2800 #endif
2801                 getnanouptime(&stats->timestamp);
2802                 break;
2803         }
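
        /*
         * The sizing handshake for CTL_GETSTATS, seen from the caller: on
         * CTL_SS_NEED_MORE_SPACE, num_luns reports how many LUNs to size
         * the buffer for.  A hedged sketch (error handling trimmed):
         *
         *      struct ctl_stats stats;
         *      int num = 1;
         *
         *      do {
         *              memset(&stats, 0, sizeof(stats));
         *              stats.alloc_len = num *
         *                  sizeof(struct ctl_lun_io_stats);
         *              stats.lun_stats = calloc(num,
         *                  sizeof(struct ctl_lun_io_stats));
         *              if (ioctl(fd, CTL_GETSTATS, &stats) != 0)
         *                      break;
         *              if (stats.status == CTL_SS_NEED_MORE_SPACE) {
         *                      free(stats.lun_stats);
         *                      num = stats.num_luns;
         *              }
         *      } while (stats.status == CTL_SS_NEED_MORE_SPACE);
         */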
2804         case CTL_ERROR_INJECT: {
2805                 struct ctl_error_desc *err_desc, *new_err_desc;
2806                 struct ctl_lun *lun;
2807
2808                 err_desc = (struct ctl_error_desc *)addr;
2809
2810                 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2811                                       M_WAITOK | M_ZERO);
2812                 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2813
2814                 mtx_lock(&softc->ctl_lock);
2815                 lun = softc->ctl_luns[err_desc->lun_id];
2816                 if (lun == NULL) {
2817                         mtx_unlock(&softc->ctl_lock);
2818                         printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2819                                __func__, (uintmax_t)err_desc->lun_id);
2820                         retval = EINVAL;
2821                         break;
2822                 }
2823
2824                 /*
2825                  * We could do some checking here to verify the validity
2826                  * of the request, but given the complexity of error
2827                  * injection requests, the checking logic would be fairly
2828                  * complex.
2829                  *
2830                  * For now, if the request is invalid, it just won't get
2831                  * executed and might get deleted.
2832                  */
2833                 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
2834
2835                 /*
2836                  * XXX KDM check to make sure the serial number is unique,
2837                  * in case we somehow manage to wrap.  That shouldn't
2838                  * happen for a very long time, but it's the right thing to
2839                  * do.
2840                  */
2841                 new_err_desc->serial = lun->error_serial;
2842                 err_desc->serial = lun->error_serial;
2843                 lun->error_serial++;
2844
2845                 mtx_unlock(&softc->ctl_lock);
2846                 break;
2847         }
2848         case CTL_ERROR_INJECT_DELETE: {
2849                 struct ctl_error_desc *delete_desc, *desc, *desc2;
2850                 struct ctl_lun *lun;
2851                 int delete_done;
2852
2853                 delete_desc = (struct ctl_error_desc *)addr;
2854                 delete_done = 0;
2855
2856                 mtx_lock(&softc->ctl_lock);
2857                 lun = softc->ctl_luns[delete_desc->lun_id];
2858                 if (lun == NULL) {
2859                         mtx_unlock(&softc->ctl_lock);
2860                         printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2861                                __func__, (uintmax_t)delete_desc->lun_id);
2862                         retval = EINVAL;
2863                         break;
2864                 }
2865                 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2866                         if (desc->serial != delete_desc->serial)
2867                                 continue;
2868
2869                         STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2870                                       links);
2871                         free(desc, M_CTL);
2872                         delete_done = 1;
2873                 }
2874                 mtx_unlock(&softc->ctl_lock);
2875                 if (delete_done == 0) {
2876                         printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2877                                "error serial %ju on LUN %u\n", __func__,
2878                                (uintmax_t)delete_desc->serial, delete_desc->lun_id);
2879                         retval = EINVAL;
2880                         break;
2881                 }
2882                 break;
2883         }
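
        /*
         * The two error injection ioctls pair up via the serial number:
         * CTL_ERROR_INJECT hands the assigned serial back in the caller's
         * descriptor (see above), and CTL_ERROR_INJECT_DELETE matches on
         * that value.  A hedged userland sketch:
         *
         *      struct ctl_error_desc desc;     // filled in by the caller
         *
         *      if (ioctl(fd, CTL_ERROR_INJECT, &desc) == 0) {
         *              // ... run the test I/O against desc.lun_id ...
         *              (void)ioctl(fd, CTL_ERROR_INJECT_DELETE, &desc);
         *      }
         */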
2884         case CTL_DUMP_STRUCTS: {
2885                 int i, j, k;
2886                 struct ctl_frontend *fe;
2887
2888                 printf("CTL IID to WWPN map start:\n");
2889                 for (i = 0; i < CTL_MAX_PORTS; i++) {
2890                         for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2891                                 if (softc->wwpn_iid[i][j].in_use == 0)
2892                                         continue;
2893
2894                                 printf("port %d iid %u WWPN %#jx\n",
2895                                        softc->wwpn_iid[i][j].port,
2896                                        softc->wwpn_iid[i][j].iid, 
2897                                        (uintmax_t)softc->wwpn_iid[i][j].wwpn);
2898                         }
2899                 }
2900                 printf("CTL IID to WWPN map end\n");
2901                 printf("CTL Persistent Reservation information start:\n");
2902                 for (i = 0; i < CTL_MAX_LUNS; i++) {
2903                         struct ctl_lun *lun;
2904
2905                         lun = softc->ctl_luns[i];
2906
2907                         if ((lun == NULL)
2908                          || ((lun->flags & CTL_LUN_DISABLED) != 0))
2909                                 continue;
2910
2911                         for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
2912                                 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
2913                                         if (lun->per_res[j * CTL_MAX_INIT_PER_PORT + k].registered == 0)
2914                                                 continue;
2915                                         printf("LUN %d port %d iid %d key "
2916                                                "%#jx\n", i, j, k,
2917                                                (uintmax_t)scsi_8btou64(
2918                                                lun->per_res[j * CTL_MAX_INIT_PER_PORT + k].res_key.key));
2919                                 }
2920                         }
2921                 }
2922                 printf("CTL Persistent Reservation information end\n");
2923                 printf("CTL Frontends:\n");
2924                 /*
2925                  * XXX KDM calling this without a lock.  We'd likely want
2926                  * to drop the lock before calling the frontend's dump
2927                  * routine anyway.
2928                  */
2929                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2930                         printf("Frontend %s Type %u pport %d vport %d WWNN "
2931                                "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
2932                                fe->physical_port, fe->virtual_port,
2933                                (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
2934
2935                         /*
2936                          * Frontends are not required to support the dump
2937                          * routine.
2938                          */
2939                         if (fe->fe_dump == NULL)
2940                                 continue;
2941
2942                         fe->fe_dump();
2943                 }
2944                 printf("CTL Frontend information end\n");
2945                 break;
2946         }
2947         case CTL_LUN_REQ: {
2948                 struct ctl_lun_req *lun_req;
2949                 struct ctl_backend_driver *backend;
2950
2951                 lun_req = (struct ctl_lun_req *)addr;
2952
2953                 backend = ctl_backend_find(lun_req->backend);
2954                 if (backend == NULL) {
2955                         lun_req->status = CTL_LUN_ERROR;
2956                         snprintf(lun_req->error_str,
2957                                  sizeof(lun_req->error_str),
2958                                  "Backend \"%s\" not found.",
2959                                  lun_req->backend);
2960                         break;
2961                 }
2962                 if (lun_req->num_be_args > 0) {
2963                         lun_req->kern_be_args = ctl_copyin_args(
2964                                 lun_req->num_be_args,
2965                                 lun_req->be_args,
2966                                 lun_req->error_str,
2967                                 sizeof(lun_req->error_str));
2968                         if (lun_req->kern_be_args == NULL) {
2969                                 lun_req->status = CTL_LUN_ERROR;
2970                                 break;
2971                         }
2972                 }
2973
2974                 retval = backend->ioctl(dev, cmd, addr, flag, td);
2975
2976                 if (lun_req->num_be_args > 0) {
2977                         ctl_free_args(lun_req->num_be_args,
2978                                       lun_req->kern_be_args);
2979                 }
2980                 break;
2981         }
2982         case CTL_LUN_LIST: {
2983                 struct sbuf *sb;
2984                 struct ctl_lun *lun;
2985                 struct ctl_lun_list *list;
2986                 struct ctl_be_lun_option *opt;
2987
2988                 list = (struct ctl_lun_list *)addr;
2989
2990                 /*
2991                  * Allocate a fixed length sbuf here, based on the length
2992                  * of the user's buffer.  We could allocate an auto-extending
2993                  * buffer, and then tell the user how much larger our
2994                  * amount of data is than his buffer, but that presents
2995                  * some problems:
2996                  *
2997                  * 1.  The sbuf(9) routines use a blocking malloc, and so
2998                  *     we can't hold a lock while calling them with an
2999                  *     auto-extending buffer.
3000                  *
3001                  * 2.  There is not currently a LUN reference counting
3002                  *     mechanism, outside of outstanding transactions on
3003                  *     the LUN's OOA queue.  So a LUN could go away on us
3004                  *     while we're getting the LUN number, backend-specific
3005                  *     information, etc.  Thus, given the way things
3006                  *     currently work, we need to hold the CTL lock while
3007                  *     grabbing LUN information.
3008                  *
3009                  * So, from the user's standpoint, the best thing to do is
3010                  * allocate what he thinks is a reasonable buffer length,
3011                  * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
3012                  * double the buffer length and try again.  (And repeat
3013                  * that until he succeeds.)
3014                  */
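                /*
                 * A hedged sketch of that retry loop from userland ("len"
                 * starts at any reasonable guess; error handling trimmed):
                 *
                 *      struct ctl_lun_list list;
                 *
                 *      do {
                 *              memset(&list, 0, sizeof(list));
                 *              list.alloc_len = len;
                 *              list.lun_xml = malloc(len);
                 *              if (ioctl(fd, CTL_LUN_LIST, &list) != 0)
                 *                      err(1, "CTL_LUN_LIST");
                 *              if (list.status ==
                 *                  CTL_LUN_LIST_NEED_MORE_SPACE) {
                 *                      free(list.lun_xml);
                 *                      len *= 2;
                 *              }
                 *      } while (list.status == CTL_LUN_LIST_NEED_MORE_SPACE);
                 */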
3015                 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3016                 if (sb == NULL) {
3017                         list->status = CTL_LUN_LIST_ERROR;
3018                         snprintf(list->error_str, sizeof(list->error_str),
3019                                  "Unable to allocate %d bytes for LUN list",
3020                                  list->alloc_len);
3021                         break;
3022                 }
3023
3024                 sbuf_printf(sb, "<ctllunlist>\n");
3025
3026                 mtx_lock(&softc->ctl_lock);
3027
3028                 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3029                         retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3030                                              (uintmax_t)lun->lun);
3031
3032                         /*
3033                          * Bail out as soon as we see that we've overfilled
3034                          * the buffer.
3035                          */
3036                         if (retval != 0)
3037                                 break;
3038
3039                         retval = sbuf_printf(sb, "<backend_type>%s"
3040                                              "</backend_type>\n",
3041                                              (lun->backend == NULL) ?  "none" :
3042                                              lun->backend->name);
3043
3044                         if (retval != 0)
3045                                 break;
3046
3047                         retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n",
3048                                              lun->be_lun->lun_type);
3049
3050                         if (retval != 0)
3051                                 break;
3052
3053                         if (lun->backend == NULL) {
3054                                 retval = sbuf_printf(sb, "</lun>\n");
3055                                 if (retval != 0)
3056                                         break;
3057                                 continue;
3058                         }
3059
3060                         retval = sbuf_printf(sb, "<size>%ju</size>\n",
3061                                              (lun->be_lun->maxlba > 0) ?
3062                                              lun->be_lun->maxlba + 1 : 0);
3063
3064                         if (retval != 0)
3065                                 break;
3066
3067                         retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n",
3068                                              lun->be_lun->blocksize);
3069
3070                         if (retval != 0)
3071                                 break;
3072
3073                         retval = sbuf_printf(sb, "<serial_number>");
3074
3075                         if (retval != 0)
3076                                 break;
3077
3078                         retval = ctl_sbuf_printf_esc(sb,
3079                                                      lun->be_lun->serial_num);
3080
3081                         if (retval != 0)
3082                                 break;
3083
3084                         retval = sbuf_printf(sb, "</serial_number>\n");
3085
3086                         if (retval != 0)
3087                                 break;
3088
3089                         retval = sbuf_printf(sb, "<device_id>");
3090
3091                         if (retval != 0)
3092                                 break;
3093
3094                         retval = ctl_sbuf_printf_esc(sb, lun->be_lun->device_id);
3095
3096                         if (retval != 0)
3097                                 break;
3098
3099                         retval = sbuf_printf(sb, "</device_id>\n");
3100
3101                         if (retval != 0)
3102                                 break;
3103
3104                         if (lun->backend->lun_info != NULL) {
3105                                 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
3106                                 if (retval != 0)
3107                                         break;
3108                         }
3109                         STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
3110                                 retval = sbuf_printf(sb, "<%s>%s</%s>",
                                         opt->name, opt->value, opt->name);
3111                                 if (retval != 0)
3112                                         break;
3113                         }
3114
3115                         retval = sbuf_printf(sb, "</lun>\n");
3116
3117                         if (retval != 0)
3118                                 break;
3119                 }
3120                 mtx_unlock(&softc->ctl_lock);
3121
3122                 if ((retval != 0)
3123                  || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3124                         retval = 0;
3125                         sbuf_delete(sb);
3126                         list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3127                         snprintf(list->error_str, sizeof(list->error_str),
3128                                  "Out of space, %d bytes is too small",
3129                                  list->alloc_len);
3130                         break;
3131                 }
3132
3133                 sbuf_finish(sb);
3134
3135                 retval = copyout(sbuf_data(sb), list->lun_xml,
3136                                  sbuf_len(sb) + 1);
3137
3138                 list->fill_len = sbuf_len(sb) + 1;
3139                 list->status = CTL_LUN_LIST_OK;
3140                 sbuf_delete(sb);
3141                 break;
3142         }
3143         case CTL_ISCSI: {
3144                 struct ctl_iscsi *ci;
3145                 struct ctl_frontend *fe;
3146
3147                 ci = (struct ctl_iscsi *)addr;
3148
3149                 mtx_lock(&softc->ctl_lock);
3150                 STAILQ_FOREACH(fe, &softc->fe_list, links) {
3151                         if (strcmp(fe->port_name, "iscsi") == 0)
3152                                 break;
3153                 }
3154                 mtx_unlock(&softc->ctl_lock);
3155
3156                 if (fe == NULL) {
3157                         ci->status = CTL_ISCSI_ERROR;
3158                         snprintf(ci->error_str, sizeof(ci->error_str),
                                 "Frontend \"iscsi\" not found.");
3159                         break;
3160                 }
3161
3162                 retval = fe->ioctl(dev, cmd, addr, flag, td);
3163                 break;
3164         }
3165         default: {
3166                 /* XXX KDM should we fix this? */
3167 #if 0
3168                 struct ctl_backend_driver *backend;
3169                 unsigned int type;
3170                 int found;
3171
3172                 found = 0;
3173
3174                 /*
3175                  * We encode the backend type as the ioctl type for backend
3176                  * ioctls.  So parse it out here, and then search for a
3177                  * backend of this type.
3178                  */
3179                 type = _IOC_TYPE(cmd);
3180
3181                 STAILQ_FOREACH(backend, &softc->be_list, links) {
3182                         if (backend->type == type) {
3183                                 found = 1;
3184                                 break;
3185                         }
3186                 }
3187                 if (found == 0) {
3188                         printf("ctl: unknown ioctl command %#lx or backend "
3189                                "%d\n", cmd, type);
3190                         retval = EINVAL;
3191                         break;
3192                 }
3193                 retval = backend->ioctl(dev, cmd, addr, flag, td);
3194 #endif
3195                 retval = ENOTTY;
3196                 break;
3197         }
3198         }
3199         return (retval);
3200 }
3201
3202 uint32_t
3203 ctl_get_initindex(struct ctl_nexus *nexus)
3204 {
3205         if (nexus->targ_port < CTL_MAX_PORTS)
3206                 return (nexus->initid.id +
3207                         (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3208         else
3209                 return (nexus->initid.id +
3210                        ((nexus->targ_port - CTL_MAX_PORTS) *
3211                         CTL_MAX_INIT_PER_PORT));
3212 }
3213
3214 uint32_t
3215 ctl_get_resindex(struct ctl_nexus *nexus)
3216 {
3217         return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3218 }
3219
3220 uint32_t
3221 ctl_port_idx(int port_num)
3222 {
3223         if (port_num < CTL_MAX_PORTS)
3224                 return(port_num);
3225         else
3226                 return(port_num - CTL_MAX_PORTS);
3227 }
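
/*
 * Worked example of the mapping above (assuming illustrative values of
 * CTL_MAX_PORTS = 32 and CTL_MAX_INIT_PER_PORT = 2048; the real constants
 * live in the CTL headers): initiator 5 on port 3 gets initiator index
 * 3 * 2048 + 5 = 6149, and the same initiator seen through the shadow port
 * 35 (32 + 3) maps to that same index, since 5 + (35 - 32) * 2048 = 6149
 * and ctl_port_idx(35) == 3.
 */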
3228
3229 /*
3230  * Note:  This only works for bitmask sizes that are at least 32 bits, and
3231  * that are a power of 2.
3232  */
3233 int
3234 ctl_ffz(uint32_t *mask, uint32_t size)
3235 {
3236         uint32_t num_chunks, num_pieces;
3237         int i, j;
3238
3239         num_chunks = (size >> 5);
3240         if (num_chunks == 0)
3241                 num_chunks++;
3242         num_pieces = ctl_min((sizeof(uint32_t) * 8), size);
3243
3244         for (i = 0; i < num_chunks; i++) {
3245                 for (j = 0; j < num_pieces; j++) {
3246                         if ((mask[i] & (1 << j)) == 0)
3247                                 return ((i << 5) + j);
3248                 }
3249         }
3250
3251         return (-1);
3252 }
3253
3254 int
3255 ctl_set_mask(uint32_t *mask, uint32_t bit)
3256 {
3257         uint32_t chunk, piece;
3258
3259         chunk = bit >> 5;
3260         piece = bit % (sizeof(uint32_t) * 8);
3261
3262         if ((mask[chunk] & (1 << piece)) != 0)
3263                 return (-1);
3264         else
3265                 mask[chunk] |= (1 << piece);
3266
3267         return (0);
3268 }
3269
3270 int
3271 ctl_clear_mask(uint32_t *mask, uint32_t bit)
3272 {
3273         uint32_t chunk, piece;
3274
3275         chunk = bit >> 5;
3276         piece = bit % (sizeof(uint32_t) * 8);
3277
3278         if ((mask[chunk] & (1 << piece)) == 0)
3279                 return (-1);
3280         else
3281                 mask[chunk] &= ~(1 << piece);
3282
3283         return (0);
3284 }
3285
3286 int
3287 ctl_is_set(uint32_t *mask, uint32_t bit)
3288 {
3289         uint32_t chunk, piece;
3290
3291         chunk = bit >> 5;
3292         piece = bit % (sizeof(uint32_t) * 8);
3293
3294         if ((mask[chunk] & (1 << piece)) == 0)
3295                 return (0);
3296         else
3297                 return (1);
3298 }
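
/*
 * Typical use of the bitmask helpers above, as a hedged sketch: find and
 * claim a free slot.  The 128-bit mask satisfies the size restriction in
 * the ctl_ffz() comment:
 *
 *      uint32_t mask[4] = { 0 };               // 128 bits
 *      int bit;
 *
 *      bit = ctl_ffz(mask, 128);               // lowest clear bit, or -1
 *      if (bit >= 0 && ctl_set_mask(mask, bit) == 0) {
 *              // slot "bit" is now claimed ...
 *              ctl_clear_mask(mask, bit);      // ... and released again
 *      }
 */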
3299
3300 #ifdef unused
3301 /*
3302  * The bus, target and lun are optional, they can be filled in later.
3303  * can_wait is used to determine whether we can wait on the malloc or not.
3304  */
3305 union ctl_io*
3306 ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target,
3307               uint32_t targ_lun, int can_wait)
3308 {
3309         union ctl_io *io;
3310
3311         if (can_wait)
3312                 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK);
3313         else
3314                 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
3315
3316         if (io != NULL) {
3317                 io->io_hdr.io_type = io_type;
3318                 io->io_hdr.targ_port = targ_port;
3319                 /*
3320                  * XXX KDM this needs to change/go away.  We need to move
3321                  * to a preallocated pool of ctl_scsiio structures.
3322                  */
3323                 io->io_hdr.nexus.targ_target.id = targ_target;
3324                 io->io_hdr.nexus.targ_lun = targ_lun;
3325         }
3326
3327         return (io);
3328 }
3329
3330 void
3331 ctl_kfree_io(union ctl_io *io)
3332 {
3333         free(io, M_CTL);
3334 }
3335 #endif /* unused */
3336
3337 /*
3338  * ctl_softc, pool_type, total_ctl_io are passed in.
3339  * npool is passed out.
3340  */
3341 int
3342 ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type,
3343                 uint32_t total_ctl_io, struct ctl_io_pool **npool)
3344 {
3345         uint32_t i;
3346         union ctl_io *cur_io, *next_io;
3347         struct ctl_io_pool *pool;
3348         int retval;
3349
3350         retval = 0;
3351
3352         pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3353                                             M_NOWAIT | M_ZERO);
3354         if (pool == NULL) {
3355                 retval = ENOMEM;
3356                 goto bailout;
3357         }
3358
3359         pool->type = pool_type;
3360         pool->ctl_softc = ctl_softc;
3361
3362         mtx_lock(&ctl_softc->pool_lock);
3363         pool->id = ctl_softc->cur_pool_id++;
3364         mtx_unlock(&ctl_softc->pool_lock);
3365
3366         pool->flags = CTL_POOL_FLAG_NONE;
3367         pool->refcount = 1;             /* Reference for validity. */
3368         STAILQ_INIT(&pool->free_queue);
3369
3370         /*
3371          * XXX KDM other options here:
3372          * - allocate a page at a time
3373          * - allocate one big chunk of memory.
3374          * Page allocation might work well, but would take a little more
3375          * tracking.
3376          */
3377         for (i = 0; i < total_ctl_io; i++) {
3378                 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL,
3379                                                 M_NOWAIT);
3380                 if (cur_io == NULL) {
3381                         retval = ENOMEM;
3382                         break;
3383                 }
3384                 cur_io->io_hdr.pool = pool;
3385                 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links);
3386                 pool->total_ctl_io++;
3387                 pool->free_ctl_io++;
3388         }
3389
3390         if (retval != 0) {
3391                 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3392                      cur_io != NULL; cur_io = next_io) {
3393                         next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr,
3394                                                               links);
3395                         STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr,
3396                                       ctl_io_hdr, links);
3397                         free(cur_io, M_CTL);
3398                 }
3399
3400                 free(pool, M_CTL);
3401                 goto bailout;
3402         }
3403         mtx_lock(&ctl_softc->pool_lock);
3404         ctl_softc->num_pools++;
3405         STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links);
3406         /*
3407          * Increment our usage count if this is an external consumer, so we
3408          * can't get unloaded until the external consumer (most likely a
3409          * FETD) unloads and frees his pool.
3410          *
3411          * XXX KDM will this increment the caller's module use count, or
3412          * mine?
3413          */
3414 #if 0
3415         if ((pool_type != CTL_POOL_EMERGENCY)
3416          && (pool_type != CTL_POOL_INTERNAL)
3417          && (pool_type != CTL_POOL_IOCTL)
3418          && (pool_type != CTL_POOL_4OTHERSC))
3419                 MOD_INC_USE_COUNT;
3420 #endif
3421
3422         mtx_unlock(&ctl_softc->pool_lock);
3423
3424         *npool = pool;
3425
3426 bailout:
3427
3428         return (retval);
3429 }
3430
3431 static int
3432 ctl_pool_acquire(struct ctl_io_pool *pool)
3433 {
3434
3435         mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED);
3436
3437         if (pool->flags & CTL_POOL_FLAG_INVALID)
3438                 return (EINVAL);
3439
3440         pool->refcount++;
3441
3442         return (0);
3443 }
3444
3445 static void
3446 ctl_pool_release(struct ctl_io_pool *pool)
3447 {
3448         struct ctl_softc *ctl_softc = pool->ctl_softc;
3449         union ctl_io *io;
3450
3451         mtx_assert(&ctl_softc->pool_lock, MA_OWNED);
3452
3453         if (--pool->refcount != 0)
3454                 return;
3455
3456         while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) {
3457                 STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr,
3458                               links);
3459                 free(io, M_CTL);
3460         }
3461
3462         STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links);
3463         ctl_softc->num_pools--;
3464
3465         /*
3466          * XXX KDM will this decrement the caller's usage count or mine?
3467          */
3468 #if 0
3469         if ((pool->type != CTL_POOL_EMERGENCY)
3470          && (pool->type != CTL_POOL_INTERNAL)
3471          && (pool->type != CTL_POOL_IOCTL))
3472                 MOD_DEC_USE_COUNT;
3473 #endif
3474
3475         free(pool, M_CTL);
3476 }
3477
3478 void
3479 ctl_pool_free(struct ctl_io_pool *pool)
3480 {
3481         struct ctl_softc *ctl_softc;
3482
3483         if (pool == NULL)
3484                 return;
3485
3486         ctl_softc = pool->ctl_softc;
3487         mtx_lock(&ctl_softc->pool_lock);
3488         pool->flags |= CTL_POOL_FLAG_INVALID;
3489         ctl_pool_release(pool);
3490         mtx_unlock(&ctl_softc->pool_lock);
3491 }
3492
3493 /*
3494  * This routine does not block (except for spinlocks of course).
3495  * It tries to allocate a ctl_io union from the caller's pool as quickly as
3496  * possible.
3497  */
3498 union ctl_io *
3499 ctl_alloc_io(void *pool_ref)
3500 {
3501         union ctl_io *io;
3502         struct ctl_softc *ctl_softc;
3503         struct ctl_io_pool *pool, *npool;
3504         struct ctl_io_pool *emergency_pool;
3505
3506         pool = (struct ctl_io_pool *)pool_ref;
3507
3508         if (pool == NULL) {
3509                 printf("%s: pool is NULL\n", __func__);
3510                 return (NULL);
3511         }
3512
3513         emergency_pool = NULL;
3514
3515         ctl_softc = pool->ctl_softc;
3516
3517         mtx_lock(&ctl_softc->pool_lock);
3518         /*
3519          * First, try to get the io structure from the user's pool.
3520          */
3521         if (ctl_pool_acquire(pool) == 0) {
3522                 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue);
3523                 if (io != NULL) {
3524                         STAILQ_REMOVE_HEAD(&pool->free_queue, links);
3525                         pool->total_allocated++;
3526                         pool->free_ctl_io--;
3527                         mtx_unlock(&ctl_softc->pool_lock);
3528                         return (io);
3529                 } else
3530                         ctl_pool_release(pool);
3531         }
3532         /*
3533          * If he doesn't have any io structures left, search for an
3534          * emergency pool and grab one from there.
3535          */
3536         STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) {
3537                 if (npool->type != CTL_POOL_EMERGENCY)
3538                         continue;
3539
3540                 if (ctl_pool_acquire(npool) != 0)
3541                         continue;
3542
3543                 emergency_pool = npool;
3544
3545                 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue);
3546                 if (io != NULL) {
3547                         STAILQ_REMOVE_HEAD(&npool->free_queue, links);
3548                         npool->total_allocated++;
3549                         npool->free_ctl_io--;
3550                         mtx_unlock(&ctl_softc->pool_lock);
3551                         return (io);
3552                 } else
3553                         ctl_pool_release(npool);
3554         }
3555
3556         /* Drop the spinlock before we malloc */
3557         mtx_unlock(&ctl_softc->pool_lock);
3558
3559         /*
3560          * The emergency pool (if it exists) didn't have one, so try an
3561          * atomic (i.e. nonblocking) malloc and see if we get lucky.
3562          */
3563         io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT);
3564         if (io != NULL) {
3565                 /*
3566                  * If the emergency pool exists but is empty, add this
3567                  * ctl_io to its list when it gets freed.
3568                  */
3569                 if (emergency_pool != NULL) {
3570                         mtx_lock(&ctl_softc->pool_lock);
3571                         if (ctl_pool_acquire(emergency_pool) == 0) {
3572                                 io->io_hdr.pool = emergency_pool;
3573                                 emergency_pool->total_ctl_io++;
3574                                 /*
3575                                  * Need to bump this, otherwise
3576                                  * total_allocated and total_freed won't
3577                                  * match when we no longer have anything
3578                                  * outstanding.
3579                                  */
3580                                 emergency_pool->total_allocated++;
3581                         }
3582                         mtx_unlock(&ctl_softc->pool_lock);
3583                 } else
3584                         io->io_hdr.pool = NULL;
3585         }
3586
3587         return (io);
3588 }
3589
3590 void
3591 ctl_free_io(union ctl_io *io)
3592 {
3593         if (io == NULL)
3594                 return;
3595
3596         /*
3597          * If this ctl_io has a pool, return it to that pool.
3598          */
3599         if (io->io_hdr.pool != NULL) {
3600                 struct ctl_io_pool *pool;
3601 #if 0
3602                 struct ctl_softc *ctl_softc;
3603                 union ctl_io *tmp_io;
3604                 unsigned long xflags;
3605                 int i;
3606
3607                 ctl_softc = control_softc;
3608 #endif
3609
3610                 pool = (struct ctl_io_pool *)io->io_hdr.pool;
3611
3612                 mtx_lock(&pool->ctl_softc->pool_lock);
3613 #if 0
3614                 save_flags(xflags);
3615
3616                 for (i = 0, tmp_io = (union ctl_io *)STAILQ_FIRST(
3617                      &ctl_softc->task_queue); tmp_io != NULL; i++,
3618                      tmp_io = (union ctl_io *)STAILQ_NEXT(&tmp_io->io_hdr,
3619                      links)) {
3620                         if (tmp_io == io) {
3621                                 printf("%s: %p is still on the task queue!\n",
3622                                        __func__, tmp_io);
3623                                 printf("%s: (%d): type %d "
3624                                        "msg %d cdb %x iptl: "
3625                                        "%d:%d:%d:%d tag 0x%04x "
3626                                        "flg %#lx\n",
3627                                         __func__, i,
3628                                         tmp_io->io_hdr.io_type,
3629                                         tmp_io->io_hdr.msg_type,
3630                                         tmp_io->scsiio.cdb[0],
3631                                         tmp_io->io_hdr.nexus.initid.id,
3632                                         tmp_io->io_hdr.nexus.targ_port,
3633                                         tmp_io->io_hdr.nexus.targ_target.id,
3634                                         tmp_io->io_hdr.nexus.targ_lun,
3635                                         (tmp_io->io_hdr.io_type ==
3636                                         CTL_IO_TASK) ?
3637                                         tmp_io->taskio.tag_num :
3638                                         tmp_io->scsiio.tag_num,
3639                                         xflags);
3640                                 panic("I/O still on the task queue!");
3641                         }
3642                 }
3643 #endif
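                /* 0xff is not a valid io_type; mark the entry stale. */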
3644                 io->io_hdr.io_type = 0xff;
3645                 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links);
3646                 pool->total_freed++;
3647                 pool->free_ctl_io++;
3648                 ctl_pool_release(pool);
3649                 mtx_unlock(&pool->ctl_softc->pool_lock);
3650         } else {
3651                 /*
3652                  * Otherwise, just free it.  We probably malloced it and
3653                  * the emergency pool wasn't available.
3654                  */
3655                 free(io, M_CTL);
3656         }
3657
3658 }
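
/*
 * Putting the pool routines together, a hedged sketch of the lifecycle a
 * consumer (e.g. a FETD) would follow.  The pool type and I/O count here
 * are assumptions for illustration; see ctl_pool_type for the real values:
 *
 *      struct ctl_io_pool *pool;
 *      union ctl_io *io;
 *
 *      if (ctl_pool_create(ctl_softc, CTL_POOL_FETD, 1024, &pool) != 0)
 *              return (ENOMEM);
 *      io = ctl_alloc_io(pool);        // may fall back to emergency pool
 *      if (io != NULL) {
 *              ctl_zero_io(io);        // defined just below
 *              // ... fill in, submit, wait for completion ...
 *              ctl_free_io(io);
 *      }
 *      ctl_pool_free(pool);            // drops the validity reference
 */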
3659
3660 void
3661 ctl_zero_io(union ctl_io *io)
3662 {
3663         void *pool_ref;
3664
3665         if (io == NULL)
3666                 return;
3667
3668         /*
3669          * May need to preserve linked list pointers at some point too.
3670          */
3671         pool_ref = io->io_hdr.pool;
3672
3673         memset(io, 0, sizeof(*io));
3674
3675         io->io_hdr.pool = pool_ref;
3676 }
3677
3678 /*
3679  * This routine is currently used for internal copies of ctl_ios that need
3680  * to persist for some reason after we've already returned status to the
3681  * FETD.  (Thus the flag set.)
3682  *
3683  * XXX XXX
3684  * Note that this makes a blind copy of all fields in the ctl_io, except
3685  * for the pool reference.  This includes any memory that has been
3686  * allocated!  That memory will no longer be valid after done has been
3687  * called, so this would be VERY DANGEROUS for a command that actually does
3688  * any reads or writes.  Right now (11/7/2005), this is only used for immediate
3689  * start and stop commands, which don't transfer any data, so this is not a
3690  * problem.  If it is used for anything else, the caller would also need to
3691  * allocate data buffer space and this routine would need to be modified to
3692  * copy the data buffer(s) as well.
3693  */
3694 void
3695 ctl_copy_io(union ctl_io *src, union ctl_io *dest)
3696 {
3697         void *pool_ref;
3698
3699         if ((src == NULL)
3700          || (dest == NULL))
3701                 return;
3702
3703         /*
3704          * May need to preserve linked list pointers at some point too.
3705          */
3706         pool_ref = dest->io_hdr.pool;
3707
3708         memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest)));
3709
3710         dest->io_hdr.pool = pool_ref;
3711         /*
3712          * We need to know that this is an internal copy, and doesn't need
3713          * to get passed back to the FETD that allocated it.
3714          */
3715         dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
3716 }
3717
3718 #ifdef NEEDTOPORT
3719 static void
3720 ctl_update_power_subpage(struct copan_power_subpage *page)
3721 {
3722         int num_luns, num_partitions, config_type;
3723         struct ctl_softc *softc;
3724         cs_BOOL_t aor_present, shelf_50pct_power;
3725         cs_raidset_personality_t rs_type;
3726         int max_active_luns;
3727
3728         softc = control_softc;
3729
3730         /* subtract out the processor LUN */
3731         num_luns = softc->num_luns - 1;
3732         /*
3733          * Default to 7 LUNs active, which was the only number we allowed
3734          * in the past.
3735          */
3736         max_active_luns = 7;
3737
3738         num_partitions = config_GetRsPartitionInfo();
3739         config_type = config_GetConfigType();
3740         shelf_50pct_power = config_GetShelfPowerMode();
3741         aor_present = config_IsAorRsPresent();
3742
3743         rs_type = ddb_GetRsRaidType(1);
3744         if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5)
3745          && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) {
3746                 EPRINT(0, "Unsupported RS type %d!", rs_type);
3747         }
3748
3749
3750         page->total_luns = num_luns;
3751
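        /*
         * Summary of the decision table below (drive count / shelf power /
         * AOR / RAID type -> max active LUNs): 40-drive configs power every
         * LUN; 64- and 112-drive configs at 25% power allow 7 (RAID-5) or
         * 14 (RAID-1) LUNs with AOR, and 8 or 16 without; at 50% power,
         * 64-drive configs power every LUN, and 112-drive configs allow
         * 14/all (with AOR) or 15/30 (without).
         */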
3752         switch (config_type) {
3753         case 40:
3754                 /*
3755                  * In a 40 drive configuration, it doesn't matter what DC
3756                  * cards we have, whether we have AOR enabled or not,
3757                  * partitioning or not, or what type of RAIDset we have.
3758                  * In that scenario, we can power up every LUN we present
3759                  * to the user.
3760                  */
3761                 max_active_luns = num_luns;
3762
3763                 break;
3764         case 64:
3765                 if (shelf_50pct_power == CS_FALSE) {
3766                         /* 25% power */
3767                         if (aor_present == CS_TRUE) {
3768                                 if (rs_type ==
3769                                      CS_RAIDSET_PERSONALITY_RAID5) {
3770                                         max_active_luns = 7;
3771                                 } else if (rs_type ==
3772                                          CS_RAIDSET_PERSONALITY_RAID1){
3773                                         max_active_luns = 14;
3774                                 } else {
3775                                         /* XXX KDM now what?? */
3776                                 }
3777                         } else {
3778                                 if (rs_type ==
3779                                      CS_RAIDSET_PERSONALITY_RAID5) {
3780                                         max_active_luns = 8;
3781                                 } else if (rs_type ==
3782                                          CS_RAIDSET_PERSONALITY_RAID1){
3783                                         max_active_luns = 16;
3784                                 } else {
3785                                         /* XXX KDM now what?? */
3786                                 }
3787                         }
3788                 } else {
3789                         /* 50% power */
3790                         /*
3791                          * With 50% power in a 64 drive configuration, we
3792                          * can power all LUNs we present.
3793                          */
3794                         max_active_luns = num_luns;
3795                 }
3796                 break;
3797         case 112:
3798                 if (shelf_50pct_power == CS_FALSE) {
3799                         /* 25% power */
3800                         if (aor_present == CS_TRUE) {
3801                                 if (rs_type ==
3802                                      CS_RAIDSET_PERSONALITY_RAID5) {
3803                                         max_active_luns = 7;
3804                                 } else if (rs_type ==
3805                                          CS_RAIDSET_PERSONALITY_RAID1){
3806                                         max_active_luns = 14;
3807                                 } else {
3808                                         /* XXX KDM now what?? */
3809                                 }
3810                         } else {
3811                                 if (rs_type ==
3812                                      CS_RAIDSET_PERSONALITY_RAID5) {
3813                                         max_active_luns = 8;
3814                                 } else if (rs_type ==
3815                                          CS_RAIDSET_PERSONALITY_RAID1){
3816                                         max_active_luns = 16;
3817                                 } else {
3818                                         /* XXX KDM now what?? */
3819                                 }
3820                         }
3821                 } else {
3822                         /* 50% power */
3823                         if (aor_present == CS_TRUE) {
3824                                 if (rs_type ==
3825                                      CS_RAIDSET_PERSONALITY_RAID5) {
3826                                         max_active_luns = 14;
3827                                 } else if (rs_type ==
3828                                          CS_RAIDSET_PERSONALITY_RAID1){
3829                                         /*
3830                                          * We're assuming here that disk
3831                                          * caching is enabled, and so we're
3832                                          * able to power up half of each
3833                                          * LUN, and cache all writes.
3834                                          */
3835                                         max_active_luns = num_luns;
3836                                 } else {
3837                                         /* XXX KDM now what?? */
3838                                 }
3839                         } else {
3840                                 if (rs_type ==
3841                                      CS_RAIDSET_PERSONALITY_RAID5) {
3842                                         max_active_luns = 15;
3843                                 } else if (rs_type ==
3844                                          CS_RAIDSET_PERSONALITY_RAID1){
3845                                         max_active_luns = 30;
3846                                 } else {
3847                                         /* XXX KDM now what?? */
3848                                 }
3849                         }
3850                 }
3851                 break;
3852         default:
3853                 /*
3854                  * In this case, we have an unknown configuration, so we
3855                  * just use the default from above.
3856                  */
3857                 break;
3858         }
3859
3860         page->max_active_luns = max_active_luns;
3861 #if 0
3862         printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
3863                page->total_luns, page->max_active_luns);
3864 #endif
3865 }
3866 #endif /* NEEDTOPORT */
3867
3868 /*
3869  * This routine could be used in the future to load default and/or saved
3870  * mode page parameters for a particular LUN.
3871  */
3872 static int
3873 ctl_init_page_index(struct ctl_lun *lun)
3874 {
3875         int i;
3876         struct ctl_page_index *page_index;
3877         struct ctl_softc *softc;
3878
3879         memcpy(&lun->mode_pages.index, page_index_template,
3880                sizeof(page_index_template));
3881
3882         softc = lun->ctl_softc;
3883
3884         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
3885
3886                 page_index = &lun->mode_pages.index[i];
3887                 /*
3888                  * If this is a disk-only mode page, there's no point in
3889                  * setting it up.  For some pages, we have to have some
3890                  * basic information about the disk in order to calculate the
3891                  * mode page data.
3892                  */
3893                 if ((lun->be_lun->lun_type != T_DIRECT)
3894                  && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
3895                         continue;
3896
3897                 switch (page_index->page_code & SMPH_PC_MASK) {
3898                 case SMS_FORMAT_DEVICE_PAGE: {
3899                         struct scsi_format_page *format_page;
3900
3901                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3902                                 panic("subpage is incorrect!");
3903
3904                         /*
3905                          * Sectors per track are set above.  Bytes per
3906                          * sector need to be set here on a per-LUN basis.
3907                          */
3908                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
3909                                &format_page_default,
3910                                sizeof(format_page_default));
3911                         memcpy(&lun->mode_pages.format_page[
3912                                CTL_PAGE_CHANGEABLE], &format_page_changeable,
3913                                sizeof(format_page_changeable));
3914                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
3915                                &format_page_default,
3916                                sizeof(format_page_default));
3917                         memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
3918                                &format_page_default,
3919                                sizeof(format_page_default));
3920
3921                         format_page = &lun->mode_pages.format_page[
3922                                 CTL_PAGE_CURRENT];
3923                         scsi_ulto2b(lun->be_lun->blocksize,
3924                                     format_page->bytes_per_sector);
3925
3926                         format_page = &lun->mode_pages.format_page[
3927                                 CTL_PAGE_DEFAULT];
3928                         scsi_ulto2b(lun->be_lun->blocksize,
3929                                     format_page->bytes_per_sector);
3930
3931                         format_page = &lun->mode_pages.format_page[
3932                                 CTL_PAGE_SAVED];
3933                         scsi_ulto2b(lun->be_lun->blocksize,
3934                                     format_page->bytes_per_sector);
3935
3936                         page_index->page_data =
3937                                 (uint8_t *)lun->mode_pages.format_page;
3938                         break;
3939                 }
3940                 case SMS_RIGID_DISK_PAGE: {
3941                         struct scsi_rigid_disk_page *rigid_disk_page;
3942                         uint32_t sectors_per_cylinder;
3943                         uint64_t cylinders;
3944 #ifndef __XSCALE__
3945                         int shift;
3946 #endif /* !__XSCALE__ */
3947
3948                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3949                                 panic("invalid subpage value %d",
3950                                       page_index->subpage);
3951
3952                         /*
3953                          * Rotation rate and sectors per track are set
3954                          * above.  We calculate the cylinders here based on
3955                          * capacity.  Due to the number of heads and
3956                          * sectors per track we're using, smaller arrays
3957                          * may turn out to have 0 cylinders.  Linux and
3958                          * FreeBSD don't pay attention to these mode pages
3959                          * to figure out capacity, but Solaris does.  It
3960                          * seems to deal with 0 cylinders just fine, and
3961                          * works out a fake geometry based on the capacity.
3962                          */
3963                         memcpy(&lun->mode_pages.rigid_disk_page[
3964                                CTL_PAGE_CURRENT], &rigid_disk_page_default,
3965                                sizeof(rigid_disk_page_default));
3966                         memcpy(&lun->mode_pages.rigid_disk_page[
3967                                CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
3968                                sizeof(rigid_disk_page_changeable));
3969                         memcpy(&lun->mode_pages.rigid_disk_page[
3970                                CTL_PAGE_DEFAULT], &rigid_disk_page_default,
3971                                sizeof(rigid_disk_page_default));
3972                         memcpy(&lun->mode_pages.rigid_disk_page[
3973                                CTL_PAGE_SAVED], &rigid_disk_page_default,
3974                                sizeof(rigid_disk_page_default));
3975
3976                         sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
3977                                 CTL_DEFAULT_HEADS;
3978
3979                         /*
3980                          * The divide method here will be more accurate,
3981                          * probably, but results in a 64-bit software
3982                          * division routine (__udivdi3()) on i386.  On the
3983                          * XScale, though, __udivdi3() is implemented in
3984                          * software.
3985                          *
3986                          * The shift method for cylinder calculation is
3987                          * accurate if sectors_per_cylinder is a power of
3988                          * 2.  Otherwise it might be slightly off -- you
3989                          * might have a bit of a truncation problem.
3990                          */
3991 #ifdef  __XSCALE__
3992                         cylinders = (lun->be_lun->maxlba + 1) /
3993                                 sectors_per_cylinder;
3994 #else
3995                         for (shift = 31; shift > 0; shift--) {
3996                                 if (sectors_per_cylinder & (1 << shift))
3997                                         break;
3998                         }
3999                         cylinders = (lun->be_lun->maxlba + 1) >> shift;
4000 #endif
4001
4002                         /*
4003                          * We've basically got 3 bytes, or 24 bits for the
4004                          * cylinder size in the mode page.  If we're over,
4005                          * just round down to 2^24.
4006                          */
4007                         if (cylinders > 0xffffff)
4008                                 cylinders = 0xffffff;
4009
4010                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4011                                 CTL_PAGE_CURRENT];
4012                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4013
4014                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4015                                 CTL_PAGE_DEFAULT];
4016                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4017
4018                         rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4019                                 CTL_PAGE_SAVED];
4020                         scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4021
4022                         page_index->page_data =
4023                                 (uint8_t *)lun->mode_pages.rigid_disk_page;
4024                         break;
4025                 }
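/*
 * Illustrative standalone sketch (not part of ctl.c): the shift-based
 * cylinder calculation above, with made-up example values.  If
 * sectors_per_cylinder is 32768 (a power of 2), shift comes out as 15,
 * and a LUN with maxlba 4194303 yields (4194303 + 1) >> 15 = 128
 * cylinders.
 */
#include <stdint.h>

static uint64_t
demo_cylinders(uint64_t maxlba, uint32_t sectors_per_cylinder)
{
	uint64_t cylinders;
	int shift;

	/* Find the highest set bit; the shift is exact if and only if
	 * sectors_per_cylinder is a power of 2. */
	for (shift = 31; shift > 0; shift--)
		if (sectors_per_cylinder & (1u << shift))
			break;
	cylinders = (maxlba + 1) >> shift;

	/* Clamp to the 3-byte (24-bit) cylinders field of the page. */
	if (cylinders > 0xffffff)
		cylinders = 0xffffff;
	return (cylinders);
}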
4026                 case SMS_CACHING_PAGE: {
4027
4028                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4029                                 panic("invalid subpage value %d",
4030                                       page_index->subpage);
4031                         /*
4032                          * Defaults should be okay here, no calculations
4033                          * needed.
4034                          */
4035                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4036                                &caching_page_default,
4037                                sizeof(caching_page_default));
4038                         memcpy(&lun->mode_pages.caching_page[
4039                                CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4040                                sizeof(caching_page_changeable));
4041                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4042                                &caching_page_default,
4043                                sizeof(caching_page_default));
4044                         memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4045                                &caching_page_default,
4046                                sizeof(caching_page_default));
4047                         page_index->page_data =
4048                                 (uint8_t *)lun->mode_pages.caching_page;
4049                         break;
4050                 }
4051                 case SMS_CONTROL_MODE_PAGE: {
4052
4053                         if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4054                                 panic("invalid subpage value %d",
4055                                       page_index->subpage);
4056
4057                         /*
4058                          * Defaults should be okay here, no calculations
4059                          * needed.
4060                          */
4061                         memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
4062                                &control_page_default,
4063                                sizeof(control_page_default));
4064                         memcpy(&lun->mode_pages.control_page[
4065                                CTL_PAGE_CHANGEABLE], &control_page_changeable,
4066                                sizeof(control_page_changeable));
4067                         memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
4068                                &control_page_default,
4069                                sizeof(control_page_default));
4070                         memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
4071                                &control_page_default,
4072                                sizeof(control_page_default));
4073                         page_index->page_data =
4074                                 (uint8_t *)lun->mode_pages.control_page;
4075                         break;
4076
4077                 }
4078                 case SMS_VENDOR_SPECIFIC_PAGE: {
4079                         switch (page_index->subpage) {
4080                         case PWR_SUBPAGE_CODE: {
4081                                 struct copan_power_subpage *current_page,
4082                                                            *saved_page;
4083
4084                                 memcpy(&lun->mode_pages.power_subpage[
4085                                        CTL_PAGE_CURRENT],
4086                                        &power_page_default,
4087                                        sizeof(power_page_default));
4088                                 memcpy(&lun->mode_pages.power_subpage[
4089                                        CTL_PAGE_CHANGEABLE],
4090                                        &power_page_changeable,
4091                                        sizeof(power_page_changeable));
4092                                 memcpy(&lun->mode_pages.power_subpage[
4093                                        CTL_PAGE_DEFAULT],
4094                                        &power_page_default,
4095                                        sizeof(power_page_default));
4096                                 memcpy(&lun->mode_pages.power_subpage[
4097                                        CTL_PAGE_SAVED],
4098                                        &power_page_default,
4099                                        sizeof(power_page_default));
4100                                 page_index->page_data =
4101                                     (uint8_t *)lun->mode_pages.power_subpage;
4102
4103                                 current_page = (struct copan_power_subpage *)
4104                                         (page_index->page_data +
4105                                          (page_index->page_len *
4106                                           CTL_PAGE_CURRENT));
4107                                 saved_page = (struct copan_power_subpage *)
4108                                         (page_index->page_data +
4109                                          (page_index->page_len *
4110                                           CTL_PAGE_SAVED));
4111                                 break;
4112                         }
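/*
 * Illustrative standalone sketch (not part of ctl.c): each mode page is
 * held as four equally sized copies -- current, changeable, default and
 * saved -- in one contiguous array, so a variant is located by
 * offsetting page_data by page_len times the variant index, which is
 * exactly what the casts above compute.  The demo type is a
 * placeholder, not a real CTL structure.
 */
#include <stdint.h>

struct demo_subpage {
	uint8_t bytes[16];		/* placeholder page payload */
};

static struct demo_subpage *
demo_page_variant(uint8_t *page_data, uint32_t page_len, int variant)
{
	/* variant would be CTL_PAGE_CURRENT, CTL_PAGE_SAVED, etc. */
	return ((struct demo_subpage *)(page_data + page_len * variant));
}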
4113                         case APS_SUBPAGE_CODE: {
4114                                 struct copan_aps_subpage *current_page,
4115                                                          *saved_page;
4116
4117                                 /* This gets set multiple times, but it
4118                                  * should always be the same value; it is
4119                                  * only done during init, so that is harmless. */
4120                                 index_to_aps_page = i;
4121
4122                                 memcpy(&lun->mode_pages.aps_subpage[
4123                                        CTL_PAGE_CURRENT],
4124                                        &aps_page_default,
4125                                        sizeof(aps_page_default));
4126                                 memcpy(&lun->mode_pages.aps_subpage[
4127                                        CTL_PAGE_CHANGEABLE],
4128                                        &aps_page_changeable,
4129                                        sizeof(aps_page_changeable));
4130                                 memcpy(&lun->mode_pages.aps_subpage[
4131                                        CTL_PAGE_DEFAULT],
4132                                        &aps_page_default,
4133                                        sizeof(aps_page_default));
4134                                 memcpy(&lun->mode_pages.aps_subpage[
4135                                        CTL_PAGE_SAVED],
4136                                        &aps_page_default,
4137                                        sizeof(aps_page_default));
4138                                 page_index->page_data =
4139                                         (uint8_t *)lun->mode_pages.aps_subpage;
4140
4141                                 current_page = (struct copan_aps_subpage *)
4142                                         (page_index->page_data +
4143                                          (page_index->page_len *
4144                                           CTL_PAGE_CURRENT));
4145                                 saved_page = (struct copan_aps_subpage *)
4146                                         (page_index->page_data +
4147                                          (page_index->page_len *
4148                                           CTL_PAGE_SAVED));
4149                                 break;
4150                         }
4151                         case DBGCNF_SUBPAGE_CODE: {
4152                                 struct copan_debugconf_subpage *current_page,
4153                                                                *saved_page;
4154
4155                                 memcpy(&lun->mode_pages.debugconf_subpage[
4156                                        CTL_PAGE_CURRENT],
4157                                        &debugconf_page_default,
4158                                        sizeof(debugconf_page_default));
4159                                 memcpy(&lun->mode_pages.debugconf_subpage[
4160                                        CTL_PAGE_CHANGEABLE],
4161                                        &debugconf_page_changeable,
4162                                        sizeof(debugconf_page_changeable));
4163                                 memcpy(&lun->mode_pages.debugconf_subpage[
4164                                        CTL_PAGE_DEFAULT],
4165                                        &debugconf_page_default,
4166                                        sizeof(debugconf_page_default));
4167                                 memcpy(&lun->mode_pages.debugconf_subpage[
4168                                        CTL_PAGE_SAVED],
4169                                        &debugconf_page_default,
4170                                        sizeof(debugconf_page_default));
4171                                 page_index->page_data =
4172                                         (uint8_t *)lun->mode_pages.debugconf_subpage;
4173
4174                                 current_page = (struct copan_debugconf_subpage *)
4175                                         (page_index->page_data +
4176                                          (page_index->page_len *
4177                                           CTL_PAGE_CURRENT));
4178                                 saved_page = (struct copan_debugconf_subpage *)
4179                                         (page_index->page_data +
4180                                          (page_index->page_len *
4181                                           CTL_PAGE_SAVED));
4182                                 break;
4183                         }
4184                         default:
4185                                 panic("invalid subpage value %d",
4186                                       page_index->subpage);
4187                                 break;
4188                         }
4189                         break;
4190                 }
4191                 default:
4192                         panic("invalid page value %d",
4193                               page_index->page_code & SMPH_PC_MASK);
4194                         break;
4195         }
4196         }
4197
4198         return (CTL_RETVAL_COMPLETE);
4199 }
4200
4201 /*
4202  * LUN allocation.
4203  *
4204  * Requirements:
4205  * - the caller allocates and zeros LUN storage, or passes in a NULL LUN
4206  *   if it wants us to allocate the LUN and can afford to block.
4207  * - ctl_softc is always set
4208  * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4209  *
4210  * Returns 0 for success, non-zero (errno) for failure.
4211  */
4212 static int
4213 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4214               struct ctl_be_lun *const be_lun, struct ctl_id target_id)
4215 {
4216         struct ctl_lun *nlun, *lun;
4217         struct ctl_frontend *fe;
4218         int lun_number, i, lun_malloced;
4219
4220         if (be_lun == NULL)
4221                 return (EINVAL);
4222
4223         /*
4224          * We currently only support Direct Access or Processor LUN types.
4225          */
4226         switch (be_lun->lun_type) {
4227         case T_DIRECT:
4228                 break;
4229         case T_PROCESSOR:
4230                 break;
4231         case T_SEQUENTIAL:
4232         case T_CHANGER:
4233         default:
4234                 be_lun->lun_config_status(be_lun->be_lun,
4235                                           CTL_LUN_CONFIG_FAILURE);
4236                 return (EINVAL);
4237         }
4238         if (ctl_lun == NULL) {
4239                 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4240                 lun_malloced = 1;
4241         } else {
4242                 lun_malloced = 0;
4243                 lun = ctl_lun;
4244         }
4245
4246         memset(lun, 0, sizeof(*lun));
4247         if (lun_malloced)
4248                 lun->flags = CTL_LUN_MALLOCED;
4249
4250         mtx_lock(&ctl_softc->ctl_lock);
4251         /*
4252          * See if the caller requested a particular LUN number.  If so, see
4253          * if it is available.  Otherwise, allocate the first available LUN.
4254          */
4255         if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4256                 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4257                  || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4258                         mtx_unlock(&ctl_softc->ctl_lock);
4259                         if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4260                                 printf("ctl: requested LUN ID %d is higher "
4261                                        "than CTL_MAX_LUNS - 1 (%d)\n",
4262                                        be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4263                         } else {
4264                                 /*
4265                                  * XXX KDM return an error, or just assign
4266                                  * another LUN ID in this case??
4267                                  */
4268                                 printf("ctl: requested LUN ID %d is already "
4269                                        "in use\n", be_lun->req_lun_id);
4270                         }
4271                         if (lun->flags & CTL_LUN_MALLOCED)
4272                                 free(lun, M_CTL);
4273                         be_lun->lun_config_status(be_lun->be_lun,
4274                                                   CTL_LUN_CONFIG_FAILURE);
4275                         return (ENOSPC);
4276                 }
4277                 lun_number = be_lun->req_lun_id;
4278         } else {
4279                 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
4280                 if (lun_number == -1) {
4281                         mtx_unlock(&ctl_softc->ctl_lock);
4282                         printf("ctl: can't allocate LUN on target %ju, out of "
4283                                "LUNs\n", (uintmax_t)target_id.id);
4284                         if (lun->flags & CTL_LUN_MALLOCED)
4285                                 free(lun, M_CTL);
4286                         be_lun->lun_config_status(be_lun->be_lun,
4287                                                   CTL_LUN_CONFIG_FAILURE);
4288                         return (ENOSPC);
4289                 }
4290         }
4291         ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
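/*
 * Illustrative standalone sketch (not part of ctl.c): a find-first-zero
 * scan of the kind ctl_ffz() performs over the LUN mask above, assuming
 * one bit per LUN packed into 32-bit words.  The real helper's
 * implementation may differ in detail.
 */
#include <stdint.h>

static int
demo_ffz(const uint32_t *mask, int num_bits)
{
	int i;

	for (i = 0; i < num_bits; i++) {
		if ((mask[i / 32] & (1u << (i % 32))) == 0)
			return (i);	/* first clear bit == first free LUN */
	}
	return (-1);			/* every LUN number is in use */
}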
4292
4293         lun->target = target_id;
4294         lun->lun = lun_number;
4295         lun->be_lun = be_lun;
4296         /*
4297          * All LUNs come on line disabled, and must be enabled by the
4298          * backend (via ctl_enable_lun()) before they can be used.
4299          */
4300         lun->flags |= CTL_LUN_DISABLED;
4301         lun->backend = be_lun->be;
4302         be_lun->ctl_lun = lun;
4303         be_lun->lun_id = lun_number;
4304         atomic_add_int(&be_lun->be->num_luns, 1);
4305         if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
4306                 lun->flags |= CTL_LUN_STOPPED;
4307
4308         if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
4309                 lun->flags |= CTL_LUN_INOPERABLE;
4310
4311         if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4312                 lun->flags |= CTL_LUN_PRIMARY_SC;
4313
4314         lun->ctl_softc = ctl_softc;
4315         TAILQ_INIT(&lun->ooa_queue);
4316         TAILQ_INIT(&lun->blocked_queue);
4317         STAILQ_INIT(&lun->error_list);
4318
4319         /*
4320          * Initialize the mode page index.
4321          */
4322         ctl_init_page_index(lun);
4323
4324         /*
4325          * Set the poweron UA for all initiators on this LUN only.
4326          */
4327         for (i = 0; i < CTL_MAX_INITIATORS; i++)
4328                 lun->pending_sense[i].ua_pending = CTL_UA_POWERON;
4329
4330         /*
4331          * Now, before we insert this lun on the lun list, set the lun
4332          * inventory changed UA for all other luns.
4333          */
4334         STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4335                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
4336                         nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
4337                 }
4338         }
4339
4340         STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4341
4342         ctl_softc->ctl_luns[lun_number] = lun;
4343
4344         ctl_softc->num_luns++;
4345
4346         /* Setup statistics gathering */
4347         lun->stats.device_type = be_lun->lun_type;
4348         lun->stats.lun_number = lun_number;
4349         if (lun->stats.device_type == T_DIRECT)
4350                 lun->stats.blocksize = be_lun->blocksize;
4351         else
4352                 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4353         for (i = 0; i < CTL_MAX_PORTS; i++)
4354                 lun->stats.ports[i].targ_port = i;
4355
4356         mtx_unlock(&ctl_softc->ctl_lock);
4357
4358         lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4359
4360         /*
4361          * Run through each registered FETD and bring it online if it isn't
4362          * already.  Enable the target ID if it hasn't been enabled, and
4363          * enable this particular LUN.
4364          */
4365         STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
4366                 int retval;
4367
4368                 /*
4369                  * XXX KDM this only works for ONE TARGET ID.  We'll need
4370                  * to do things differently if we go to a multiple target
4371                  * ID scheme.
4372                  */
4373                 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) {
4374
4375                         retval = fe->targ_enable(fe->targ_lun_arg, target_id);
4376                         if (retval != 0) {
4377                                 printf("ctl_alloc_lun: FETD %s port %d "
4378                                        "returned error %d for targ_enable on "
4379                                        "target %ju\n", fe->port_name,
4380                                        fe->targ_port, retval,
4381                                        (uintmax_t)target_id.id);
4382                         } else
4383                                 fe->status |= CTL_PORT_STATUS_TARG_ONLINE;
4384                 }
4385
4386                 retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number);
4387                 if (retval != 0) {
4388                         printf("ctl_alloc_lun: FETD %s port %d returned error "
4389                                "%d for lun_enable on target %ju lun %d\n",
4390                                fe->port_name, fe->targ_port, retval,
4391                                (uintmax_t)target_id.id, lun_number);
4392                 } else
4393                         fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
4394         }
4395         return (0);
4396 }
4397
4398 /*
4399  * Delete a LUN.
4400  * Assumptions:
4401  * - LUN has already been marked invalid and any pending I/O has been taken
4402  *   care of.
4403  */
4404 static int
4405 ctl_free_lun(struct ctl_lun *lun)
4406 {
4407         struct ctl_softc *softc;
4408 #if 0
4409         struct ctl_frontend *fe;
4410 #endif
4411         struct ctl_lun *nlun;
4412         union ctl_io *io, *next_io;
4413         int i;
4414
4415         softc = lun->ctl_softc;
4416
4417         mtx_assert(&softc->ctl_lock, MA_OWNED);
4418
4419         STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4420
4421         ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4422
4423         softc->ctl_luns[lun->lun] = NULL;
4424
4425         if (TAILQ_FIRST(&lun->ooa_queue) != NULL) {
4426                 printf("ctl_free_lun: aieee!! freeing a LUN with "
4427                        "outstanding I/O!!\n");
4428         }
4429
4430         /*
4431          * If we have anything pending on the RtR queue, remove it.
4432          */
4433         for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL;
4434              io = next_io) {
4435                 uint32_t targ_lun;
4436
4437                 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
4438                 targ_lun = io->io_hdr.nexus.targ_lun;
4439                 if (io->io_hdr.nexus.lun_map_fn != NULL)
4440                         targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
4441                 if ((io->io_hdr.nexus.targ_target.id == lun->target.id)
4442                  && (targ_lun == lun->lun))
4443                         STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
4444                                       ctl_io_hdr, links);
4445         }
4446
4447         /*
4448          * Then remove everything from the blocked queue.
4449          */
4450         for (io = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); io != NULL;
4451              io = next_io) {
4452                 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,blocked_links);
4453                 TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr, blocked_links);
4454                 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
4455         }
4456
4457         /*
4458          * Now clear out the OOA queue, and free all the I/O.
4459          * XXX KDM should we notify the FETD here?  We probably need to
4460          * quiesce the LUN before deleting it.
4461          */
4462         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL;
4463              io = next_io) {
4464                 next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links);
4465                 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
4466                 ctl_free_io(io);
4467         }
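/*
 * Illustrative standalone sketch (not part of ctl.c): the drain loops
 * above save the next pointer before unlinking the current entry.  On
 * FreeBSD, <sys/queue.h> provides TAILQ_FOREACH_SAFE to express the
 * same remove-while-iterating pattern; the demo types are placeholders.
 */
#include <sys/queue.h>

struct demo_io {
	TAILQ_ENTRY(demo_io) links;
};
TAILQ_HEAD(demo_io_list, demo_io);

static void
demo_drain(struct demo_io_list *list)
{
	struct demo_io *io, *next_io;

	TAILQ_FOREACH_SAFE(io, list, links, next_io) {
		TAILQ_REMOVE(list, io, links);
		/* Release the entry here, e.g. free(io). */
	}
}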
4468
4469         softc->num_luns--;
4470
4471         /*
4472          * XXX KDM this scheme only works for a single target/multiple LUN
4473          * setup.  It needs to be revamped for a multiple target scheme.
4474          *
4475          * XXX KDM this results in fe->lun_disable() getting called twice,
4476          * once when ctl_disable_lun() is called, and a second time here.
4477          * We really need to re-think the LUN disable semantics.  There
4478          * should probably be several steps/levels to LUN removal:
4479          *  - disable
4480          *  - invalidate
4481          *  - free
4482          *
4483          * Right now we only have a disable method when communicating to
4484          * the front end ports, at least for individual LUNs.
4485          */
4486 #if 0
4487         STAILQ_FOREACH(fe, &softc->fe_list, links) {
4488                 int retval;
4489
4490                 retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
4491                                          lun->lun);
4492                 if (retval != 0) {
4493                         printf("ctl_free_lun: FETD %s port %d returned error "
4494                                "%d for lun_disable on target %ju lun %jd\n",
4495                                fe->port_name, fe->targ_port, retval,
4496                                (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4497                 }
4498
4499                 if (STAILQ_FIRST(&softc->lun_list) == NULL) {
4500                         fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE;
4501
4502                         retval = fe->targ_disable(fe->targ_lun_arg,lun->target);
4503                         if (retval != 0) {
4504                                 printf("ctl_free_lun: FETD %s port %d "
4505                                        "returned error %d for targ_disable on "
4506                                        "target %ju\n", fe->port_name,
4507                                        fe->targ_port, retval,
4508                                        (uintmax_t)lun->target.id);
4509                         } else
4510                                 fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE;
4511
4512                         if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0)
4513                                 continue;
4514
4515 #if 0
4516                         fe->port_offline(fe->onoff_arg);
4517                         fe->status &= ~CTL_PORT_STATUS_ONLINE;
4518 #endif
4519                 }
4520         }
4521 #endif
4522
4523         /*
4524          * Tell the backend to free resources, if this LUN has a backend.
4525          */
4526         atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4527         lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4528
4529         if (lun->flags & CTL_LUN_MALLOCED)
4530                 free(lun, M_CTL);
4531
4532         STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4533                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
4534                         nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE;
4535                 }
4536         }
4537
4538         return (0);
4539 }
4540
4541 static void
4542 ctl_create_lun(struct ctl_be_lun *be_lun)
4543 {
4544         struct ctl_softc *ctl_softc;
4545
4546         ctl_softc = control_softc;
4547
4548         /*
4549          * ctl_alloc_lun() should handle all potential failure cases.
4550          */
4551         ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target);
4552 }
4553
4554 int
4555 ctl_add_lun(struct ctl_be_lun *be_lun)
4556 {
4557         struct ctl_softc *ctl_softc;
4558
4559         ctl_softc = control_softc;
4560
4561         mtx_lock(&ctl_softc->ctl_lock);
4562         STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links);
4563         mtx_unlock(&ctl_softc->ctl_lock);
4564
4565         ctl_wakeup_thread();
4566
4567         return (0);
4568 }
4569
4570 int
4571 ctl_enable_lun(struct ctl_be_lun *be_lun)
4572 {
4573         struct ctl_softc *ctl_softc;
4574         struct ctl_frontend *fe, *nfe;
4575         struct ctl_lun *lun;
4576         int retval;
4577
4578         ctl_softc = control_softc;
4579
4580         lun = (struct ctl_lun *)be_lun->ctl_lun;
4581
4582         mtx_lock(&ctl_softc->ctl_lock);
4583         if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4584                 /*
4585                  * eh?  Why did we get called if the LUN is already
4586                  * enabled?
4587                  */
4588                 mtx_unlock(&ctl_softc->ctl_lock);
4589                 return (0);
4590         }
4591         lun->flags &= ~CTL_LUN_DISABLED;
4592
4593         for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) {
4594                 nfe = STAILQ_NEXT(fe, links);
4595
4596                 /*
4597                  * Drop the lock while we call the FETD's enable routine.
4598                  * This can lead to a callback into CTL (at least in the
4599                  * case of the internal initiator frontend.)
4600                  */
4601                 mtx_unlock(&ctl_softc->ctl_lock);
4602                 retval = fe->lun_enable(fe->targ_lun_arg, lun->target,lun->lun);
4603                 mtx_lock(&ctl_softc->ctl_lock);
4604                 if (retval != 0) {
4605                         printf("%s: FETD %s port %d returned error "
4606                                "%d for lun_enable on target %ju lun %jd\n",
4607                                __func__, fe->port_name, fe->targ_port, retval,
4608                                (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4609                 }
4610 #if 0
4611                 else {
4612                         /* NOTE: TODO: why does lun enable affect port status? */
4613                         fe->status |= CTL_PORT_STATUS_LUN_ONLINE;
4614                 }
4615 #endif
4616         }
4617
4618         mtx_unlock(&ctl_softc->ctl_lock);
4619
4620         return (0);
4621 }
4622
4623 int
4624 ctl_disable_lun(struct ctl_be_lun *be_lun)
4625 {
4626         struct ctl_softc *ctl_softc;
4627         struct ctl_frontend *fe;
4628         struct ctl_lun *lun;
4629         int retval;
4630
4631         ctl_softc = control_softc;
4632
4633         lun = (struct ctl_lun *)be_lun->ctl_lun;
4634
4635         mtx_lock(&ctl_softc->ctl_lock);
4636
4637         if (lun->flags & CTL_LUN_DISABLED) {
4638                 mtx_unlock(&ctl_softc->ctl_lock);
4639                 return (0);
4640         }
4641         lun->flags |= CTL_LUN_DISABLED;
4642
4643         STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) {
4644                 mtx_unlock(&ctl_softc->ctl_lock);
4645                 /*
4646                  * Drop the lock before we call the frontend's disable
4647                  * routine, to avoid lock order reversals.
4648                  *
4649                  * XXX KDM what happens if the frontend list changes while
4650                  * we're traversing it?  It's unlikely, but should be handled.
4651                  */
4652                 retval = fe->lun_disable(fe->targ_lun_arg, lun->target,
4653                                          lun->lun);
4654                 mtx_lock(&ctl_softc->ctl_lock);
4655                 if (retval != 0) {
4656                         printf("ctl_disable_lun: FETD %s port %d returned error "
4657                                "%d for lun_disable on target %ju lun %jd\n",
4658                                fe->port_name, fe->targ_port, retval,
4659                                (uintmax_t)lun->target.id, (intmax_t)lun->lun);
4660                 }
4661         }
4662
4663         mtx_unlock(&ctl_softc->ctl_lock);
4664
4665         return (0);
4666 }
4667
4668 int
4669 ctl_start_lun(struct ctl_be_lun *be_lun)
4670 {
4671         struct ctl_softc *ctl_softc;
4672         struct ctl_lun *lun;
4673
4674         ctl_softc = control_softc;
4675
4676         lun = (struct ctl_lun *)be_lun->ctl_lun;
4677
4678         mtx_lock(&ctl_softc->ctl_lock);
4679         lun->flags &= ~CTL_LUN_STOPPED;
4680         mtx_unlock(&ctl_softc->ctl_lock);
4681
4682         return (0);
4683 }
4684
4685 int
4686 ctl_stop_lun(struct ctl_be_lun *be_lun)
4687 {
4688         struct ctl_softc *ctl_softc;
4689         struct ctl_lun *lun;
4690
4691         ctl_softc = control_softc;
4692
4693         lun = (struct ctl_lun *)be_lun->ctl_lun;
4694
4695         mtx_lock(&ctl_softc->ctl_lock);
4696         lun->flags |= CTL_LUN_STOPPED;
4697         mtx_unlock(&ctl_softc->ctl_lock);
4698
4699         return (0);
4700 }
4701
4702 int
4703 ctl_lun_offline(struct ctl_be_lun *be_lun)
4704 {
4705         struct ctl_softc *ctl_softc;
4706         struct ctl_lun *lun;
4707
4708         ctl_softc = control_softc;
4709
4710         lun = (struct ctl_lun *)be_lun->ctl_lun;
4711
4712         mtx_lock(&ctl_softc->ctl_lock);
4713         lun->flags |= CTL_LUN_OFFLINE;
4714         mtx_unlock(&ctl_softc->ctl_lock);
4715
4716         return (0);
4717 }
4718
4719 int
4720 ctl_lun_online(struct ctl_be_lun *be_lun)
4721 {
4722         struct ctl_softc *ctl_softc;
4723         struct ctl_lun *lun;
4724
4725         ctl_softc = control_softc;
4726
4727         lun = (struct ctl_lun *)be_lun->ctl_lun;
4728
4729         mtx_lock(&ctl_softc->ctl_lock);
4730         lun->flags &= ~CTL_LUN_OFFLINE;
4731         mtx_unlock(&ctl_softc->ctl_lock);
4732
4733         return (0);
4734 }
4735
4736 int
4737 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4738 {
4739         struct ctl_softc *ctl_softc;
4740         struct ctl_lun *lun;
4741
4742         ctl_softc = control_softc;
4743
4744         lun = (struct ctl_lun *)be_lun->ctl_lun;
4745
4746         mtx_lock(&ctl_softc->ctl_lock);
4747
4748         /*
4749          * The LUN needs to be disabled before it can be marked invalid.
4750          */
4751         if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4752                 mtx_unlock(&ctl_softc->ctl_lock);
4753                 return (-1);
4754         }
4755         /*
4756          * Mark the LUN invalid.
4757          */
4758         lun->flags |= CTL_LUN_INVALID;
4759
4760         /*
4761          * If there is nothing in the OOA queue, go ahead and free the LUN.
4762          * If we have something in the OOA queue, we'll free it when the
4763          * last I/O completes.
4764          */
4765         if (TAILQ_FIRST(&lun->ooa_queue) == NULL)
4766                 ctl_free_lun(lun);
4767         mtx_unlock(&ctl_softc->ctl_lock);
4768
4769         return (0);
4770 }
4771
4772 int
4773 ctl_lun_inoperable(struct ctl_be_lun *be_lun)
4774 {
4775         struct ctl_softc *ctl_softc;
4776         struct ctl_lun *lun;
4777
4778         ctl_softc = control_softc;
4779         lun = (struct ctl_lun *)be_lun->ctl_lun;
4780
4781         mtx_lock(&ctl_softc->ctl_lock);
4782         lun->flags |= CTL_LUN_INOPERABLE;
4783         mtx_unlock(&ctl_softc->ctl_lock);
4784
4785         return (0);
4786 }
4787
4788 int
4789 ctl_lun_operable(struct ctl_be_lun *be_lun)
4790 {
4791         struct ctl_softc *ctl_softc;
4792         struct ctl_lun *lun;
4793
4794         ctl_softc = control_softc;
4795         lun = (struct ctl_lun *)be_lun->ctl_lun;
4796
4797         mtx_lock(&ctl_softc->ctl_lock);
4798         lun->flags &= ~CTL_LUN_INOPERABLE;
4799         mtx_unlock(&ctl_softc->ctl_lock);
4800
4801         return (0);
4802 }
4803
4804 int
4805 ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus,
4806                    int lock)
4807 {
4808         struct ctl_softc *softc;
4809         struct ctl_lun *lun;
4810         struct copan_aps_subpage *current_sp;
4811         struct ctl_page_index *page_index;
4812         int i;
4813
4814         softc = control_softc;
4815
4816         mtx_lock(&softc->ctl_lock);
4817
4818         lun = (struct ctl_lun *)be_lun->ctl_lun;
4819
4820         page_index = NULL;
4821         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
4822                 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
4823                      APS_PAGE_CODE)
4824                         continue;
4825
4826                 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE)
4827                         continue;
4828                 page_index = &lun->mode_pages.index[i];
4829         }
4830
4831         if (page_index == NULL) {
4832                 mtx_unlock(&softc->ctl_lock);
4833                 printf("%s: APS subpage not found for lun %ju!\n", __func__,
4834                        (uintmax_t)lun->lun);
4835                 return (1);
4836         }
4837 #if 0
4838         if ((softc->aps_locked_lun != 0)
4839          && (softc->aps_locked_lun != lun->lun)) {
4840                 printf("%s: attempt to lock LUN %ju when %ju is locked\n",
4841                        __func__, (uintmax_t)lun->lun, (uintmax_t)softc->aps_locked_lun);
4842                 mtx_unlock(&softc->ctl_lock);
4843                 return (1);
4844         }
4845 #endif
4846
4847         current_sp = (struct copan_aps_subpage *)(page_index->page_data +
4848                 (page_index->page_len * CTL_PAGE_CURRENT));
4849
4850         if (lock != 0) {
4851                 current_sp->lock_active = APS_LOCK_ACTIVE;
4852                 softc->aps_locked_lun = lun->lun;
4853         } else {
4854                 current_sp->lock_active = 0;
4855                 softc->aps_locked_lun = 0;
4856         }
4857
4858
4859         /*
4860          * If we're in HA mode, try to send the lock message to the other
4861          * side.
4862          */
4863         if (ctl_is_single == 0) {
4864                 int isc_retval;
4865                 union ctl_ha_msg lock_msg;
4866
4867                 lock_msg.hdr.nexus = *nexus;
4868                 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK;
4869                 if (lock != 0)
4870                         lock_msg.aps.lock_flag = 1;
4871                 else
4872                         lock_msg.aps.lock_flag = 0;
4873                 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg,
4874                                          sizeof(lock_msg), 0);
4875                 if (isc_retval > CTL_HA_STATUS_SUCCESS) {
4876                         printf("%s: APS (lock=%d) error returned from "
4877                                "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval);
4878                         mtx_unlock(&softc->ctl_lock);
4879                         return (1);
4880                 }
4881         }
4882
4883         mtx_unlock(&softc->ctl_lock);
4884
4885         return (0);
4886 }
4887
4888 void
4889 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
4890 {
4891         struct ctl_lun *lun;
4892         struct ctl_softc *softc;
4893         int i;
4894
4895         softc = control_softc;
4896
4897         mtx_lock(&softc->ctl_lock);
4898
4899         lun = (struct ctl_lun *)be_lun->ctl_lun;
4900
4901         for (i = 0; i < CTL_MAX_INITIATORS; i++) 
4902                 lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED;
4903
4904         mtx_unlock(&softc->ctl_lock);
4905 }
4906
4907 /*
4908  * Backend "memory move is complete" callback for requests that never
4909  * make it down to, say, RAIDCore's configuration code.
4910  */
4911 int
4912 ctl_config_move_done(union ctl_io *io)
4913 {
4914         int retval;
4915
4916         retval = CTL_RETVAL_COMPLETE;
4917
4918
4919         CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
4920         /*
4921          * XXX KDM this shouldn't happen, but what if it does?
4922          */
4923         if (io->io_hdr.io_type != CTL_IO_SCSI)
4924                 panic("I/O type isn't CTL_IO_SCSI!");
4925
4926         if ((io->io_hdr.port_status == 0)
4927          && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
4928          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
4929                 io->io_hdr.status = CTL_SUCCESS;
4930         else if ((io->io_hdr.port_status != 0)
4931               && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
4932               && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
4933                 /*
4934                  * For hardware error sense keys, the sense key
4935                  * specific value is defined to be a retry count,
4936                  * but we use it to pass back an internal FETD
4937                  * error code.  XXX KDM  Hopefully the FETD is only
4938                  * using 16 bits for an error code, since that's
4939                  * all the space we have in the sks field.
4940                  */
4941                 ctl_set_internal_failure(&io->scsiio,
4942                                          /*sks_valid*/ 1,
4943                                          /*retry_count*/
4944                                          io->io_hdr.port_status);
4945                 free(io->scsiio.kern_data_ptr, M_CTL);
4946                 ctl_done(io);
4947                 goto bailout;
4948         }
4949
4950         if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
4951          || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
4952          || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
4953                 /*
4954                  * XXX KDM just assuming a single pointer here, and not a
4955                  * S/G list.  If we start using S/G lists for config data,
4956                  * we'll need to know how to clean them up here as well.
4957                  */
4958                 free(io->scsiio.kern_data_ptr, M_CTL);
4959                 /* Hopefully the user has already set the status... */
4960                 ctl_done(io);
4961         } else {
4962                 /*
4963                  * XXX KDM now we need to continue data movement.  Some
4964                  * options:
4965                  * - call ctl_scsiio() again?  We don't do this for data
4966                  *   writes, because for those at least we know ahead of
4967                  *   time where the write will go and how long it is.  For
4968                  *   config writes, though, that information is largely
4969                  *   contained within the write itself, thus we need to
4970                  *   parse out the data again.
4971                  *
4972                  * - Call some other function once the data is in?
4973                  */
4974
4975                 /*
4976                  * XXX KDM call ctl_scsiio() again for now, and check flag
4977                  * bits to see whether we're allocated or not.
4978                  */
4979                 retval = ctl_scsiio(&io->scsiio);
4980         }
4981 bailout:
4982         return (retval);
4983 }
4984
4985 /*
4986  * This gets called by a backend driver when it is done with a
4987  * configuration write.
4988  */
4989 void
4990 ctl_config_write_done(union ctl_io *io)
4991 {
4992         /*
4993          * If the IO_CONT flag is set, we need to call the supplied
4994          * function to continue processing the I/O, instead of completing
4995          * the I/O just yet.
4996          *
4997          * If there is an error, though, we don't want to keep processing.
4998          * Instead, just send status back to the initiator.
4999          */
5000         if ((io->io_hdr.flags & CTL_FLAG_IO_CONT)
5001          && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)
5002           || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) {
5003                 io->scsiio.io_cont(io);
5004                 return;
5005         }
5006         /*
5007          * Since a configuration write can be done for commands that actually
5008          * have data allocated, like write buffer, and commands that have
5009          * no data, like start/stop unit, we need to check here.
5010          */
5011         if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
5012                 free(io->scsiio.kern_data_ptr, M_CTL);
5013         ctl_done(io);
5014 }
5015
5016 /*
5017  * SCSI release command.
5018  */
5019 int
5020 ctl_scsi_release(struct ctl_scsiio *ctsio)
5021 {
5022         int length, longid, resv_id;
5023         uint64_t thirdparty_id;
5024         struct ctl_softc *ctl_softc;
5025         struct ctl_lun *lun;
5026
5027         length = longid = resv_id = 0;
5028         thirdparty_id = 0;
5029         CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5030
5031         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5032         ctl_softc = control_softc;
5033
5034         switch (ctsio->cdb[0]) {
5035         case RELEASE: {
5036                 struct scsi_release *cdb;
5037
5038                 cdb = (struct scsi_release *)ctsio->cdb;
5039                 if ((cdb->byte2 & 0x1f) != 0) {
5040                         ctl_set_invalid_field(ctsio,
5041                                               /*sks_valid*/ 1,
5042                                               /*command*/ 1,
5043                                               /*field*/ 1,
5044                                               /*bit_valid*/ 0,
5045                                               /*bit*/ 0);
5046                         ctl_done((union ctl_io *)ctsio);
5047                         return (CTL_RETVAL_COMPLETE);
5048                 }
5049                 break;
5050         }
5051         case RELEASE_10: {
5052                 struct scsi_release_10 *cdb;
5053
5054                 cdb = (struct scsi_release_10 *)ctsio->cdb;
5055
5056                 if ((cdb->byte2 & SR10_EXTENT) != 0) {
5057                         ctl_set_invalid_field(ctsio,
5058                                               /*sks_valid*/ 1,
5059                                               /*command*/ 1,
5060                                               /*field*/ 1,
5061                                               /*bit_valid*/ 1,
5062                                               /*bit*/ 0);
5063                         ctl_done((union ctl_io *)ctsio);
5064                         return (CTL_RETVAL_COMPLETE);
5065
5066                 }
5067
5068                 if ((cdb->byte2 & SR10_3RDPTY) != 0) {
5069                         ctl_set_invalid_field(ctsio,
5070                                               /*sks_valid*/ 1,
5071                                               /*command*/ 1,
5072                                               /*field*/ 1,
5073                                               /*bit_valid*/ 1,
5074                                               /*bit*/ 4);
5075                         ctl_done((union ctl_io *)ctsio);
5076                         return (CTL_RETVAL_COMPLETE);
5077                 }
5078
5079                 if (cdb->byte2 & SR10_LONGID)
5080                         longid = 1;
5081                 else
5082                         thirdparty_id = cdb->thirdparty_id;
5083
5084                 resv_id = cdb->resv_id;
5085                 length = scsi_2btoul(cdb->length);
5086                 break;
5087         }
5088         }
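/*
 * Illustrative standalone sketch (not part of ctl.c): scsi_2btoul() and
 * scsi_8btou64(), used in the CDB parsing above and below, decode
 * big-endian CDB fields into host integers, roughly as follows.  The
 * demo_* names are placeholders.
 */
#include <stdint.h>

static uint32_t
demo_2btoul(const uint8_t *bytes)
{
	return (((uint32_t)bytes[0] << 8) | bytes[1]);
}

static uint64_t
demo_8btou64(const uint8_t *bytes)
{
	uint64_t rv = 0;
	int i;

	for (i = 0; i < 8; i++)
		rv = (rv << 8) | bytes[i];
	return (rv);
}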
5089
5090
5091         /*
5092          * XXX KDM right now, we only support LUN reservation.  We don't
5093          * support 3rd party reservations, or extent reservations, which
5094          * might actually need the parameter list.  If we've gotten this
5095          * far, we've got a LUN reservation.  Anything else got kicked out
5096          * above.  So, according to SPC, ignore the length.
5097          */
5098         length = 0;
5099
5100         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5101          && (length > 0)) {
5102                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5103                 ctsio->kern_data_len = length;
5104                 ctsio->kern_total_len = length;
5105                 ctsio->kern_data_resid = 0;
5106                 ctsio->kern_rel_offset = 0;
5107                 ctsio->kern_sg_entries = 0;
5108                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5109                 ctsio->be_move_done = ctl_config_move_done;
5110                 ctl_datamove((union ctl_io *)ctsio);
5111
5112                 return (CTL_RETVAL_COMPLETE);
5113         }
5114
5115         if (length > 0)
5116                 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5117
5118         mtx_lock(&ctl_softc->ctl_lock);
5119
5120         /*
5121          * According to SPC, it is not an error for an initiator to attempt
5122          * to release a reservation on a LUN that isn't reserved, or that
5123          * is reserved by another initiator.  The reservation can only be
5124          * released, though, by the initiator who made it or by one of
5125          * several reset type events.
5126          */
5127         if (lun->flags & CTL_LUN_RESERVED) {
5128                 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id)
5129                  && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port)
5130                  && (ctsio->io_hdr.nexus.targ_target.id ==
5131                      lun->rsv_nexus.targ_target.id)) {
5132                         lun->flags &= ~CTL_LUN_RESERVED;
5133                 }
5134         }
5135
5136         ctsio->scsi_status = SCSI_STATUS_OK;
5137         ctsio->io_hdr.status = CTL_SUCCESS;
5138
5139         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5140                 free(ctsio->kern_data_ptr, M_CTL);
5141                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5142         }
5143
5144         mtx_unlock(&ctl_softc->ctl_lock);
5145
5146         ctl_done((union ctl_io *)ctsio);
5147         return (CTL_RETVAL_COMPLETE);
5148 }
5149
5150 int
5151 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5152 {
5153         int extent, thirdparty, longid;
5154         int resv_id, length;
5155         uint64_t thirdparty_id;
5156         struct ctl_softc *ctl_softc;
5157         struct ctl_lun *lun;
5158
5159         extent = 0;
5160         thirdparty = 0;
5161         longid = 0;
5162         resv_id = 0;
5163         length = 0;
5164         thirdparty_id = 0;
5165
5166         CTL_DEBUG_PRINT(("ctl_scsi_reserve\n"));
5167
5168         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5169         ctl_softc = control_softc;
5170
5171         switch (ctsio->cdb[0]) {
5172         case RESERVE: {
5173                 struct scsi_reserve *cdb;
5174
5175                 cdb = (struct scsi_reserve *)ctsio->cdb;
5176                 if ((cdb->byte2 & 0x1f) != 0) {
5177                         ctl_set_invalid_field(ctsio,
5178                                               /*sks_valid*/ 1,
5179                                               /*command*/ 1,
5180                                               /*field*/ 1,
5181                                               /*bit_valid*/ 0,
5182                                               /*bit*/ 0);
5183                         ctl_done((union ctl_io *)ctsio);
5184                         return (CTL_RETVAL_COMPLETE);
5185                 }
5186                 resv_id = cdb->resv_id;
5187                 length = scsi_2btoul(cdb->length);
5188                 break;
5189         }
5190         case RESERVE_10: {
5191                 struct scsi_reserve_10 *cdb;
5192
5193                 cdb = (struct scsi_reserve_10 *)ctsio->cdb;
5194
5195                 if ((cdb->byte2 & SR10_EXTENT) != 0) {
5196                         ctl_set_invalid_field(ctsio,
5197                                               /*sks_valid*/ 1,
5198                                               /*command*/ 1,
5199                                               /*field*/ 1,
5200                                               /*bit_valid*/ 1,
5201                                               /*bit*/ 0);
5202                         ctl_done((union ctl_io *)ctsio);
5203                         return (CTL_RETVAL_COMPLETE);
5204                 }
5205                 if ((cdb->byte2 & SR10_3RDPTY) != 0) {
5206                         ctl_set_invalid_field(ctsio,
5207                                               /*sks_valid*/ 1,
5208                                               /*command*/ 1,
5209                                               /*field*/ 1,
5210                                               /*bit_valid*/ 1,
5211                                               /*bit*/ 4);
5212                         ctl_done((union ctl_io *)ctsio);
5213                         return (CTL_RETVAL_COMPLETE);
5214                 }
5215                 if (cdb->byte2 & SR10_LONGID)
5216                         longid = 1;
5217                 else
5218                         thirdparty_id = cdb->thirdparty_id;
5219
5220                 resv_id = cdb->resv_id;
5221                 length = scsi_2btoul(cdb->length);
5222                 break;
5223         }
5224         }
5225
5226         /*
5227          * XXX KDM right now, we only support LUN reservation.  We don't
5228          * support 3rd party reservations, or extent reservations, which
5229          * might actually need the parameter list.  If we've gotten this
5230          * far, we've got a LUN reservation.  Anything else got kicked out
5231          * above.  So, according to SPC, ignore the length.
5232          */
5233         length = 0;
5234
5235         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5236          && (length > 0)) {
5237                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5238                 ctsio->kern_data_len = length;
5239                 ctsio->kern_total_len = length;
5240                 ctsio->kern_data_resid = 0;
5241                 ctsio->kern_rel_offset = 0;
5242                 ctsio->kern_sg_entries = 0;
5243                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5244                 ctsio->be_move_done = ctl_config_move_done;
5245                 ctl_datamove((union ctl_io *)ctsio);
5246
5247                 return (CTL_RETVAL_COMPLETE);
5248         }
5249
5250         if (length > 0)
5251                 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5252
5253         mtx_lock(&ctl_softc->ctl_lock);
5254         if (lun->flags & CTL_LUN_RESERVED) {
5255                 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
5256                  || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
5257                  || (ctsio->io_hdr.nexus.targ_target.id !=
5258                      lun->rsv_nexus.targ_target.id)) {
5259                         ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
5260                         ctsio->io_hdr.status = CTL_SCSI_ERROR;
5261                         goto bailout;
5262                 }
5263         }
5264
5265         lun->flags |= CTL_LUN_RESERVED;
5266         lun->rsv_nexus = ctsio->io_hdr.nexus;
5267
5268         ctsio->scsi_status = SCSI_STATUS_OK;
5269         ctsio->io_hdr.status = CTL_SUCCESS;
5270
5271 bailout:
5272         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5273                 free(ctsio->kern_data_ptr, M_CTL);
5274                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5275         }
5276
5277         mtx_unlock(&ctl_softc->ctl_lock);
5278
5279         ctl_done((union ctl_io *)ctsio);
5280         return (CTL_RETVAL_COMPLETE);
5281 }
5282
5283 int
5284 ctl_start_stop(struct ctl_scsiio *ctsio)
5285 {
5286         struct scsi_start_stop_unit *cdb;
5287         struct ctl_lun *lun;
5288         struct ctl_softc *ctl_softc;
5289         int retval;
5290
5291         CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5292
5293         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5294         ctl_softc = control_softc;
5295         retval = 0;
5296
5297         cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5298
5299         /*
5300          * XXX KDM
5301          * We don't support the immediate bit on a stop unit.  In order to
5302          * do that, we would need to code up a way to know that a stop is
5303          * pending, and hold off any new commands until it completes, one
5304          * way or another.  Then we could accept or reject those commands
5305          * depending on its status.  We would almost need to do the reverse
5306          * of what we do below for an immediate start -- return the copy of
5307          * the ctl_io to the FETD with status to send to the host (and to
5308          * free the copy!) and then free the original I/O once the stop
5309          * actually completes.  That way, the OOA queue mechanism can work
5310          * to block commands that shouldn't proceed.  Another alternative
5311          * would be to put the copy in the queue in place of the original,
5312          * and return the original back to the caller.  That could be
5313          * slightly safer..
5314          * slightly safer.
5315         if ((cdb->byte2 & SSS_IMMED)
5316          && ((cdb->how & SSS_START) == 0)) {
5317                 ctl_set_invalid_field(ctsio,
5318                                       /*sks_valid*/ 1,
5319                                       /*command*/ 1,
5320                                       /*field*/ 1,
5321                                       /*bit_valid*/ 1,
5322                                       /*bit*/ 0);
5323                 ctl_done((union ctl_io *)ctsio);
5324                 return (CTL_RETVAL_COMPLETE);
5325         }
5326
5327         /*
5328          * We don't support the power conditions field.  We need to check
5329          * this prior to checking the load/eject and start/stop bits.
5330          */
5331         if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) {
5332                 ctl_set_invalid_field(ctsio,
5333                                       /*sks_valid*/ 1,
5334                                       /*command*/ 1,
5335                                       /*field*/ 4,
5336                                       /*bit_valid*/ 1,
5337                                       /*bit*/ 4);
5338                 ctl_done((union ctl_io *)ctsio);
5339                 return (CTL_RETVAL_COMPLETE);
5340         }
5341
5342         /*
5343          * Media isn't removable, so we can't load or eject it.
5344          */
5345         if ((cdb->how & SSS_LOEJ) != 0) {
5346                 ctl_set_invalid_field(ctsio,
5347                                       /*sks_valid*/ 1,
5348                                       /*command*/ 1,
5349                                       /*field*/ 4,
5350                                       /*bit_valid*/ 1,
5351                                       /*bit*/ 1);
5352                 ctl_done((union ctl_io *)ctsio);
5353                 return (CTL_RETVAL_COMPLETE);
5354         }
5355
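        /*
         * Persistent reservation check: if the LUN is PR-reserved and this
         * is a stop, reject initiators that aren't registered.  res_type
         * values below 4 are the single-holder reservation types (write
         * exclusive, exclusive access), for which only the reservation
         * holder itself may issue the stop.
         */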
5356         if ((lun->flags & CTL_LUN_PR_RESERVED)
5357          && ((cdb->how & SSS_START) == 0)) {
5358                 uint32_t residx;
5359
5360                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5361                 if (!lun->per_res[residx].registered
5362                  || (lun->pr_res_idx != residx && lun->res_type < 4)) {
5363
5364                         ctl_set_reservation_conflict(ctsio);
5365                         ctl_done((union ctl_io *)ctsio);
5366                         return (CTL_RETVAL_COMPLETE);
5367                 }
5368         }
5369
5370         /*
5371          * If there is no backend on this device, we can't start or stop
5372          * it.  In theory we shouldn't get any start/stop commands in the
5373          * first place at this level if the LUN doesn't have a backend.
5374          * That should get stopped by the command decode code.
5375          */
5376         if (lun->backend == NULL) {
5377                 ctl_set_invalid_opcode(ctsio);
5378                 ctl_done((union ctl_io *)ctsio);
5379                 return (CTL_RETVAL_COMPLETE);
5380         }
5381
5382         /*
5383          * XXX KDM Copan-specific offline behavior.
5384          * Figure out a reasonable way to port this?
5385          */
5386 #ifdef NEEDTOPORT
5387         mtx_lock(&ctl_softc->ctl_lock);
5388
5389         if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
5390          && (lun->flags & CTL_LUN_OFFLINE)) {
5391                 /*
5392                  * If the LUN is offline, and the on/offline bit isn't set,
5393                  * reject the start or stop.  Otherwise, let it through.
5394                  */
5395                 mtx_unlock(&ctl_softc->ctl_lock);
5396                 ctl_set_lun_not_ready(ctsio);
5397                 ctl_done((union ctl_io *)ctsio);
5398         } else {
5399                 mtx_unlock(&ctl_softc->ctl_lock);
5400 #endif /* NEEDTOPORT */
5401                 /*
5402                  * This could be a start or a stop when we're online,
5403                  * or a stop/offline or start/online.  A start or stop when
5404                  * we're offline is covered in the case above.
5405                  */
5406                 /*
5407                  * In the non-immediate case, we send the request to
5408                  * the backend and return status to the user when
5409                  * it is done.
5410                  *
5411                  * In the immediate case, we allocate a new ctl_io
5412                  * to hold a copy of the request, and send that to
5413                  * the backend.  We then set good status on the
5414                  * user's request and return it immediately.
5415                  */
5416                 if (cdb->byte2 & SSS_IMMED) {
5417                         union ctl_io *new_io;
5418
5419                         new_io = ctl_alloc_io(ctsio->io_hdr.pool);
5420                         if (new_io == NULL) {
5421                                 ctl_set_busy(ctsio);
5422                                 ctl_done((union ctl_io *)ctsio);
5423                         } else {
5424                                 ctl_copy_io((union ctl_io *)ctsio,
5425                                             new_io);
5426                                 retval = lun->backend->config_write(new_io);
5427                                 ctl_set_success(ctsio);
5428                                 ctl_done((union ctl_io *)ctsio);
5429                         }
5430                 } else {
5431                         retval = lun->backend->config_write(
5432                                 (union ctl_io *)ctsio);
5433                 }
5434 #ifdef NEEDTOPORT
5435         }
5436 #endif
5437         return (retval);
5438 }
5439
5440 /*
5441  * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5442  * we don't really do anything with the LBA and length fields if the user
5443  * passes them in.  Instead we'll just flush out the cache for the entire
5444  * LUN.
5445  */
5446 int
5447 ctl_sync_cache(struct ctl_scsiio *ctsio)
5448 {
5449         struct ctl_lun *lun;
5450         struct ctl_softc *ctl_softc;
5451         uint64_t starting_lba;
5452         uint32_t block_count;
5453         int reladr, immed;
5454         int retval;
5455
5456         CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5457
5458         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5459         ctl_softc = control_softc;
5460         retval = 0;
5461         reladr = 0;
5462         immed = 0;
5463
5464         switch (ctsio->cdb[0]) {
5465         case SYNCHRONIZE_CACHE: {
5466                 struct scsi_sync_cache *cdb;
5467                 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5468
5469                 if (cdb->byte2 & SSC_RELADR)
5470                         reladr = 1;
5471
5472                 if (cdb->byte2 & SSC_IMMED)
5473                         immed = 1;
5474
5475                 starting_lba = scsi_4btoul(cdb->begin_lba);
5476                 block_count = scsi_2btoul(cdb->lb_count);
5477                 break;
5478         }
5479         case SYNCHRONIZE_CACHE_16: {
5480                 struct scsi_sync_cache_16 *cdb;
5481                 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5482
5483                 if (cdb->byte2 & SSC_RELADR)
5484                         reladr = 1;
5485
5486                 if (cdb->byte2 & SSC_IMMED)
5487                         immed = 1;
5488
5489                 starting_lba = scsi_8btou64(cdb->begin_lba);
5490                 block_count = scsi_4btoul(cdb->lb_count);
5491                 break;
5492         }
5493         default:
5494                 ctl_set_invalid_opcode(ctsio);
5495                 ctl_done((union ctl_io *)ctsio);
5496                 goto bailout;
5497                 break; /* NOTREACHED */
5498         }
5499
5500         if (immed) {
5501                 /*
5502                  * We don't support the immediate bit.  Since it's in the
5503                  * same place for the 10 and 16 byte SYNCHRONIZE CACHE
5504                  * commands, we can just return the same error in either
5505                  * case.
5506                  */
5507                 ctl_set_invalid_field(ctsio,
5508                                       /*sks_valid*/ 1,
5509                                       /*command*/ 1,
5510                                       /*field*/ 1,
5511                                       /*bit_valid*/ 1,
5512                                       /*bit*/ 1);
5513                 ctl_done((union ctl_io *)ctsio);
5514                 goto bailout;
5515         }
5516
5517         if (reladr) {
5518                 /*
5519                  * We don't support the reladr bit either.  It can only be
5520                  * used with linked commands, and we don't support linked
5521                  * commands.  Since the bit is in the same place for the
5522          * 10 and 16 byte SYNCHRONIZE CACHE commands, we can
5523                  * just return the same error in either case.
5524                  */
5525                 ctl_set_invalid_field(ctsio,
5526                                       /*sks_valid*/ 1,
5527                                       /*command*/ 1,
5528                                       /*field*/ 1,
5529                                       /*bit_valid*/ 1,
5530                                       /*bit*/ 0);
5531                 ctl_done((union ctl_io *)ctsio);
5532                 goto bailout;
5533         }
5534
5535         /*
5536          * We check the LBA and length, but don't do anything with them.
5537          * A SYNCHRONIZE CACHE will cause the entire cache for this LUN to
5538          * get flushed.  This check will just help satisfy anyone who wants
5539          * to see an error for an out-of-range LBA.
5540          */
5541         if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5542                 ctl_set_lba_out_of_range(ctsio);
5543                 ctl_done((union ctl_io *)ctsio);
5544                 goto bailout;
5545         }
5546
5547         /*
5548          * If this LUN has no backend, we can't flush the cache anyway.
5549          */
5550         if (lun->backend == NULL) {
5551                 ctl_set_invalid_opcode(ctsio);
5552                 ctl_done((union ctl_io *)ctsio);
5553                 goto bailout;
5554         }
5555
5556         /*
5557          * Check to see whether we're configured to send the SYNCHRONIZE
5558          * CACHE command directly to the back end.
5559          */
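        /*
         * Note that even in that case, the command only reaches the
         * backend once every lun->sync_interval SYNCHRONIZE CACHEs
         * (every time when the interval is 0 or 1); the rest are
         * completed immediately with good status.
         */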
5560         mtx_lock(&ctl_softc->ctl_lock);
5561         if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC)
5562          && (++(lun->sync_count) >= lun->sync_interval)) {
5563                 lun->sync_count = 0;
5564                 mtx_unlock(&ctl_softc->ctl_lock);
5565                 retval = lun->backend->config_write((union ctl_io *)ctsio);
5566         } else {
5567                 mtx_unlock(&ctl_softc->ctl_lock);
5568                 ctl_set_success(ctsio);
5569                 ctl_done((union ctl_io *)ctsio);
5570         }
5571
5572 bailout:
5573
5574         return (retval);
5575 }
5576
5577 int
5578 ctl_format(struct ctl_scsiio *ctsio)
5579 {
5580         struct scsi_format *cdb;
5581         struct ctl_lun *lun;
5582         struct ctl_softc *ctl_softc;
5583         int length, defect_list_len;
5584
5585         CTL_DEBUG_PRINT(("ctl_format\n"));
5586
5587         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5588         ctl_softc = control_softc;
5589
5590         cdb = (struct scsi_format *)ctsio->cdb;
5591
5592         length = 0;
5593         if (cdb->byte2 & SF_FMTDATA) {
5594                 if (cdb->byte2 & SF_LONGLIST)
5595                         length = sizeof(struct scsi_format_header_long);
5596                 else
5597                         length = sizeof(struct scsi_format_header_short);
5598         }
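        /*
         * For reference (per SBC): with FMTDATA set, the data-out phase
         * begins with a parameter list header -- 4 bytes in the short
         * form, 8 bytes in the long form -- which is all we fetch here.
         * The defect list length it carries is validated below.
         */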
5599
5600         if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5601          && (length > 0)) {
5602                 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5603                 ctsio->kern_data_len = length;
5604                 ctsio->kern_total_len = length;
5605                 ctsio->kern_data_resid = 0;
5606                 ctsio->kern_rel_offset = 0;
5607                 ctsio->kern_sg_entries = 0;
5608                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5609                 ctsio->be_move_done = ctl_config_move_done;
5610                 ctl_datamove((union ctl_io *)ctsio);
5611
5612                 return (CTL_RETVAL_COMPLETE);
5613         }
5614
5615         defect_list_len = 0;
5616
5617         if (cdb->byte2 & SF_FMTDATA) {
5618                 if (cdb->byte2 & SF_LONGLIST) {
5619                         struct scsi_format_header_long *header;
5620
5621                         header = (struct scsi_format_header_long *)
5622                                 ctsio->kern_data_ptr;
5623
5624                         defect_list_len = scsi_4btoul(header->defect_list_len);
5625                         if (defect_list_len != 0) {
5626                                 ctl_set_invalid_field(ctsio,
5627                                                       /*sks_valid*/ 1,
5628                                                       /*command*/ 0,
5629                                                       /*field*/ 2,
5630                                                       /*bit_valid*/ 0,
5631                                                       /*bit*/ 0);
5632                                 goto bailout;
5633                         }
5634                 } else {
5635                         struct scsi_format_header_short *header;
5636
5637                         header = (struct scsi_format_header_short *)
5638                                 ctsio->kern_data_ptr;
5639
5640                         defect_list_len = scsi_2btoul(header->defect_list_len);
5641                         if (defect_list_len != 0) {
5642                                 ctl_set_invalid_field(ctsio,
5643                                                       /*sks_valid*/ 1,
5644                                                       /*command*/ 0,
5645                                                       /*field*/ 2,
5646                                                       /*bit_valid*/ 0,
5647                                                       /*bit*/ 0);
5648                                 goto bailout;
5649                         }
5650                 }
5651         }
5652
5653         /*
5654          * The format command will clear out the "Medium format corrupted"
5655          * status if set by the configuration code.  That status is really
5656          * just a way to notify the host that we have lost the media, and
5657          * to get it to issue a command that it thinks is blowing away
5658          * the media.
5659          */
5660         mtx_lock(&ctl_softc->ctl_lock);
5661         lun->flags &= ~CTL_LUN_INOPERABLE;
5662         mtx_unlock(&ctl_softc->ctl_lock);
5663
5664         ctsio->scsi_status = SCSI_STATUS_OK;
5665         ctsio->io_hdr.status = CTL_SUCCESS;
5666 bailout:
5667
5668         if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5669                 free(ctsio->kern_data_ptr, M_CTL);
5670                 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5671         }
5672
5673         ctl_done((union ctl_io *)ctsio);
5674         return (CTL_RETVAL_COMPLETE);
5675 }
5676
5677 int
5678 ctl_write_buffer(struct ctl_scsiio *ctsio)
5679 {
5680         struct scsi_write_buffer *cdb;
5681         struct copan_page_header *header;
5682         struct ctl_lun *lun;
5683         struct ctl_softc *ctl_softc;
5684         int buffer_offset, len;
5685         int retval;
5686
5687         header = NULL;
5688
5689         retval = CTL_RETVAL_COMPLETE;
5690
5691         CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5692
5693         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5694         ctl_softc = control_softc;
5695         cdb = (struct scsi_write_buffer *)ctsio->cdb;
5696
5697         if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
5698                 ctl_set_invalid_field(ctsio,
5699                                       /*sks_valid*/ 1,
5700                                       /*command*/ 1,
5701                                       /*field*/ 1,
5702                                       /*bit_valid*/ 1,
5703                                       /*bit*/ 4);
5704                 ctl_done((union ctl_io *)ctsio);
5705                 return (CTL_RETVAL_COMPLETE);
5706         }
5707         if (cdb->buffer_id != 0) {
5708                 ctl_set_invalid_field(ctsio,
5709                                       /*sks_valid*/ 1,
5710                                       /*command*/ 1,
5711                                       /*field*/ 2,
5712                                       /*bit_valid*/ 0,
5713                                       /*bit*/ 0);
5714                 ctl_done((union ctl_io *)ctsio);
5715                 return (CTL_RETVAL_COMPLETE);
5716         }
5717
5718         len = scsi_3btoul(cdb->length);
5719         buffer_offset = scsi_3btoul(cdb->offset);
5720
5721         if (len > sizeof(lun->write_buffer)) {
5722                 ctl_set_invalid_field(ctsio,
5723                                       /*sks_valid*/ 1,
5724                                       /*command*/ 1,
5725                                       /*field*/ 6,
5726                                       /*bit_valid*/ 0,
5727                                       /*bit*/ 0);
5728                 ctl_done((union ctl_io *)ctsio);
5729                 return (CTL_RETVAL_COMPLETE);
5730         }
5731
5732         if (buffer_offset != 0) {
5733                 ctl_set_invalid_field(ctsio,
5734                                       /*sks_valid*/ 1,
5735                                       /*command*/ 1,
5736                                       /*field*/ 3,
5737                                       /*bit_valid*/ 0,
5738                                       /*bit*/ 0);
5739                 ctl_done((union ctl_io *)ctsio);
5740                 return (CTL_RETVAL_COMPLETE);
5741         }
5742
5743         /*
5744          * If no data buffer has been set up yet, point the request at the
5745          * LUN's write buffer and tell the caller to move the data.
5746          */
5747         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5748                 ctsio->kern_data_ptr = lun->write_buffer;
5749                 ctsio->kern_data_len = len;
5750                 ctsio->kern_total_len = len;
5751                 ctsio->kern_data_resid = 0;
5752                 ctsio->kern_rel_offset = 0;
5753                 ctsio->kern_sg_entries = 0;
5754                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5755                 ctsio->be_move_done = ctl_config_move_done;
5756                 ctl_datamove((union ctl_io *)ctsio);
5757
5758                 return (CTL_RETVAL_COMPLETE);
5759         }
5760
5761         ctl_done((union ctl_io *)ctsio);
5762
5763         return (CTL_RETVAL_COMPLETE);
5764 }
5765
5766 /*
5767  * Note that this function currently doesn't actually do anything inside
5768  * CTL to enforce things if the DQue bit is turned on.
5769  *
5770  * Also note that this function can't be used in the default case, because
5771  * the DQue bit isn't set in the changeable mask for the control mode page
5772  * anyway.  This is just here as an example for how to implement a page
5773  * handler, and a placeholder in case we want to allow the user to turn
5774  * tagged queueing on and off.
5775  *
5776  * The D_SENSE bit handling is functional, however, and will turn
5777  * descriptor sense on and off for a given LUN.
5778  */
5779 int
5780 ctl_control_page_handler(struct ctl_scsiio *ctsio,
5781                          struct ctl_page_index *page_index, uint8_t *page_ptr)
5782 {
5783         struct scsi_control_page *current_cp, *saved_cp, *user_cp;
5784         struct ctl_lun *lun;
5785         struct ctl_softc *softc;
5786         int set_ua;
5787         uint32_t initidx;
5788
5789         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5790         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5791         set_ua = 0;
5792
5793         user_cp = (struct scsi_control_page *)page_ptr;
5794         current_cp = (struct scsi_control_page *)
5795                 (page_index->page_data + (page_index->page_len *
5796                 CTL_PAGE_CURRENT));
5797         saved_cp = (struct scsi_control_page *)
5798                 (page_index->page_data + (page_index->page_len *
5799                 CTL_PAGE_SAVED));
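        /*
         * page_data holds four consecutive copies of the page -- current,
         * changeable, default and saved, in that order -- each page_len
         * bytes long, indexed by the CTL_PAGE_* constants.
         */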
5800
5801         softc = control_softc;
5802
5803         mtx_lock(&softc->ctl_lock);
5804         if (((current_cp->rlec & SCP_DSENSE) == 0)
5805          && ((user_cp->rlec & SCP_DSENSE) != 0)) {
5806                 /*
5807                  * Descriptor sense is currently turned off and the user
5808                  * wants to turn it on.
5809                  */
5810                 current_cp->rlec |= SCP_DSENSE;
5811                 saved_cp->rlec |= SCP_DSENSE;
5812                 lun->flags |= CTL_LUN_SENSE_DESC;
5813                 set_ua = 1;
5814         } else if (((current_cp->rlec & SCP_DSENSE) != 0)
5815                 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
5816                 /*
5817                  * Descriptor sense is currently turned on, and the user
5818                  * wants to turn it off.
5819                  */
5820                 current_cp->rlec &= ~SCP_DSENSE;
5821                 saved_cp->rlec &= ~SCP_DSENSE;
5822                 lun->flags &= ~CTL_LUN_SENSE_DESC;
5823                 set_ua = 1;
5824         }
5825         if (current_cp->queue_flags & SCP_QUEUE_DQUE) {
5826                 if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
5827 #ifdef NEEDTOPORT
5828                         csevent_log(CSC_CTL | CSC_SHELF_SW |
5829                                     CTL_UNTAG_TO_UNTAG,
5830                                     csevent_LogType_Trace,
5831                                     csevent_Severity_Information,
5832                                     csevent_AlertLevel_Green,
5833                                     csevent_FRU_Firmware,
5834                                     csevent_FRU_Unknown,
5835                                     "Received untagged to untagged transition");
5836 #endif /* NEEDTOPORT */
5837                 } else {
5838 #ifdef NEEDTOPORT
5839                         csevent_log(CSC_CTL | CSC_SHELF_SW |
5840                                     CTL_UNTAG_TO_TAG,
5841                                     csevent_LogType_ConfigChange,
5842                                     csevent_Severity_Information,
5843                                     csevent_AlertLevel_Green,
5844                                     csevent_FRU_Firmware,
5845                                     csevent_FRU_Unknown,
5846                                     "Received untagged to tagged "
5847                                     "queueing transition");
5848 #endif /* NEEDTOPORT */
5849
5850                         current_cp->queue_flags &= ~SCP_QUEUE_DQUE;
5851                         saved_cp->queue_flags &= ~SCP_QUEUE_DQUE;
5852                         set_ua = 1;
5853                 }
5854         } else {
5855                 if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
5856 #ifdef NEEDTOPORT
5857                         csevent_log(CSC_CTL | CSC_SHELF_SW |
5858                                     CTL_TAG_TO_UNTAG,
5859                                     csevent_LogType_ConfigChange,
5860                                     csevent_Severity_Warning,
5861                                     csevent_AlertLevel_Yellow,
5862                                     csevent_FRU_Firmware,
5863                                     csevent_FRU_Unknown,
5864                                     "Received tagged queueing to untagged "
5865                                     "transition");
5866 #endif /* NEEDTOPORT */
5867
5868                         current_cp->queue_flags |= SCP_QUEUE_DQUE;
5869                         saved_cp->queue_flags |= SCP_QUEUE_DQUE;
5870                         set_ua = 1;
5871                 } else {
5872 #ifdef NEEDTOPORT
5873                         csevent_log(CSC_CTL | CSC_SHELF_SW |
5874                                     CTL_TAG_TO_TAG,
5875                                     csevent_LogType_Trace,
5876                                     csevent_Severity_Information,
5877                                     csevent_AlertLevel_Green,
5878                                     csevent_FRU_Firmware,
5879                                     csevent_FRU_Unknown,
5880                                     "Received tagged queueing to tagged "
5881                                     "queueing transition");
5882 #endif /* NEEDTOPORT */
5883                 }
5884         }
5885         if (set_ua != 0) {
5886                 int i;
5887                 /*
5888                  * Let other initiators know that the mode
5889                  * parameters for this LUN have changed.
5890                  */
5891                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
5892                         if (i == initidx)
5893                                 continue;
5894
5895                         lun->pending_sense[i].ua_pending |=
5896                                 CTL_UA_MODE_CHANGE;
5897                 }
5898         }
5899         mtx_unlock(&softc->ctl_lock);
5900
5901         return (0);
5902 }
5903
5904 int
5905 ctl_power_sp_handler(struct ctl_scsiio *ctsio,
5906                      struct ctl_page_index *page_index, uint8_t *page_ptr)
5907 {
5908         return (0);
5909 }
5910
5911 int
5912 ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
5913                            struct ctl_page_index *page_index, int pc)
5914 {
5915         struct copan_power_subpage *page;
5916
5917         page = (struct copan_power_subpage *)(page_index->page_data +
5918                 (page_index->page_len * pc));
5919
5920         switch (pc) {
5921         case SMS_PAGE_CTRL_CHANGEABLE >> 6:
5922                 /*
5923          * We don't update the changeable bits for this page.
5924                  */
5925                 break;
5926         case SMS_PAGE_CTRL_CURRENT >> 6:
5927         case SMS_PAGE_CTRL_DEFAULT >> 6:
5928         case SMS_PAGE_CTRL_SAVED >> 6:
5929 #ifdef NEEDTOPORT
5930                 ctl_update_power_subpage(page);
5931 #endif
5932                 break;
5933         default:
5934 #ifdef NEEDTOPORT
5935                 EPRINT(0, "Invalid PC %d!!", pc);
5936 #endif
5937                 break;
5938         }
5939         return (0);
5940 }
5941
5942
5943 int
5944 ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
5945                    struct ctl_page_index *page_index, uint8_t *page_ptr)
5946 {
5947         struct copan_aps_subpage *user_sp;
5948         struct copan_aps_subpage *current_sp;
5949         union ctl_modepage_info *modepage_info;
5950         struct ctl_softc *softc;
5951         struct ctl_lun *lun;
5952         int retval;
5953
5954         retval = CTL_RETVAL_COMPLETE;
5955         current_sp = (struct copan_aps_subpage *)(page_index->page_data +
5956                      (page_index->page_len * CTL_PAGE_CURRENT));
5957         softc = control_softc;
5958         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5959
5960         user_sp = (struct copan_aps_subpage *)page_ptr;
5961
5962         modepage_info = (union ctl_modepage_info *)
5963                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
5964
5965         modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
5966         modepage_info->header.subpage = page_index->subpage;
5967         modepage_info->aps.lock_active = user_sp->lock_active;
5968
5969         mtx_lock(&softc->ctl_lock);
5970
5971         /*
5972          * If there is a request to lock the LUN and another LUN is already
5973          * locked, this is an error.  If the requested LUN is already
5974          * locked, ignore the request.  If no LUN is locked, attempt to
5975          * lock it.  If there is a request to unlock the LUN and the LUN
5976          * is currently locked, attempt to unlock it.  Otherwise (if
5977          * another LUN is locked, or no LUN is locked) ignore the request.
5978          */
5979         if (user_sp->lock_active & APS_LOCK_ACTIVE) {
5980                 if (softc->aps_locked_lun == lun->lun) {
5981                         /*
5982                          * This LUN is already locked, so we're done.
5983                          */
5984                         retval = CTL_RETVAL_COMPLETE;
5985                 } else if (softc->aps_locked_lun == 0) {
5986                         /*
5987                          * No one has the lock, pass the request to the
5988                          * backend.
5989                          */
5990                         retval = lun->backend->config_write(
5991                                 (union ctl_io *)ctsio);
5992                 } else {
5993                         /*
5994                          * Someone else has the lock, throw out the request.
5995                          */
5996                         ctl_set_already_locked(ctsio);
5997                         free(ctsio->kern_data_ptr, M_CTL);
5998                         ctl_done((union ctl_io *)ctsio);
5999
6000                         /*
6001                          * Set the return value so that ctl_do_mode_select()
6002                          * won't try to complete the command.  We already
6003                          * completed it here.
6004                          */
6005                         retval = CTL_RETVAL_ERROR;
6006                 }
6007         } else if (softc->aps_locked_lun == lun->lun) {
6008                 /*
6009                  * This LUN is locked, so pass the unlock request to the
6010                  * backend.
6011                  */
6012                 retval = lun->backend->config_write((union ctl_io *)ctsio);
6013         }
6014         mtx_unlock(&softc->ctl_lock);
6015
6016         return (retval);
6017 }
6018
6019 int
6020 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
6021                                 struct ctl_page_index *page_index,
6022                                 uint8_t *page_ptr)
6023 {
6024         uint8_t *c;
6025         int i;
6026
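        /*
         * ctl_time_io_secs is carried big-endian in the subpage;
         * reassemble the 16-bit value from its two bytes.
         */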
6027         c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
6028         ctl_time_io_secs =
6029                 (c[0] << 8) |
6030                 (c[1] << 0);
6032         CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
6033         printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
6034         printf("page data:");
6035         for (i = 0; i < 8; i++)
6036                 printf(" %.2x", page_ptr[i]);
6037         printf("\n");
6038         return (0);
6039 }
6040
6041 int
6042 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
6043                                struct ctl_page_index *page_index,
6044                                int pc)
6045 {
6046         struct copan_debugconf_subpage *page;
6047
6048         page = (struct copan_debugconf_subpage *)(page_index->page_data +
6049                 (page_index->page_len * pc));
6050
6051         switch (pc) {
6052         case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6053         case SMS_PAGE_CTRL_DEFAULT >> 6:
6054         case SMS_PAGE_CTRL_SAVED >> 6:
6055                 /*
6056                  * We don't update the changeable or default bits for this page.
6057                  */
6058                 break;
6059         case SMS_PAGE_CTRL_CURRENT >> 6:
6060                 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
6061                 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
6062                 break;
6063         default:
6064 #ifdef NEEDTOPORT
6065                 EPRINT(0, "Invalid PC %d!!", pc);
6066 #endif /* NEEDTOPORT */
6067                 break;
6068         }
6069         return (0);
6070 }
6071
6072
6073 static int
6074 ctl_do_mode_select(union ctl_io *io)
6075 {
6076         struct scsi_mode_page_header *page_header;
6077         struct ctl_page_index *page_index;
6078         struct ctl_scsiio *ctsio;
6079         int control_dev, page_len;
6080         int page_len_offset, page_len_size;
6081         union ctl_modepage_info *modepage_info;
6082         struct ctl_lun *lun;
6083         int *len_left, *len_used;
6084         int retval, i;
6085
6086         ctsio = &io->scsiio;
6087         page_index = NULL;
6088         page_len = 0;
6089         retval = CTL_RETVAL_COMPLETE;
6090
6091         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6092
6093         if (lun->be_lun->lun_type != T_DIRECT)
6094                 control_dev = 1;
6095         else
6096                 control_dev = 0;
6097
6098         modepage_info = (union ctl_modepage_info *)
6099                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6100         len_left = &modepage_info->header.len_left;
6101         len_used = &modepage_info->header.len_used;
6102
6103 do_next_page:
6104
6105         page_header = (struct scsi_mode_page_header *)
6106                 (ctsio->kern_data_ptr + *len_used);
6107
6108         if (*len_left == 0) {
6109                 free(ctsio->kern_data_ptr, M_CTL);
6110                 ctl_set_success(ctsio);
6111                 ctl_done((union ctl_io *)ctsio);
6112                 return (CTL_RETVAL_COMPLETE);
6113         } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6114
6115                 free(ctsio->kern_data_ptr, M_CTL);
6116                 ctl_set_param_len_error(ctsio);
6117                 ctl_done((union ctl_io *)ctsio);
6118                 return (CTL_RETVAL_COMPLETE);
6119
6120         } else if ((page_header->page_code & SMPH_SPF)
6121                 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6122
6123                 free(ctsio->kern_data_ptr, M_CTL);
6124                 ctl_set_param_len_error(ctsio);
6125                 ctl_done((union ctl_io *)ctsio);
6126                 return (CTL_RETVAL_COMPLETE);
6127         }
6128
6129
6130         /*
6131          * XXX KDM should we do something with the block descriptor?
6132          */
6133         for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6134
6135                 if ((control_dev != 0)
6136                  && (lun->mode_pages.index[i].page_flags &
6137                      CTL_PAGE_FLAG_DISK_ONLY))
6138                         continue;
6139
6140                 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
6141                     (page_header->page_code & SMPH_PC_MASK))
6142                         continue;
6143
6144                 /*
6145                  * If neither page has a subpage code, then we've got a
6146                  * match.
6147                  */
6148                 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
6149                  && ((page_header->page_code & SMPH_SPF) == 0)) {
6150                         page_index = &lun->mode_pages.index[i];
6151                         page_len = page_header->page_length;
6152                         break;
6153                 }
6154
6155                 /*
6156                  * If both pages have subpages, then the subpage numbers
6157                  * have to match.
6158                  */
6159                 if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
6160                   && (page_header->page_code & SMPH_SPF)) {
6161                         struct scsi_mode_page_header_sp *sph;
6162
6163                         sph = (struct scsi_mode_page_header_sp *)page_header;
6164
6165                         if (lun->mode_pages.index[i].subpage ==
6166                             sph->subpage) {
6167                                 page_index = &lun->mode_pages.index[i];
6168                                 page_len = scsi_2btoul(sph->page_length);
6169                                 break;
6170                         }
6171                 }
6172         }
6173
6174         /*
6175          * If we couldn't find the page, or if we don't have a mode select
6176          * handler for it, send back an error to the user.
6177          */
6178         if ((page_index == NULL)
6179          || (page_index->select_handler == NULL)) {
6180                 ctl_set_invalid_field(ctsio,
6181                                       /*sks_valid*/ 1,
6182                                       /*command*/ 0,
6183                                       /*field*/ *len_used,
6184                                       /*bit_valid*/ 0,
6185                                       /*bit*/ 0);
6186                 free(ctsio->kern_data_ptr, M_CTL);
6187                 ctl_done((union ctl_io *)ctsio);
6188                 return (CTL_RETVAL_COMPLETE);
6189         }
6190
6191         if (page_index->page_code & SMPH_SPF) {
6192                 page_len_offset = 2;
6193                 page_len_size = 2;
6194         } else {
6195                 page_len_size = 1;
6196                 page_len_offset = 1;
6197         }
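        /*
         * For reference (per SPC-3): a page_0 format header is one byte
         * of PS/SPF/page code followed by a one-byte page length (n - 1),
         * while a subpage format header (SPF set) is page code, subpage
         * code, then a two-byte page length (n - 3) -- which is what the
         * offset/size values above encode.
         */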
6198
6199         /*
6200          * If the length the initiator gives us isn't the one we specify in
6201          * the mode page header, or if there isn't enough parameter data
6202          * left to avoid truncating this page, kick out the request.
6203          */
6204         if ((page_len != (page_index->page_len - page_len_offset -
6205                           page_len_size))
6206          || (*len_left < page_index->page_len)) {
6207
6208
6209                 ctl_set_invalid_field(ctsio,
6210                                       /*sks_valid*/ 1,
6211                                       /*command*/ 0,
6212                                       /*field*/ *len_used + page_len_offset,
6213                                       /*bit_valid*/ 0,
6214                                       /*bit*/ 0);
6215                 free(ctsio->kern_data_ptr, M_CTL);
6216                 ctl_done((union ctl_io *)ctsio);
6217                 return (CTL_RETVAL_COMPLETE);
6218         }
6219
6220         /*
6221          * Run through the mode page, checking to make sure that the bits
6222          * the user changed are actually legal for him to change.
6223          */
6224         for (i = 0; i < page_index->page_len; i++) {
6225                 uint8_t *user_byte, *change_mask, *current_byte;
6226                 int bad_bit;
6227                 int j;
6228
6229                 user_byte = (uint8_t *)page_header + i;
6230                 change_mask = page_index->page_data +
6231                               (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6232                 current_byte = page_index->page_data +
6233                                (page_index->page_len * CTL_PAGE_CURRENT) + i;
6234
6235                 /*
6236                  * Check to see whether the user set any bits in this byte
6237                  * that he is not allowed to set.
6238                  */
6239                 if ((*user_byte & ~(*change_mask)) ==
6240                     (*current_byte & ~(*change_mask)))
6241                         continue;
6242
6243                 /*
6244                  * Go through bit by bit to determine which one is illegal.
6245                  */
6246                 bad_bit = 0;
6247                 for (j = 7; j >= 0; j--) {
6248                         if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6249                             (((1 << j) & ~(*change_mask)) & *current_byte)) {
6250                                 bad_bit = j;
6251                                 break;
6252                         }
6253                 }
6254                 ctl_set_invalid_field(ctsio,
6255                                       /*sks_valid*/ 1,
6256                                       /*command*/ 0,
6257                                       /*field*/ *len_used + i,
6258                                       /*bit_valid*/ 1,
6259                                       /*bit*/ bad_bit);
6260                 free(ctsio->kern_data_ptr, M_CTL);
6261                 ctl_done((union ctl_io *)ctsio);
6262                 return (CTL_RETVAL_COMPLETE);
6263         }
6264
6265         /*
6266          * Decrement these before we call the page handler, since we may
6267          * end up getting called back one way or another before the handler
6268          * returns to this context.
6269          */
6270         *len_left -= page_index->page_len;
6271         *len_used += page_index->page_len;
6272
6273         retval = page_index->select_handler(ctsio, page_index,
6274                                             (uint8_t *)page_header);
6275
6276         /*
6277          * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6278          * wait until this queued command completes to finish processing
6279          * the mode page.  If it returns anything other than
6280          * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6281          * already set the sense information, freed the data pointer, and
6282          * completed the io for us.
6283          */
6284         if (retval != CTL_RETVAL_COMPLETE)
6285                 goto bailout_no_done;
6286
6287         /*
6288          * If the initiator sent us more than one page, parse the next one.
6289          */
6290         if (*len_left > 0)
6291                 goto do_next_page;
6292
6293         ctl_set_success(ctsio);
6294         free(ctsio->kern_data_ptr, M_CTL);
6295         ctl_done((union ctl_io *)ctsio);
6296
6297 bailout_no_done:
6298
6299         return (CTL_RETVAL_COMPLETE);
6300
6301 }
6302
6303 int
6304 ctl_mode_select(struct ctl_scsiio *ctsio)
6305 {
6306         int param_len, pf, sp;
6307         int header_size, bd_len;
6308         int len_left, len_used;
6309         struct ctl_page_index *page_index;
6310         struct ctl_lun *lun;
6311         int control_dev, page_len;
6312         union ctl_modepage_info *modepage_info;
6313         int retval;
6314
6315         pf = 0;
6316         sp = 0;
6317         page_len = 0;
6318         len_used = 0;
6319         len_left = 0;
6320         retval = 0;
6321         bd_len = 0;
6322         page_index = NULL;
6323
6324         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6325
6326         if (lun->be_lun->lun_type != T_DIRECT)
6327                 control_dev = 1;
6328         else
6329                 control_dev = 0;
6330
6331         switch (ctsio->cdb[0]) {
6332         case MODE_SELECT_6: {
6333                 struct scsi_mode_select_6 *cdb;
6334
6335                 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6336
6337                 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6338                 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6339
6340                 param_len = cdb->length;
6341                 header_size = sizeof(struct scsi_mode_header_6);
6342                 break;
6343         }
6344         case MODE_SELECT_10: {
6345                 struct scsi_mode_select_10 *cdb;
6346
6347                 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6348
6349                 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6350                 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6351
6352                 param_len = scsi_2btoul(cdb->length);
6353                 header_size = sizeof(struct scsi_mode_header_10);
6354                 break;
6355         }
6356         default:
6357                 ctl_set_invalid_opcode(ctsio);
6358                 ctl_done((union ctl_io *)ctsio);
6359                 return (CTL_RETVAL_COMPLETE);
6360                 break; /* NOTREACHED */
6361         }
6362
6363         /*
6364          * From SPC-3:
6365          * "A parameter list length of zero indicates that the Data-Out Buffer
6366          * shall be empty. This condition shall not be considered as an error."
6367          */
6368         if (param_len == 0) {
6369                 ctl_set_success(ctsio);
6370                 ctl_done((union ctl_io *)ctsio);
6371                 return (CTL_RETVAL_COMPLETE);
6372         }
6373
6374         /*
6375          * Since we'll hit this the first time through, prior to
6376          * allocation, we don't need to free a data buffer here.
6377          */
6378         if (param_len < header_size) {
6379                 ctl_set_param_len_error(ctsio);
6380                 ctl_done((union ctl_io *)ctsio);
6381                 return (CTL_RETVAL_COMPLETE);
6382         }
6383
6384         /*
6385          * Allocate the data buffer and grab the user's data.  In theory,
6386          * we shouldn't have to sanity check the parameter list length here
6387          * because the maximum size is 64K.  We should be able to malloc
6388          * that much without too many problems.
6389          */
6390         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6391                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6392                 ctsio->kern_data_len = param_len;
6393                 ctsio->kern_total_len = param_len;
6394                 ctsio->kern_data_resid = 0;
6395                 ctsio->kern_rel_offset = 0;
6396                 ctsio->kern_sg_entries = 0;
6397                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6398                 ctsio->be_move_done = ctl_config_move_done;
6399                 ctl_datamove((union ctl_io *)ctsio);
6400
6401                 return (CTL_RETVAL_COMPLETE);
6402         }
6403
6404         switch (ctsio->cdb[0]) {
6405         case MODE_SELECT_6: {
6406                 struct scsi_mode_header_6 *mh6;
6407
6408                 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6409                 bd_len = mh6->blk_desc_len;
6410                 break;
6411         }
6412         case MODE_SELECT_10: {
6413                 struct scsi_mode_header_10 *mh10;
6414
6415                 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6416                 bd_len = scsi_2btoul(mh10->blk_desc_len);
6417                 break;
6418         }
6419         default:
6420                 panic("Invalid CDB type %#x", ctsio->cdb[0]);
6421                 break;
6422         }
6423
6424         if (param_len < (header_size + bd_len)) {
6425                 free(ctsio->kern_data_ptr, M_CTL);
6426                 ctl_set_param_len_error(ctsio);
6427                 ctl_done((union ctl_io *)ctsio);
6428                 return (CTL_RETVAL_COMPLETE);
6429         }
6430
6431         /*
6432          * Set the IO_CONT flag, so that if this I/O gets passed to
6433          * ctl_config_write_done(), it'll get passed back to
6434          * ctl_do_mode_select() for further processing, or completion if
6435          * we're all done.
6436          */
6437         ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6438         ctsio->io_cont = ctl_do_mode_select;
6439
6440         modepage_info = (union ctl_modepage_info *)
6441                 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6442
6443         memset(modepage_info, 0, sizeof(*modepage_info));
6444
6445         len_left = param_len - header_size - bd_len;
6446         len_used = header_size + bd_len;
6447
6448         modepage_info->header.len_left = len_left;
6449         modepage_info->header.len_used = len_used;
6450
6451         return (ctl_do_mode_select((union ctl_io *)ctsio));
6452 }
6453
6454 int
6455 ctl_mode_sense(struct ctl_scsiio *ctsio)
6456 {
6457         struct ctl_lun *lun;
6458         int pc, page_code, dbd, llba, subpage;
6459         int alloc_len, page_len, header_len, total_len;
6460         struct scsi_mode_block_descr *block_desc;
6461         struct ctl_page_index *page_index;
6462         int control_dev;
6463
6464         dbd = 0;
6465         llba = 0;
6466         block_desc = NULL;
6467         page_index = NULL;
6468
6469         CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6470
6471         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6472
6473         if (lun->be_lun->lun_type != T_DIRECT)
6474                 control_dev = 1;
6475         else
6476                 control_dev = 0;
6477
6478         switch (ctsio->cdb[0]) {
6479         case MODE_SENSE_6: {
6480                 struct scsi_mode_sense_6 *cdb;
6481
6482                 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6483
6484                 header_len = sizeof(struct scsi_mode_hdr_6);
6485                 if (cdb->byte2 & SMS_DBD)
6486                         dbd = 1;
6487                 else
6488                         header_len += sizeof(struct scsi_mode_block_descr);
6489
6490                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6491                 page_code = cdb->page & SMS_PAGE_CODE;
6492                 subpage = cdb->subpage;
6493                 alloc_len = cdb->length;
6494                 break;
6495         }
6496         case MODE_SENSE_10: {
6497                 struct scsi_mode_sense_10 *cdb;
6498
6499                 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6500
6501                 header_len = sizeof(struct scsi_mode_hdr_10);
6502
6503                 if (cdb->byte2 & SMS_DBD)
6504                         dbd = 1;
6505                 else
6506                         header_len += sizeof(struct scsi_mode_block_descr);
6507                 if (cdb->byte2 & SMS10_LLBAA)
6508                         llba = 1;
6509                 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6510                 page_code = cdb->page & SMS_PAGE_CODE;
6511                 subpage = cdb->subpage;
6512                 alloc_len = scsi_2btoul(cdb->length);
6513                 break;
6514         }
6515         default:
6516                 ctl_set_invalid_opcode(ctsio);
6517                 ctl_done((union ctl_io *)ctsio);
6518                 return (CTL_RETVAL_COMPLETE);
6519                 break; /* NOTREACHED */
6520         }
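        /*
         * For reference (per SPC-3), the page control (PC) field extracted
         * above selects which instance of each page gets returned:
         * 00b = current, 01b = changeable, 10b = default, 11b = saved.
         * The >> 6 shift maps it onto the CTL_PAGE_* indices.
         */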
6521
6522         /*
6523          * We have to make a first pass through to calculate the size of
6524          * the pages that match the user's query.  Then we allocate enough
6525          * memory to hold it, and actually copy the data into the buffer.
6526          */
6527         switch (page_code) {
6528         case SMS_ALL_PAGES_PAGE: {
6529                 int i;
6530
6531                 page_len = 0;
6532
6533                 /*
6534                  * At the moment, values other than 0 and 0xff here are
6535                  * reserved according to SPC-3.
6536                  */
6537                 if ((subpage != SMS_SUBPAGE_PAGE_0)
6538                  && (subpage != SMS_SUBPAGE_ALL)) {
6539                         ctl_set_invalid_field(ctsio,
6540                                               /*sks_valid*/ 1,
6541                                               /*command*/ 1,
6542                                               /*field*/ 3,
6543                                               /*bit_valid*/ 0,
6544                                               /*bit*/ 0);
6545                         ctl_done((union ctl_io *)ctsio);
6546                         return (CTL_RETVAL_COMPLETE);
6547                 }
6548
6549                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6550                         if ((control_dev != 0)
6551                          && (lun->mode_pages.index[i].page_flags &
6552                              CTL_PAGE_FLAG_DISK_ONLY))
6553                                 continue;
6554
6555                         /*
6556                          * We don't use this subpage if the user didn't
6557                          * request all subpages.
6558                          */
6559                         if ((lun->mode_pages.index[i].subpage != 0)
6560                          && (subpage == SMS_SUBPAGE_PAGE_0))
6561                                 continue;
6562
6563 #if 0
6564                         printf("found page %#x len %d\n",
6565                                lun->mode_pages.index[i].page_code &
6566                                SMPH_PC_MASK,
6567                                lun->mode_pages.index[i].page_len);
6568 #endif
6569                         page_len += lun->mode_pages.index[i].page_len;
6570                 }
6571                 break;
6572         }
6573         default: {
6574                 int i;
6575
6576                 page_len = 0;
6577
6578                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6579                         /* Look for the right page code */
6580                         if ((lun->mode_pages.index[i].page_code &
6581                              SMPH_PC_MASK) != page_code)
6582                                 continue;
6583
6584                         /* Look for the right subpage or the subpage wildcard */
6585                         if ((lun->mode_pages.index[i].subpage != subpage)
6586                          && (subpage != SMS_SUBPAGE_ALL))
6587                                 continue;
6588
6589                         /* Make sure the page is supported for this dev type */
6590                         if ((control_dev != 0)
6591                          && (lun->mode_pages.index[i].page_flags &
6592                              CTL_PAGE_FLAG_DISK_ONLY))
6593                                 continue;
6594
6595 #if 0
6596                         printf("found page %#x len %d\n",
6597                                lun->mode_pages.index[i].page_code &
6598                                SMPH_PC_MASK,
6599                                lun->mode_pages.index[i].page_len);
6600 #endif
6601
6602                         page_len += lun->mode_pages.index[i].page_len;
6603                 }
6604
6605                 if (page_len == 0) {
6606                         ctl_set_invalid_field(ctsio,
6607                                               /*sks_valid*/ 1,
6608                                               /*command*/ 1,
6609                                               /*field*/ 2,
6610                                               /*bit_valid*/ 1,
6611                                               /*bit*/ 5);
6612                         ctl_done((union ctl_io *)ctsio);
6613                         return (CTL_RETVAL_COMPLETE);
6614                 }
6615                 break;
6616         }
6617         }
6618
6619         total_len = header_len + page_len;
6620 #if 0
6621         printf("header_len = %d, page_len = %d, total_len = %d\n",
6622                header_len, page_len, total_len);
6623 #endif
6624
6625         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6626         ctsio->kern_sg_entries = 0;
6627         ctsio->kern_data_resid = 0;
6628         ctsio->kern_rel_offset = 0;
6629         if (total_len < alloc_len) {
6630                 ctsio->residual = alloc_len - total_len;
6631                 ctsio->kern_data_len = total_len;
6632                 ctsio->kern_total_len = total_len;
6633         } else {
6634                 ctsio->residual = 0;
6635                 ctsio->kern_data_len = alloc_len;
6636                 ctsio->kern_total_len = alloc_len;
6637         }
6638
6639         switch (ctsio->cdb[0]) {
6640         case MODE_SENSE_6: {
6641                 struct scsi_mode_hdr_6 *header;
6642
6643                 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6644
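                /*
                 * The one-byte mode data length doesn't count itself,
                 * hence total_len - 1; it is capped at 254 so that the
                 * full mode data (length byte included) still fits in
                 * what MODE SENSE(6) can return.  The 10-byte variant
                 * below likewise uses total_len - 2, capped at 65533.
                 */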
6645                 header->datalen = ctl_min(total_len - 1, 254);
6646
6647                 if (dbd)
6648                         header->block_descr_len = 0;
6649                 else
6650                         header->block_descr_len =
6651                                 sizeof(struct scsi_mode_block_descr);
6652                 block_desc = (struct scsi_mode_block_descr *)&header[1];
6653                 break;
6654         }
6655         case MODE_SENSE_10: {
6656                 struct scsi_mode_hdr_10 *header;
6657                 int datalen;
6658
6659                 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6660
6661                 datalen = ctl_min(total_len - 2, 65533);
6662                 scsi_ulto2b(datalen, header->datalen);
6663                 if (dbd)
6664                         scsi_ulto2b(0, header->block_descr_len);
6665                 else
6666                         scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
6667                                     header->block_descr_len);
6668                 block_desc = (struct scsi_mode_block_descr *)&header[1];
6669                 break;
6670         }
6671         default:
6672                 panic("invalid CDB type %#x", ctsio->cdb[0]);
6673                 break; /* NOTREACHED */
6674         }
6675
6676         /*
6677          * If we've got a disk, use its blocksize in the block
6678          * descriptor.  Otherwise, just set it to 0.
6679          */
6680         if (dbd == 0) {
6681                 if (control_dev == 0)
6682                         scsi_ulto3b(lun->be_lun->blocksize,
6683                                     block_desc->block_len);
6684                 else
6685                         scsi_ulto3b(0, block_desc->block_len);
6686         }
6687
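        /*
         * Assemble the data-in buffer: for SMS_ALL_PAGES_PAGE every
         * supported page is copied in after the header and optional block
         * descriptor in index order; otherwise only the entries matching
         * the requested page code (and subpage, unless the subpage
         * wildcard was specified) are copied in.
         */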
6688         switch (page_code) {
6689         case SMS_ALL_PAGES_PAGE: {
6690                 int i, data_used;
6691
6692                 data_used = header_len;
6693                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6694                         struct ctl_page_index *page_index;
6695
6696                         page_index = &lun->mode_pages.index[i];
6697
6698                         if ((control_dev != 0)
6699                          && (page_index->page_flags &
6700                             CTL_PAGE_FLAG_DISK_ONLY))
6701                                 continue;
6702
6703                         /*
6704                          * We don't use this subpage if the user didn't
6705                          * request all subpages.  We already checked (above)
6706                          * to make sure the user only specified a subpage
6707                          * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6708                          */
6709                         if ((page_index->subpage != 0)
6710                          && (subpage == SMS_SUBPAGE_PAGE_0))
6711                                 continue;
6712
6713                         /*
6714                          * Call the handler, if it exists, to update the
6715                          * page to the latest values.
6716                          */
6717                         if (page_index->sense_handler != NULL)
6718                                 page_index->sense_handler(ctsio, page_index, pc);
6719
6720                         memcpy(ctsio->kern_data_ptr + data_used,
6721                                page_index->page_data +
6722                                (page_index->page_len * pc),
6723                                page_index->page_len);
6724                         data_used += page_index->page_len;
6725                 }
6726                 break;
6727         }
6728         default: {
6729                 int i, data_used;
6730
6731                 data_used = header_len;
6732
6733                 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6734                         struct ctl_page_index *page_index;
6735
6736                         page_index = &lun->mode_pages.index[i];
6737
6738                         /* Look for the right page code */
6739                         if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6740                                 continue;
6741
6742                         /* Look for the right subpage or the subpage wildcard*/
6743                         if ((page_index->subpage != subpage)
6744                          && (subpage != SMS_SUBPAGE_ALL))
6745                                 continue;
6746
6747                         /* Make sure the page is supported for this dev type */
6748                         if ((control_dev != 0)
6749                          && (page_index->page_flags &
6750                              CTL_PAGE_FLAG_DISK_ONLY))
6751                                 continue;
6752
6753                         /*
6754                          * Call the handler, if it exists, to update the
6755                          * page to the latest values.
6756                          */
6757                         if (page_index->sense_handler != NULL)
6758                                 page_index->sense_handler(ctsio, page_index, pc);
6759
6760                         memcpy(ctsio->kern_data_ptr + data_used,
6761                                page_index->page_data +
6762                                (page_index->page_len * pc),
6763                                page_index->page_len);
6764                         data_used += page_index->page_len;
6765                 }
6766                 break;
6767         }
6768         }
6769
6770         ctsio->scsi_status = SCSI_STATUS_OK;
6771
6772         ctsio->be_move_done = ctl_config_move_done;
6773         ctl_datamove((union ctl_io *)ctsio);
6774
6775         return (CTL_RETVAL_COMPLETE);
6776 }
6777
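/*
 * Emulate the READ CAPACITY(10) command.  Returns the LBA of the last
 * logical block and the block length in bytes.  If the last LBA doesn't
 * fit in 32 bits, 0xffffffff is reported so that the initiator knows to
 * issue SERVICE ACTION IN(16) / READ CAPACITY(16) instead.
 */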
6778 int
6779 ctl_read_capacity(struct ctl_scsiio *ctsio)
6780 {
6781         struct scsi_read_capacity *cdb;
6782         struct scsi_read_capacity_data *data;
6783         struct ctl_lun *lun;
6784         uint32_t lba;
6785
6786         CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6787
6788         cdb = (struct scsi_read_capacity *)ctsio->cdb;
6789
6790         lba = scsi_4btoul(cdb->addr);
6791         if (((cdb->pmi & SRC_PMI) == 0)
6792          && (lba != 0)) {
6793                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6794                                       /*sks_valid*/ 1,
6795                                       /*command*/ 1,
6796                                       /*field*/ 2,
6797                                       /*bit_valid*/ 0,
6798                                       /*bit*/ 0);
6799                 ctl_done((union ctl_io *)ctsio);
6800                 return (CTL_RETVAL_COMPLETE);
6801         }
6802
6803         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6804
6805         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6806         data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6807         ctsio->residual = 0;
6808         ctsio->kern_data_len = sizeof(*data);
6809         ctsio->kern_total_len = sizeof(*data);
6810         ctsio->kern_data_resid = 0;
6811         ctsio->kern_rel_offset = 0;
6812         ctsio->kern_sg_entries = 0;
6813
6814         /*
6815          * If the maximum LBA is greater than 0xfffffffe, the user must
6816          * issue a SERVICE ACTION IN (16) command, with the read capacity
6817          * service action set.
6818          */
6819         if (lun->be_lun->maxlba > 0xfffffffe)
6820                 scsi_ulto4b(0xffffffff, data->addr);
6821         else
6822                 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
6823
6824         /*
6825          * XXX KDM this may not be 512 bytes...
6826          */
6827         scsi_ulto4b(lun->be_lun->blocksize, data->length);
6828
6829         ctsio->scsi_status = SCSI_STATUS_OK;
6830
6831         ctsio->be_move_done = ctl_config_move_done;
6832         ctl_datamove((union ctl_io *)ctsio);
6833
6834         return (CTL_RETVAL_COMPLETE);
6835 }
6836
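/*
 * Emulate READ CAPACITY(16), reached through SERVICE ACTION IN(16).
 * Returns the full 64-bit LBA of the last logical block along with the
 * block length, limited to the initiator's allocation length.
 */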
6837 static int
6838 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6839 {
6840         struct scsi_read_capacity_16 *cdb;
6841         struct scsi_read_capacity_data_long *data;
6842         struct ctl_lun *lun;
6843         uint64_t lba;
6844         uint32_t alloc_len;
6845
6846         CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6847
6848         cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6849
6850         alloc_len = scsi_4btoul(cdb->alloc_len);
6851         lba = scsi_8btou64(cdb->addr);
6852
6853         if ((cdb->reladr & SRC16_PMI)
6854          && (lba != 0)) {
6855                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6856                                       /*sks_valid*/ 1,
6857                                       /*command*/ 1,
6858                                       /*field*/ 2,
6859                                       /*bit_valid*/ 0,
6860                                       /*bit*/ 0);
6861                 ctl_done((union ctl_io *)ctsio);
6862                 return (CTL_RETVAL_COMPLETE);
6863         }
6864
6865         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6866
6867         ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6868         data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
6869
6870         if (sizeof(*data) < alloc_len) {
6871                 ctsio->residual = alloc_len - sizeof(*data);
6872                 ctsio->kern_data_len = sizeof(*data);
6873                 ctsio->kern_total_len = sizeof(*data);
6874         } else {
6875                 ctsio->residual = 0;
6876                 ctsio->kern_data_len = alloc_len;
6877                 ctsio->kern_total_len = alloc_len;
6878         }
6879         ctsio->kern_data_resid = 0;
6880         ctsio->kern_rel_offset = 0;
6881         ctsio->kern_sg_entries = 0;
6882
6883         scsi_u64to8b(lun->be_lun->maxlba, data->addr);
6884         /* XXX KDM this may not be 512 bytes... */
6885         scsi_ulto4b(lun->be_lun->blocksize, data->length);
6886
6887         ctsio->scsi_status = SCSI_STATUS_OK;
6888
6889         ctsio->be_move_done = ctl_config_move_done;
6890         ctl_datamove((union ctl_io *)ctsio);
6891
6892         return (CTL_RETVAL_COMPLETE);
6893 }
6894
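/*
 * Dispatch SERVICE ACTION IN(16).  READ CAPACITY(16) is the only
 * service action currently supported; anything else gets an invalid
 * field error.
 */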
6895 int
6896 ctl_service_action_in(struct ctl_scsiio *ctsio)
6897 {
6898         struct scsi_service_action_in *cdb;
6899         int retval;
6900
6901         CTL_DEBUG_PRINT(("ctl_service_action_in\n"));
6902
6903         cdb = (struct scsi_service_action_in *)ctsio->cdb;
6904
6905         retval = CTL_RETVAL_COMPLETE;
6906
6907         switch (cdb->service_action) {
6908         case SRC16_SERVICE_ACTION:
6909                 retval = ctl_read_capacity_16(ctsio);
6910                 break;
6911         default:
6912                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6913                                       /*sks_valid*/ 1,
6914                                       /*command*/ 1,
6915                                       /*field*/ 1,
6916                                       /*bit_valid*/ 1,
6917                                       /*bit*/ 4);
6918                 ctl_done((union ctl_io *)ctsio);
6919                 break;
6920         }
6921
6922         return (retval);
6923 }
6924
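/*
 * Handle MAINTENANCE IN.  Only the REPORT TARGET PORT GROUPS service
 * action is supported.  One fewer target port group is reported when
 * running single-node than when running as an HA pair, and the
 * preferred (primary) state of each group depends on whether this node
 * is primary for the LUN (CTL_LUN_PRIMARY_SC).
 */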
6925 int
6926 ctl_maintenance_in(struct ctl_scsiio *ctsio)
6927 {
6928         struct scsi_maintenance_in *cdb;
6929         int retval;
6930         int alloc_len, total_len = 0;
6931         int num_target_port_groups, single;
6932         struct ctl_lun *lun;
6933         struct ctl_softc *softc;
6934         struct scsi_target_group_data *rtg_ptr;
6935         struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2;
6936         struct scsi_target_port_descriptor  *tp_desc_ptr1_1, *tp_desc_ptr1_2,
6937                                             *tp_desc_ptr2_1, *tp_desc_ptr2_2;
6938
6939         CTL_DEBUG_PRINT(("ctl_maintenance_in\n"));
6940
6941         cdb = (struct scsi_maintenance_in *)ctsio->cdb;
6942         softc = control_softc;
6943         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6944
6945         retval = CTL_RETVAL_COMPLETE;
6946
6947         if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) {
6948                 ctl_set_invalid_field(/*ctsio*/ ctsio,
6949                                       /*sks_valid*/ 1,
6950                                       /*command*/ 1,
6951                                       /*field*/ 1,
6952                                       /*bit_valid*/ 1,
6953                                       /*bit*/ 4);
6954                 ctl_done((union ctl_io *)ctsio);
6955                 return (retval);
6956         }
6957
6958         mtx_lock(&softc->ctl_lock);
6959         single = ctl_is_single;
6960         mtx_unlock(&softc->ctl_lock);
6961
6962         if (single)
6963                 num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1;
6964         else
6965                 num_target_port_groups = NUM_TARGET_PORT_GROUPS;
6966
6967         total_len = sizeof(struct scsi_target_group_data) +
6968                 sizeof(struct scsi_target_port_group_descriptor) *
6969                 num_target_port_groups +
6970                 sizeof(struct scsi_target_port_descriptor) *
6971                 NUM_PORTS_PER_GRP * num_target_port_groups;
6972
6973         alloc_len = scsi_4btoul(cdb->length);
6974
6975         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6976
6977         ctsio->kern_sg_entries = 0;
6978
6979         if (total_len < alloc_len) {
6980                 ctsio->residual = alloc_len - total_len;
6981                 ctsio->kern_data_len = total_len;
6982                 ctsio->kern_total_len = total_len;
6983         } else {
6984                 ctsio->residual = 0;
6985                 ctsio->kern_data_len = alloc_len;
6986                 ctsio->kern_total_len = alloc_len;
6987         }
6988         ctsio->kern_data_resid = 0;
6989         ctsio->kern_rel_offset = 0;
6990
6991         rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr;
6992
6993         tpg_desc_ptr1 = &rtg_ptr->groups[0];
6994         tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0];
6995         tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *)
6996                 &tp_desc_ptr1_1->desc_list[0];
6997
6998         if (single == 0) {
6999                 tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *)
7000                         &tp_desc_ptr1_2->desc_list[0];
7001                 tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0];
7002                 tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *)
7003                         &tp_desc_ptr2_1->desc_list[0];
7004         } else {
7005                 tpg_desc_ptr2 = NULL;
7006                 tp_desc_ptr2_1 = NULL;
7007                 tp_desc_ptr2_2 = NULL;
7008         }
7009
7010         scsi_ulto4b(total_len - 4, rtg_ptr->length);
7011         if (single == 0) {
7012                 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
7013                         if (lun->flags & CTL_LUN_PRIMARY_SC) {
7014                                 tpg_desc_ptr1->pref_state = TPG_PRIMARY;
7015                                 tpg_desc_ptr2->pref_state =
7016                                         TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7017                         } else {
7018                                 tpg_desc_ptr1->pref_state =
7019                                         TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7020                                 tpg_desc_ptr2->pref_state = TPG_PRIMARY;
7021                         }
7022                 } else {
7023                         if (lun->flags & CTL_LUN_PRIMARY_SC) {
7024                                 tpg_desc_ptr1->pref_state =
7025                                         TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7026                                 tpg_desc_ptr2->pref_state = TPG_PRIMARY;
7027                         } else {
7028                                 tpg_desc_ptr1->pref_state = TPG_PRIMARY;
7029                                 tpg_desc_ptr2->pref_state =
7030                                         TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7031                         }
7032                 }
7033         } else {
7034                 tpg_desc_ptr1->pref_state = TPG_PRIMARY;
7035         }
7036         tpg_desc_ptr1->support = 0;
7037         tpg_desc_ptr1->target_port_group[1] = 1;
7038         tpg_desc_ptr1->status = TPG_IMPLICIT;
7039         tpg_desc_ptr1->target_port_count = NUM_PORTS_PER_GRP;
7040
7041         if (single == 0) {
7042                 tpg_desc_ptr2->support = 0;
7043                 tpg_desc_ptr2->target_port_group[1] = 2;
7044                 tpg_desc_ptr2->status = TPG_IMPLICIT;
7045                 tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP;
7046
7047                 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
7048                 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
7049
7050                 tp_desc_ptr2_1->relative_target_port_identifier[1] = 9;
7051                 tp_desc_ptr2_2->relative_target_port_identifier[1] = 10;
7052         } else {
7053                 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) {
7054                         tp_desc_ptr1_1->relative_target_port_identifier[1] = 1;
7055                         tp_desc_ptr1_2->relative_target_port_identifier[1] = 2;
7056                 } else {
7057                         tp_desc_ptr1_1->relative_target_port_identifier[1] = 9;
7058                         tp_desc_ptr1_2->relative_target_port_identifier[1] = 10;
7059                 }
7060         }
7061
7062         ctsio->be_move_done = ctl_config_move_done;
7063
7064         CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
7065                          ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
7066                          ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
7067                          ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
7068                          ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
7069
7070         ctl_datamove((union ctl_io *)ctsio);
7071         return (retval);
7072 }
7073
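/*
 * Handle PERSISTENT RESERVE IN.  The READ KEYS, READ RESERVATION and
 * REPORT CAPABILITIES service actions are supported; READ FULL STATUS
 * is not.  The response length depends on the LUN's reservation state,
 * which can change while the buffer is allocated with the lock dropped,
 * so the length is re-checked afterwards and the command restarted on a
 * mismatch.
 */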
7074 int
7075 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7076 {
7077         struct scsi_per_res_in *cdb;
7078         int alloc_len, total_len = 0;
7079         /* struct scsi_per_res_in_rsrv in_data; */
7080         struct ctl_lun *lun;
7081         struct ctl_softc *softc;
7082
7083         CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7084
7085         softc = control_softc;
7086
7087         cdb = (struct scsi_per_res_in *)ctsio->cdb;
7088
7089         alloc_len = scsi_2btoul(cdb->length);
7090
7091         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7092
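        /*
         * Jumped back to from below if the reservation state changed
         * while the data buffer was being allocated with the lock
         * dropped, so that the buffer can be re-sized and refilled.
         */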
7093 retry:
7094         mtx_lock(&softc->ctl_lock);
7095         switch (cdb->action) {
7096         case SPRI_RK: /* read keys */
7097                 total_len = sizeof(struct scsi_per_res_in_keys) +
7098                         lun->pr_key_count *
7099                         sizeof(struct scsi_per_res_key);
7100                 break;
7101         case SPRI_RR: /* read reservation */
7102                 if (lun->flags & CTL_LUN_PR_RESERVED)
7103                         total_len = sizeof(struct scsi_per_res_in_rsrv);
7104                 else
7105                         total_len = sizeof(struct scsi_per_res_in_header);
7106                 break;
7107         case SPRI_RC: /* report capabilities */
7108                 total_len = sizeof(struct scsi_per_res_cap);
7109                 break;
7110         case SPRI_RS: /* read full status */
7111         default:
7112                 mtx_unlock(&softc->ctl_lock);
7113                 ctl_set_invalid_field(ctsio,
7114                                       /*sks_valid*/ 1,
7115                                       /*command*/ 1,
7116                                       /*field*/ 1,
7117                                       /*bit_valid*/ 1,
7118                                       /*bit*/ 0);
7119                 ctl_done((union ctl_io *)ctsio);
7120                 return (CTL_RETVAL_COMPLETE);
7121                 break; /* NOTREACHED */
7122         }
7123         mtx_unlock(&softc->ctl_lock);
7124
7125         ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7126
7127         if (total_len < alloc_len) {
7128                 ctsio->residual = alloc_len - total_len;
7129                 ctsio->kern_data_len = total_len;
7130                 ctsio->kern_total_len = total_len;
7131         } else {
7132                 ctsio->residual = 0;
7133                 ctsio->kern_data_len = alloc_len;
7134                 ctsio->kern_total_len = alloc_len;
7135         }
7136
7137         ctsio->kern_data_resid = 0;
7138         ctsio->kern_rel_offset = 0;
7139         ctsio->kern_sg_entries = 0;
7140
7141         mtx_lock(&softc->ctl_lock);
7142         switch (cdb->action) {
7143         case SPRI_RK: { /* read keys */
7144                 struct scsi_per_res_in_keys *res_keys;
7145                 int i, key_count;
7146
7147                 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7148
7149                 /*
7150                  * We had to drop the lock to allocate our buffer, which
7151                  * leaves time for someone to come in with another
7152                  * persistent reservation.  (That is unlikely, though,
7153                  * since this should be the only persistent reservation
7154                  * command active right now.)
7155                  */
7156                 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7157                     (lun->pr_key_count *
7158                      sizeof(struct scsi_per_res_key)))){
7159                         mtx_unlock(&softc->ctl_lock);
7160                         free(ctsio->kern_data_ptr, M_CTL);
7161                         printf("%s: reservation length changed, retrying\n",
7162                                __func__);
7163                         goto retry;
7164                 }
7165
7166                 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
7167
7168                 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7169                              lun->pr_key_count, res_keys->header.length);
7170
7171                 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7172                         if (!lun->per_res[i].registered)
7173                                 continue;
7174
7175                         /*
7176                          * We used lun->pr_key_count to calculate the
7177                          * size to allocate.  If it turns out the number of
7178                          * initiators with the registered flag set is
7179                          * larger than that (i.e. they haven't been kept in
7180                          * sync), we've got a problem.
7181                          */
7182                         if (key_count >= lun->pr_key_count) {
7183 #ifdef NEEDTOPORT
7184                                 csevent_log(CSC_CTL | CSC_SHELF_SW |
7185                                             CTL_PR_ERROR,
7186                                             csevent_LogType_Fault,
7187                                             csevent_AlertLevel_Yellow,
7188                                             csevent_FRU_ShelfController,
7189                                             csevent_FRU_Firmware,
7190                                             csevent_FRU_Unknown,
7191                                             "registered keys %d >= key "
7192                                             "count %d", key_count,
7193                                             lun->pr_key_count);
7194 #endif
7195                                 key_count++;
7196                                 continue;
7197                         }
7198                         memcpy(res_keys->keys[key_count].key,
7199                                lun->per_res[i].res_key.key,
7200                                ctl_min(sizeof(res_keys->keys[key_count].key),
7201                                sizeof(lun->per_res[i].res_key)));
7202                         key_count++;
7203                 }
7204                 break;
7205         }
7206         case SPRI_RR: { /* read reservation */
7207                 struct scsi_per_res_in_rsrv *res;
7208                 int tmp_len, header_only;
7209
7210                 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7211
7212                 scsi_ulto4b(lun->PRGeneration, res->header.generation);
7213
7214                 if (lun->flags & CTL_LUN_PR_RESERVED) {
7216                         tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7217                         scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7218                                     res->header.length);
7219                         header_only = 0;
7220                 } else {
7221                         tmp_len = sizeof(struct scsi_per_res_in_header);
7222                         scsi_ulto4b(0, res->header.length);
7223                         header_only = 1;
7224                 }
7225
7226                 /*
7227                  * We had to drop the lock to allocate our buffer, which
7228                  * leaves time for someone to come in with another
7229                  * persistent reservation.  (That is unlikely, though,
7230                  * since this should be the only persistent reservation
7231                  * command active right now.)
7232                  */
7233                 if (tmp_len != total_len) {
7234                         mtx_unlock(&softc->ctl_lock);
7235                         free(ctsio->kern_data_ptr, M_CTL);
7236                         printf("%s: reservation status changed, retrying\n",
7237                                __func__);
7238                         goto retry;
7239                 }
7240
7241                 /*
7242                  * No reservation held, so we're done.
7243                  */
7244                 if (header_only != 0)
7245                         break;
7246
7247                 /*
7248                  * If the registration is an All Registrants type, the key
7249                  * is 0, since it doesn't really matter.
7250                  */
7251                 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7252                         memcpy(res->data.reservation,
7253                                &lun->per_res[lun->pr_res_idx].res_key,
7254                                sizeof(struct scsi_per_res_key));
7255                 }
7256                 res->data.scopetype = lun->res_type;
7257                 break;
7258         }
7259         case SPRI_RC: { /* report capabilities */
7261                 struct scsi_per_res_cap *res_cap;
7262                 uint16_t type_mask;
7263
7264                 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7265                 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7266                 res_cap->flags2 |= SPRI_TMV;
7267                 type_mask = SPRI_TM_WR_EX_AR |
7268                             SPRI_TM_EX_AC_RO |
7269                             SPRI_TM_WR_EX_RO |
7270                             SPRI_TM_EX_AC |
7271                             SPRI_TM_WR_EX |
7272                             SPRI_TM_EX_AC_AR;
7273                 scsi_ulto2b(type_mask, res_cap->type_mask);
7274                 break;
7275         }
7276         case SPRI_RS: /* read full status */
7277         default:
7278                 /*
7279                  * This is a bug, because we just checked for this above,
7280                  * and should have returned an error.
7281                  */
7282                 panic("Invalid PR type %x", cdb->action);
7283                 break; /* NOTREACHED */
7284         }
7285         mtx_unlock(&softc->ctl_lock);
7286
7287         ctsio->be_move_done = ctl_config_move_done;
7288
7289         CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n",
7290                          ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1],
7291                          ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3],
7292                          ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5],
7293                          ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7]));
7294
7295         ctl_datamove((union ctl_io *)ctsio);
7296
7297         return (CTL_RETVAL_COMPLETE);
7298 }
7299
7300 /*
7301  * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
7302  * it should return.
7303  */
7304 static int
7305 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
7306                 uint64_t sa_res_key, uint8_t type, uint32_t residx,
7307                 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
7308                 struct scsi_per_res_out_parms* param)
7309 {
7310         union ctl_ha_msg persis_io;
7311         int retval, i;
7312         int isc_retval;
7313
7314         retval = 0;
7315
7316         if (sa_res_key == 0) {
7317                 mtx_lock(&softc->ctl_lock);
7318                 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
7319                         /* validate scope and type */
7320                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7321                              SPR_LU_SCOPE) {
7322                                 mtx_unlock(&softc->ctl_lock);
7323                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7324                                                       /*sks_valid*/ 1,
7325                                                       /*command*/ 1,
7326                                                       /*field*/ 2,
7327                                                       /*bit_valid*/ 1,
7328                                                       /*bit*/ 4);
7329                                 ctl_done((union ctl_io *)ctsio);
7330                                 return (1);
7331                         }
7332
7333                         if (type > 8 || type == 2 || type == 4 || type == 0) {
7334                                 mtx_unlock(&softc->ctl_lock);
7335                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7336                                                       /*sks_valid*/ 1,
7337                                                       /*command*/ 1,
7338                                                       /*field*/ 2,
7339                                                       /*bit_valid*/ 1,
7340                                                       /*bit*/ 0);
7341                                 ctl_done((union ctl_io *)ctsio);
7342                                 return (1);
7343                         }
7344
7345                         /* temporarily unregister this nexus */
7346                         lun->per_res[residx].registered = 0;
7347
7348                         /*
7349                          * Unregister everybody else and build UA for
7350                          * them
7351                          */
7352                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7353                                 if (lun->per_res[i].registered == 0)
7354                                         continue;
7355
7356                                 if (!persis_offset
7357                                  && i < CTL_MAX_INITIATORS)
7358                                         lun->pending_sense[i].ua_pending |=
7359                                                 CTL_UA_REG_PREEMPT;
7360                                 else if (persis_offset
7361                                       && i >= persis_offset)
7362                                         lun->pending_sense[i-persis_offset
7363                                                 ].ua_pending |=
7364                                                 CTL_UA_REG_PREEMPT;
7365                                 lun->per_res[i].registered = 0;
7366                                 memset(&lun->per_res[i].res_key, 0,
7367                                        sizeof(struct scsi_per_res_key));
7368                         }
7369                         lun->per_res[residx].registered = 1;
7370                         lun->pr_key_count = 1;
7371                         lun->res_type = type;
7372                         if (lun->res_type != SPR_TYPE_WR_EX_AR
7373                          && lun->res_type != SPR_TYPE_EX_AC_AR)
7374                                 lun->pr_res_idx = residx;
7375
7376                         mtx_unlock(&softc->ctl_lock);
7377                         /* send msg to other side */
7378                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7379                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7380                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7381                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
7382                         persis_io.pr.pr_info.res_type = type;
7383                         memcpy(persis_io.pr.pr_info.sa_res_key,
7384                                param->serv_act_res_key,
7385                                sizeof(param->serv_act_res_key));
7386                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7387                              &persis_io, sizeof(persis_io), 0)) >
7388                              CTL_HA_STATUS_SUCCESS) {
7389                                 printf("CTL:Persis Out error returned "
7390                                        "from ctl_ha_msg_send %d\n",
7391                                        isc_retval);
7392                         }
7393                 } else {
7394                         /* not all registrants */
7395                         mtx_unlock(&softc->ctl_lock);
7396                         free(ctsio->kern_data_ptr, M_CTL);
7397                         ctl_set_invalid_field(ctsio,
7398                                               /*sks_valid*/ 1,
7399                                               /*command*/ 0,
7400                                               /*field*/ 8,
7401                                               /*bit_valid*/ 0,
7402                                               /*bit*/ 0);
7403                         ctl_done((union ctl_io *)ctsio);
7404                         return (1);
7405                 }
7406         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7407                 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
7408                 int found = 0;
7409
7410                 mtx_lock(&softc->ctl_lock);
7411                 if (res_key == sa_res_key) {
7412                         /* special case */
7413                         /*
7414                          * The spec implies this is not good, but doesn't
7415                          * say what to do: either generate a reservation
7416                          * conflict, or return a check condition with an
7417                          * illegal field in the parameter data.  Since the
7418                          * latter is what is done when the sa_res_key is
7419                          * zero, take the same approach here, since this
7420                          * case also concerns the sa_res_key.
7421                          */
7422                         mtx_unlock(&softc->ctl_lock);
7423                         free(ctsio->kern_data_ptr, M_CTL);
7424                         ctl_set_invalid_field(ctsio,
7425                                               /*sks_valid*/ 1,
7426                                               /*command*/ 0,
7427                                               /*field*/ 8,
7428                                               /*bit_valid*/ 0,
7429                                               /*bit*/ 0);
7430                         ctl_done((union ctl_io *)ctsio);
7431                         return (1);
7432                 }
7433
7434                 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7435                         if (lun->per_res[i].registered
7436                          && memcmp(param->serv_act_res_key,
7437                             lun->per_res[i].res_key.key,
7438                             sizeof(struct scsi_per_res_key)) != 0)
7439                                 continue;
7440
7441                         found = 1;
7442                         lun->per_res[i].registered = 0;
7443                         memset(&lun->per_res[i].res_key, 0,
7444                                sizeof(struct scsi_per_res_key));
7445                         lun->pr_key_count--;
7446
7447                         if (!persis_offset
7448                          && i < CTL_MAX_INITIATORS)
7449                                 lun->pending_sense[i].ua_pending |=
7450                                         CTL_UA_REG_PREEMPT;
7451                         else if (persis_offset
7452                               && i >= persis_offset)
7453                                 lun->pending_sense[i-persis_offset].ua_pending |=
7454                                         CTL_UA_REG_PREEMPT;
7455                 }
7456                 mtx_unlock(&softc->ctl_lock);
7457                 if (!found) {
7458                         free(ctsio->kern_data_ptr, M_CTL);
7459                         ctl_set_reservation_conflict(ctsio);
7460                         ctl_done((union ctl_io *)ctsio);
7461                         return (1);
7462                 }
7463                 /* send msg to other side */
7464                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7465                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7466                 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7467                 persis_io.pr.pr_info.residx = lun->pr_res_idx;
7468                 persis_io.pr.pr_info.res_type = type;
7469                 memcpy(persis_io.pr.pr_info.sa_res_key,
7470                        param->serv_act_res_key,
7471                        sizeof(param->serv_act_res_key));
7472                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7473                      &persis_io, sizeof(persis_io), 0)) >
7474                      CTL_HA_STATUS_SUCCESS) {
7475                         printf("CTL:Persis Out error returned from "
7476                                "ctl_ha_msg_send %d\n", isc_retval);
7477                 }
7478         } else {
7479                 /* Reserved but not all registrants */
7480                 /* sa_res_key is res holder */
7481                 if (memcmp(param->serv_act_res_key,
7482                    lun->per_res[lun->pr_res_idx].res_key.key,
7483                    sizeof(struct scsi_per_res_key)) == 0) {
7484                         /* validate scope and type */
7485                         if ((cdb->scope_type & SPR_SCOPE_MASK) !=
7486                              SPR_LU_SCOPE) {
7487                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7488                                                       /*sks_valid*/ 1,
7489                                                       /*command*/ 1,
7490                                                       /*field*/ 2,
7491                                                       /*bit_valid*/ 1,
7492                                                       /*bit*/ 4);
7493                                 ctl_done((union ctl_io *)ctsio);
7494                                 return (1);
7495                         }
7496
7497                         if (type > 8 || type == 2 || type == 4 || type == 0) {
7498                                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7499                                                       /*sks_valid*/ 1,
7500                                                       /*command*/ 1,
7501                                                       /*field*/ 2,
7502                                                       /*bit_valid*/ 1,
7503                                                       /*bit*/ 0);
7504                                 ctl_done((union ctl_io *)ctsio);
7505                                 return (1);
7506                         }
7507
7508                         /*
7509                          * Do the following:
7510                          * If sa_res_key != res_key, remove all
7511                          * registrants with sa_res_key and generate a
7512                          * UA (Registrations Preempted) for them.  If
7513                          * it wasn't an exclusive reservation and the
7514                          * type has changed, generate a UA
7515                          * (Reservations Preempted) for all other
7516                          * registered nexuses.  Establish the new
7517                          * reservation and holder.  If res_key and
7518                          * sa_res_key are the same, do the above
7519                          * except don't unregister the res holder.
7520                          */
7521
7522                         /*
7523                          * Temporarily unregister so it won't get
7524                          * removed or UA generated
7525                          */
7526                         lun->per_res[residx].registered = 0;
7527                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7528                                 if (lun->per_res[i].registered == 0)
7529                                         continue;
7530
7531                                 if (memcmp(param->serv_act_res_key,
7532                                     lun->per_res[i].res_key.key,
7533                                     sizeof(struct scsi_per_res_key)) == 0) {
7534                                         lun->per_res[i].registered = 0;
7535                                         memset(&lun->per_res[i].res_key,
7536                                                0,
7537                                                sizeof(struct scsi_per_res_key));
7538                                         lun->pr_key_count--;
7539
7540                                         if (!persis_offset
7541                                          && i < CTL_MAX_INITIATORS)
7542                                                 lun->pending_sense[i
7543                                                         ].ua_pending |=
7544                                                         CTL_UA_REG_PREEMPT;
7545                                         else if (persis_offset
7546                                               && i >= persis_offset)
7547                                                 lun->pending_sense[
7548                                                   i-persis_offset].ua_pending |=
7549                                                   CTL_UA_REG_PREEMPT;
7550                                 } else if (type != lun->res_type
7551                                         && (lun->res_type == SPR_TYPE_WR_EX_RO
7552                                          || lun->res_type == SPR_TYPE_EX_AC_RO)) {
7553                                                 if (!persis_offset
7554                                                  && i < CTL_MAX_INITIATORS)
7555                                                         lun->pending_sense[i
7556                                                         ].ua_pending |=
7557                                                         CTL_UA_RES_RELEASE;
7558                                                 else if (persis_offset
7559                                                       && i >= persis_offset)
7560                                                         lun->pending_sense[
7561                                                         i-persis_offset
7562                                                         ].ua_pending |=
7563                                                         CTL_UA_RES_RELEASE;
7564                                 }
7565                         }
7566                         lun->per_res[residx].registered = 1;
7567                         lun->res_type = type;
7568                         if (lun->res_type != SPR_TYPE_WR_EX_AR
7569                          && lun->res_type != SPR_TYPE_EX_AC_AR)
7570                                 lun->pr_res_idx = residx;
7571                         else
7572                                 lun->pr_res_idx =
7573                                         CTL_PR_ALL_REGISTRANTS;
7574
7575                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7576                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7577                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7578                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
7579                         persis_io.pr.pr_info.res_type = type;
7580                         memcpy(persis_io.pr.pr_info.sa_res_key,
7581                                param->serv_act_res_key,
7582                                sizeof(param->serv_act_res_key));
7583                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7584                              &persis_io, sizeof(persis_io), 0)) >
7585                              CTL_HA_STATUS_SUCCESS) {
7586                                 printf("CTL:Persis Out error returned "
7587                                        "from ctl_ha_msg_send %d\n",
7588                                        isc_retval);
7589                         }
7590                 } else {
7591                         /*
7592                          * sa_res_key is not the res holder just
7593                          * remove registrants
7594                          */
7595                         int found = 0;
7596                         mtx_lock(&softc->ctl_lock);
7597
7598                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7599                                 if (memcmp(param->serv_act_res_key,
7600                                     lun->per_res[i].res_key.key,
7601                                     sizeof(struct scsi_per_res_key)) != 0)
7602                                         continue;
7603
7604                                 found = 1;
7605                                 lun->per_res[i].registered = 0;
7606                                 memset(&lun->per_res[i].res_key, 0,
7607                                        sizeof(struct scsi_per_res_key));
7608                                 lun->pr_key_count--;
7609
7610                                 if (!persis_offset
7611                                  && i < CTL_MAX_INITIATORS)
7612                                         lun->pending_sense[i].ua_pending |=
7613                                                 CTL_UA_REG_PREEMPT;
7614                                 else if (persis_offset
7615                                       && i >= persis_offset)
7616                                         lun->pending_sense[
7617                                                 i-persis_offset].ua_pending |=
7618                                                 CTL_UA_REG_PREEMPT;
7619                         }
7620
7621                         if (!found) {
7622                                 mtx_unlock(&softc->ctl_lock);
7623                                 free(ctsio->kern_data_ptr, M_CTL);
7624                                 ctl_set_reservation_conflict(ctsio);
7625                                 ctl_done((union ctl_io *)ctsio);
7626                                 return (1);
7627                         }
7628                         mtx_unlock(&softc->ctl_lock);
7629                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
7630                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
7631                         persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
7632                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
7633                         persis_io.pr.pr_info.res_type = type;
7634                         memcpy(persis_io.pr.pr_info.sa_res_key,
7635                                param->serv_act_res_key,
7636                                sizeof(param->serv_act_res_key));
7637                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
7638                              &persis_io, sizeof(persis_io), 0)) >
7639                              CTL_HA_STATUS_SUCCESS) {
7640                                 printf("CTL:Persis Out error returned "
7641                                        "from ctl_ha_msg_send %d\n",
7642                                        isc_retval);
7643                         }
7644                 }
7645         }
7646
7647         lun->PRGeneration++;
7648
7649         return (retval);
7650 }
7651
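/*
 * Apply a preempt action received from the peer HA node via a
 * CTL_MSG_PERS_ACTION message, mirroring on this node the registration
 * and reservation changes that ctl_pro_preempt() made on the sender.
 */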
7652 static void
7653 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
7654 {
7655         int i;
7656
7657         if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
7658          || lun->pr_res_idx == CTL_PR_NO_RESERVATION
7659          || memcmp(&lun->per_res[lun->pr_res_idx].res_key,
7660                    msg->pr.pr_info.sa_res_key,
7661                    sizeof(struct scsi_per_res_key)) != 0) {
7662                 uint64_t sa_res_key;
7663                 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
7664
7665                 if (sa_res_key == 0) {
7666                         /* temporarily unregister this nexus */
7667                         lun->per_res[msg->pr.pr_info.residx].registered = 0;
7668
7669                         /*
7670                          * Unregister everybody else and build UA for
7671                          * them
7672                          */
7673                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7674                                 if (lun->per_res[i].registered == 0)
7675                                         continue;
7676
7677                                 if (!persis_offset
7678                                  && i < CTL_MAX_INITIATORS)
7679                                         lun->pending_sense[i].ua_pending |=
7680                                                 CTL_UA_REG_PREEMPT;
7681                                 else if (persis_offset && i >= persis_offset)
7682                                         lun->pending_sense[i -
7683                                                 persis_offset].ua_pending |=
7684                                                 CTL_UA_REG_PREEMPT;
7685                                 lun->per_res[i].registered = 0;
7686                                 memset(&lun->per_res[i].res_key, 0,
7687                                        sizeof(struct scsi_per_res_key));
7688                         }
7689
7690                         lun->per_res[msg->pr.pr_info.residx].registered = 1;
7691                         lun->pr_key_count = 1;
7692                         lun->res_type = msg->pr.pr_info.res_type;
7693                         if (lun->res_type != SPR_TYPE_WR_EX_AR
7694                          && lun->res_type != SPR_TYPE_EX_AC_AR)
7695                                 lun->pr_res_idx = msg->pr.pr_info.residx;
7696                 } else {
7697                         for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7698                                 if (memcmp(msg->pr.pr_info.sa_res_key,
7699                                    lun->per_res[i].res_key.key,
7700                                    sizeof(struct scsi_per_res_key)) != 0)
7701                                         continue;
7702
7703                                 lun->per_res[i].registered = 0;
7704                                 memset(&lun->per_res[i].res_key, 0,
7705                                        sizeof(struct scsi_per_res_key));
7706                                 lun->pr_key_count--;
7707
7708                                 if (!persis_offset
7709                                  && i < CTL_MAX_INITIATORS)
7710                                         lun->pending_sense[i].ua_pending |=
7711                                                 CTL_UA_REG_PREEMPT;
7712                                 else if (persis_offset
7713                                       && i >= persis_offset)
7714                                         lun->pending_sense[i -
7715                                                 persis_offset].ua_pending |=
7716                                                 CTL_UA_REG_PREEMPT;
7717                         }
7718                 }
7719         } else {
7720                 /*
7721                  * Temporarily unregister so it won't get removed
7722                  * or UA generated
7723                  */
7724                 lun->per_res[msg->pr.pr_info.residx].registered = 0;
7725                 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7726                         if (lun->per_res[i].registered == 0)
7727                                 continue;
7728
7729                         if (memcmp(msg->pr.pr_info.sa_res_key,
7730                            lun->per_res[i].res_key.key,
7731                            sizeof(struct scsi_per_res_key)) == 0) {
7732                                 lun->per_res[i].registered = 0;
7733                                 memset(&lun->per_res[i].res_key, 0,
7734                                        sizeof(struct scsi_per_res_key));
7735                                 lun->pr_key_count--;
7736                                 if (!persis_offset
7737                                  && i < CTL_MAX_INITIATORS)
7738                                         lun->pending_sense[i].ua_pending |=
7739                                                 CTL_UA_REG_PREEMPT;
7740                                 else if (persis_offset
7741                                       && i >= persis_offset)
7742                                         lun->pending_sense[i -
7743                                                 persis_offset].ua_pending |=
7744                                                 CTL_UA_REG_PREEMPT;
7745                         } else if (msg->pr.pr_info.res_type != lun->res_type
7746                                 && (lun->res_type == SPR_TYPE_WR_EX_RO
7747                                  || lun->res_type == SPR_TYPE_EX_AC_RO)) {
7748                                         if (!persis_offset
7749                                          && i < CTL_MAX_INITIATORS)
7750                                                 lun->pending_sense[i
7751                                                         ].ua_pending |=
7752                                                         CTL_UA_RES_RELEASE;
7753                                         else if (persis_offset
7754                                               && i >= persis_offset)
7755                                                 lun->pending_sense[i -
7756                                                         persis_offset].ua_pending |=
7757                                                         CTL_UA_RES_RELEASE;
7758                         }
7759                 }
7760                 lun->per_res[msg->pr.pr_info.residx].registered = 1;
7761                 lun->res_type = msg->pr.pr_info.res_type;
7762                 if (lun->res_type != SPR_TYPE_WR_EX_AR
7763                  && lun->res_type != SPR_TYPE_EX_AC_AR)
7764                         lun->pr_res_idx = msg->pr.pr_info.residx;
7765                 else
7766                         lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
7767         }
7768         lun->PRGeneration++;
7769
7770 }
7771
7772
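/*
 * Handle PERSISTENT RESERVE OUT.  The scope and type are validated for
 * the RESERVE and RELEASE service actions, the parameter list is
 * fetched from the initiator on the first pass through, and the
 * reservation key is validated for every service action except
 * REGISTER AND IGNORE EXISTING KEY before the request is carried out.
 */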
7773 int
7774 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
7775 {
7776         int retval;
7777         int isc_retval;
7778         u_int32_t param_len;
7779         struct scsi_per_res_out *cdb;
7780         struct ctl_lun *lun;
7781         struct scsi_per_res_out_parms* param;
7782         struct ctl_softc *softc;
7783         uint32_t residx;
7784         uint64_t res_key, sa_res_key;
7785         uint8_t type;
7786         union ctl_ha_msg persis_io;
7787         int    i;
7788
7789         CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
7790
7791         retval = CTL_RETVAL_COMPLETE;
7792
7793         softc = control_softc;
7794
7795         cdb = (struct scsi_per_res_out *)ctsio->cdb;
7796         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7797
7798         /*
7799          * We only support whole-LUN scope.  The scope & type are ignored for
7800          * register, register-and-ignore-existing-key, and clear service
7801          * actions.  We sometimes ignore scope and type on preempts too.
7802          * Verify the reservation type here as well.
7803          */
7804         type = cdb->scope_type & SPR_TYPE_MASK;
7805         if ((cdb->action == SPRO_RESERVE)
7806          || (cdb->action == SPRO_RELEASE)) {
7807                 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
7808                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7809                                               /*sks_valid*/ 1,
7810                                               /*command*/ 1,
7811                                               /*field*/ 2,
7812                                               /*bit_valid*/ 1,
7813                                               /*bit*/ 4);
7814                         ctl_done((union ctl_io *)ctsio);
7815                         return (CTL_RETVAL_COMPLETE);
7816                 }
7817
7818                 if (type > 8 || type == 2 || type == 4 || type == 0) {
7819                         ctl_set_invalid_field(/*ctsio*/ ctsio,
7820                                               /*sks_valid*/ 1,
7821                                               /*command*/ 1,
7822                                               /*field*/ 2,
7823                                               /*bit_valid*/ 1,
7824                                               /*bit*/ 0);
7825                         ctl_done((union ctl_io *)ctsio);
7826                         return (CTL_RETVAL_COMPLETE);
7827                 }
7828         }
7829
7830         switch (cdb->action & SPRO_ACTION_MASK) {
7831         case SPRO_REGISTER:
7832         case SPRO_RESERVE:
7833         case SPRO_RELEASE:
7834         case SPRO_CLEAR:
7835         case SPRO_PREEMPT:
7836         case SPRO_REG_IGNO:
7837                 break;
7838         case SPRO_REG_MOVE:
7839         case SPRO_PRE_ABO:
7840         default:
7841                 ctl_set_invalid_field(/*ctsio*/ ctsio,
7842                                       /*sks_valid*/ 1,
7843                                       /*command*/ 1,
7844                                       /*field*/ 1,
7845                                       /*bit_valid*/ 1,
7846                                       /*bit*/ 0);
7847                 ctl_done((union ctl_io *)ctsio);
7848                 return (CTL_RETVAL_COMPLETE);
7849                 break; /* NOTREACHED */
7850         }
7851
7852         param_len = scsi_4btoul(cdb->length);
7853
7854         if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
7855                 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
7856                 ctsio->kern_data_len = param_len;
7857                 ctsio->kern_total_len = param_len;
7858                 ctsio->kern_data_resid = 0;
7859                 ctsio->kern_rel_offset = 0;
7860                 ctsio->kern_sg_entries = 0;
7861                 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7862                 ctsio->be_move_done = ctl_config_move_done;
7863                 ctl_datamove((union ctl_io *)ctsio);
7864
7865                 return (CTL_RETVAL_COMPLETE);
7866         }
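        /*
         * A sketch of the usual CTL config-write flow, for orientation: on
         * the first pass (CTL_FLAG_ALLOCATED clear) we allocate the buffer
         * and start the transfer above; after ctl_config_move_done() runs,
         * the command is dispatched here again with the flag set and the
         * parameter data available for parsing below.
         */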
7867
7868         param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
7869
7870         residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
7871         res_key = scsi_8btou64(param->res_key.key);
7872         sa_res_key = scsi_8btou64(param->serv_act_res_key);
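        /*
         * Keys travel big-endian on the wire.  A minimal sketch of how an
         * initiator might build the parameter list decoded above
         * (illustrative only; names from scsi_all.h):
         */
#if 0
        struct scsi_per_res_out_parms p;

        memset(&p, 0, sizeof(p));
        /* Existing key: zero when registering for the first time. */
        scsi_u64to8b(0, p.res_key.key);
        /* New key: 0x0102030405060708 goes out as bytes 01 02 ... 08. */
        scsi_u64to8b(0x0102030405060708ULL, p.serv_act_res_key);
#endif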
7873
7874         /*
7875          * Validate the reservation key here, except for SPRO_REG_IGNO.
7876          * This must be done for all other service actions.
7877          */
7878         if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
7879                 mtx_lock(&softc->ctl_lock);
7880                 if (lun->per_res[residx].registered) {
7881                         if (memcmp(param->res_key.key,
7882                                    lun->per_res[residx].res_key.key,
7883                                    ctl_min(sizeof(param->res_key),
7884                                    sizeof(lun->per_res[residx].res_key))) != 0) {
7885                                 /*
7886                                  * The current key passed in doesn't match
7887                                  * the one the initiator previously
7888                                  * registered.
7889                                  */
7890                                 mtx_unlock(&softc->ctl_lock);
7891                                 free(ctsio->kern_data_ptr, M_CTL);
7892                                 ctl_set_reservation_conflict(ctsio);
7893                                 ctl_done((union ctl_io *)ctsio);
7894                                 return (CTL_RETVAL_COMPLETE);
7895                         }
7896                 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
7897                         /*
7898                          * We are not registered.
7899                          */
7900                         mtx_unlock(&softc->ctl_lock);
7901                         free(ctsio->kern_data_ptr, M_CTL);
7902                         ctl_set_reservation_conflict(ctsio);
7903                         ctl_done((union ctl_io *)ctsio);
7904                         return (CTL_RETVAL_COMPLETE);
7905                 } else if (res_key != 0) {
7906                         /*
7907                          * We are not registered and are trying to register,
7908                          * but the reservation key isn't zero.
7909                          */
7910                         mtx_unlock(&softc->ctl_lock);
7911                         free(ctsio->kern_data_ptr, M_CTL);
7912                         ctl_set_reservation_conflict(ctsio);
7913                         ctl_done((union ctl_io *)ctsio);
7914                         return (CTL_RETVAL_COMPLETE);
7915                 }
7916                 mtx_unlock(&softc->ctl_lock);
7917         }
7918
7919         switch (cdb->action & SPRO_ACTION_MASK) {
7920         case SPRO_REGISTER:
7921         case SPRO_REG_IGNO: {
7922
7923 #if 0
7924                 printf("Registration received\n");
7925 #endif
7926
7927                 /*
7928                  * We don't support any of these options, as we report in
7929                  * the read capabilities request (see
7930                  * ctl_persistent_reserve_in(), above).
7931                  */
7932                 if ((param->flags & SPR_SPEC_I_PT)
7933                  || (param->flags & SPR_ALL_TG_PT)
7934                  || (param->flags & SPR_APTPL)) {
7935                         int bit_ptr;
7936
7937                         if (param->flags & SPR_APTPL)
7938                                 bit_ptr = 0;
7939                         else if (param->flags & SPR_ALL_TG_PT)
7940                                 bit_ptr = 2;
7941                         else /* SPR_SPEC_I_PT */
7942                                 bit_ptr = 3;
7943
7944                         free(ctsio->kern_data_ptr, M_CTL);
7945                         ctl_set_invalid_field(ctsio,
7946                                               /*sks_valid*/ 1,
7947                                               /*command*/ 0,
7948                                               /*field*/ 20,
7949                                               /*bit_valid*/ 1,
7950                                               /*bit*/ bit_ptr);
7951                         ctl_done((union ctl_io *)ctsio);
7952                         return (CTL_RETVAL_COMPLETE);
7953                 }
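                /*
                 * The sense data above points at parameter list byte 20,
                 * where SPC-3 places these flags: APTPL in bit 0,
                 * ALL_TG_PT in bit 2 and SPEC_I_PT in bit 3, matching the
                 * bit_ptr values chosen above.
                 */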
7954
7955                 mtx_lock(&softc->ctl_lock);
7956
7957                 /*
7958                  * The initiator wants to clear the
7959                  * key/unregister.
7960                  */
7961                 if (sa_res_key == 0) {
7962                         if ((res_key == 0
7963                           && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
7964                          || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
7965                           && !lun->per_res[residx].registered)) {
7966                                 mtx_unlock(&softc->ctl_lock);
7967                                 goto done;
7968                         }
7969
7970                         lun->per_res[residx].registered = 0;
7971                         memset(&lun->per_res[residx].res_key,
7972                                0, sizeof(lun->per_res[residx].res_key));
7973                         lun->pr_key_count--;
7974
7975                         if (residx == lun->pr_res_idx) {
7976                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
7977                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
7978
7979                                 if ((lun->res_type == SPR_TYPE_WR_EX_RO
7980                                   || lun->res_type == SPR_TYPE_EX_AC_RO)
7981                                  && lun->pr_key_count) {
7982                                         /*
7983                                          * If the reservation is a registrants-
7984                                          * only type, we need to generate a UA
7985                                          * for the other registered initiators.
7986                                          * The sense code should be
7987                                          * RESERVATIONS RELEASED.
7988                                          */
7989
7990                                         for (i = 0; i < CTL_MAX_INITIATORS;i++){
7991                                                 if (lun->per_res[
7992                                                     i+persis_offset].registered
7993                                                     == 0)
7994                                                         continue;
7995                                                 lun->pending_sense[i
7996                                                         ].ua_pending |=
7997                                                         CTL_UA_RES_RELEASE;
7998                                         }
7999                                 }
8000                                 lun->res_type = 0;
8001                         } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8002                                 if (lun->pr_key_count == 0) {
8003                                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8004                                         lun->res_type = 0;
8005                                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8006                                 }
8007                         }
8008                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8009                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8010                         persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8011                         persis_io.pr.pr_info.residx = residx;
8012                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8013                              &persis_io, sizeof(persis_io), 0)) >
8014                              CTL_HA_STATUS_SUCCESS) {
8015                                 printf("CTL:Persis Out error returned from "
8016                                        "ctl_ha_msg_send %d\n", isc_retval);
8017                         }
8018                         mtx_unlock(&softc->ctl_lock);
8019                 } else /* sa_res_key != 0 */ {
8020
8021                         /*
8022                          * If we aren't registered currently then increment
8023                          * the key count and set the registered flag.
8024                          */
8025                         if (!lun->per_res[residx].registered) {
8026                                 lun->pr_key_count++;
8027                                 lun->per_res[residx].registered = 1;
8028                         }
8029
8030                         memcpy(&lun->per_res[residx].res_key,
8031                                param->serv_act_res_key,
8032                                ctl_min(sizeof(param->serv_act_res_key),
8033                                sizeof(lun->per_res[residx].res_key)));
8034
8035                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8036                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8037                         persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8038                         persis_io.pr.pr_info.residx = residx;
8039                         memcpy(persis_io.pr.pr_info.sa_res_key,
8040                                param->serv_act_res_key,
8041                                sizeof(param->serv_act_res_key));
8042                         mtx_unlock(&softc->ctl_lock);
8043                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8044                              &persis_io, sizeof(persis_io), 0)) >
8045                              CTL_HA_STATUS_SUCCESS) {
8046                                 printf("CTL:Persis Out error returned from "
8047                                        "ctl_ha_msg_send %d\n", isc_retval);
8048                         }
8049                 }
8050                 lun->PRGeneration++;
8051
8052                 break;
8053         }
8054         case SPRO_RESERVE:
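                /*
                 * A RESERVE from the current holder with a matching type
                 * falls through below as a success (no-op); a different
                 * holder or a mismatched type gets RESERVATION CONFLICT.
                 */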
8055 #if 0
8056                 printf("Reserve executed type %d\n", type);
8057 #endif
8058                 mtx_lock(&softc->ctl_lock);
8059                 if (lun->flags & CTL_LUN_PR_RESERVED) {
8060                         /*
8061                          * If this isn't the reservation holder and it's
8062                          * not an "all registrants" type, or if the type
8063                          * is different, then we have a conflict.
8064                          */
8065                         if ((lun->pr_res_idx != residx
8066                           && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8067                          || lun->res_type != type) {
8068                                 mtx_unlock(&softc->ctl_lock);
8069                                 free(ctsio->kern_data_ptr, M_CTL);
8070                                 ctl_set_reservation_conflict(ctsio);
8071                                 ctl_done((union ctl_io *)ctsio);
8072                                 return (CTL_RETVAL_COMPLETE);
8073                         }
8074                         mtx_unlock(&softc->ctl_lock);
8075                 } else /* create a reservation */ {
8076                         /*
8077                          * If it's not an "all registrants" type, record
8078                          * the reservation holder.
8079                          */
8080                         if (type != SPR_TYPE_WR_EX_AR
8081                          && type != SPR_TYPE_EX_AC_AR)
8082                                 lun->pr_res_idx = residx; /* Res holder */
8083                         else
8084                                 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8085
8086                         lun->flags |= CTL_LUN_PR_RESERVED;
8087                         lun->res_type = type;
8088
8089                         mtx_unlock(&softc->ctl_lock);
8090
8091                         /* send msg to other side */
8092                         persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8093                         persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8094                         persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8095                         persis_io.pr.pr_info.residx = lun->pr_res_idx;
8096                         persis_io.pr.pr_info.res_type = type;
8097                         if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8098                              &persis_io, sizeof(persis_io), 0)) >
8099                              CTL_HA_STATUS_SUCCESS) {
8100                                 printf("CTL:Persis Out error returned from "
8101                                        "ctl_ha_msg_send %d\n", isc_retval);
8102                         }
8103                 }
8104                 break;
8105
8106         case SPRO_RELEASE:
8107                 mtx_lock(&softc->ctl_lock);
8108                 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8109                         /* No reservation exists; return good status. */
8110                         mtx_unlock(&softc->ctl_lock);
8111                         goto done;
8112                 }
8113                 /*
8114                  * Is this nexus a reservation holder?
8115                  */
8116                 if (lun->pr_res_idx != residx
8117                  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8118                         /*
8119                          * Not a reservation holder; return good status
8120                          * but do nothing.
8121                          */
8122                         mtx_unlock(&softc->ctl_lock);
8123                         goto done;
8124                 }
8125
8126                 if (lun->res_type != type) {
8127                         mtx_unlock(&softc->ctl_lock);
8128                         free(ctsio->kern_data_ptr, M_CTL);
8129                         ctl_set_illegal_pr_release(ctsio);
8130                         ctl_done((union ctl_io *)ctsio);
8131                         return (CTL_RETVAL_COMPLETE);
8132                 }
8133
8134                 /* okay to release */
8135                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8136                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8137                 lun->res_type = 0;
8138
8139                 /*
8140                  * If this isn't an exclusive access
8141                  * reservation, generate a UA for all
8142                  * other registrants.
8143                  */
8144                 if (type != SPR_TYPE_EX_AC
8145                  && type != SPR_TYPE_WR_EX) {
8146                         /*
8147                          * Temporarily unregister so we don't generate a UA.
8148                          */
8149                         lun->per_res[residx].registered = 0;
8150
8151                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8152                                 if (lun->per_res[i+persis_offset].registered
8153                                     == 0)
8154                                         continue;
8155                                 lun->pending_sense[i].ua_pending |=
8156                                         CTL_UA_RES_RELEASE;
8157                         }
8158
8159                         lun->per_res[residx].registered = 1;
8160                 }
8161                 mtx_unlock(&softc->ctl_lock);
8162                 /* Send msg to other side */
8163                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8164                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8165                 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8166                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8167                      sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8168                         printf("CTL:Persis Out error returned from "
8169                                "ctl_ha_msg_send %d\n", isc_retval);
8170                 }
8171                 break;
8172
8173         case SPRO_CLEAR:
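                /*
                 * CLEAR drops the reservation and every registration on
                 * the LUN; the other registrants learn of it through the
                 * CTL_UA_RES_PREEMPT unit attention posted below.
                 */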
8174                 /* send msg to other side */
8175
8176                 mtx_lock(&softc->ctl_lock);
8177                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8178                 lun->res_type = 0;
8179                 lun->pr_key_count = 0;
8180                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8181
8182
8183                 memset(&lun->per_res[residx].res_key,
8184                        0, sizeof(lun->per_res[residx].res_key));
8185                 lun->per_res[residx].registered = 0;
8186
8187                 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++)
8188                         if (lun->per_res[i].registered) {
8189                                 if (!persis_offset && i < CTL_MAX_INITIATORS)
8190                                         lun->pending_sense[i].ua_pending |=
8191                                                 CTL_UA_RES_PREEMPT;
8192                                 else if (persis_offset && i >= persis_offset)
8193                                         lun->pending_sense[i-persis_offset
8194                                             ].ua_pending |= CTL_UA_RES_PREEMPT;
8195
8196                                 memset(&lun->per_res[i].res_key,
8197                                        0, sizeof(struct scsi_per_res_key));
8198                                 lun->per_res[i].registered = 0;
8199                         }
8200                 lun->PRGeneration++;
8201                 mtx_unlock(&softc->ctl_lock);
8202                 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8203                 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8204                 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8205                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8206                      sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8207                         printf("CTL:Persis Out error returned from "
8208                                "ctl_ha_msg_send %d\n", isc_retval);
8209                 }
8210                 break;
8211
8212         case SPRO_PREEMPT: {
8213                 int nretval;
8214
8215                 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8216                                           residx, ctsio, cdb, param);
8217                 if (nretval != 0)
8218                         return (CTL_RETVAL_COMPLETE);
8219                 break;
8220         }
8221         case SPRO_REG_MOVE:
8222         case SPRO_PRE_ABO:
8223         default:
8224                 free(ctsio->kern_data_ptr, M_CTL);
8225                 ctl_set_invalid_field(/*ctsio*/ ctsio,
8226                                       /*sks_valid*/ 1,
8227                                       /*command*/ 1,
8228                                       /*field*/ 1,
8229                                       /*bit_valid*/ 1,
8230                                       /*bit*/ 0);
8231                 ctl_done((union ctl_io *)ctsio);
8232                 return (CTL_RETVAL_COMPLETE);
8233                 break; /* NOTREACHED */
8234         }
8235
8236 done:
8237         free(ctsio->kern_data_ptr, M_CTL);
8238         ctl_set_success(ctsio);
8239         ctl_done((union ctl_io *)ctsio);
8240
8241         return (retval);
8242 }
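/*
 * Example (illustrative only, not part of the driver): the path above can be
 * exercised from an initiator with sg3_utils, assuming the LUN is visible
 * there as a disk device:
 *
 *   sg_persist --out --register --param-sark=0x1 /dev/da0
 *   sg_persist --out --reserve --prout-type=1 --param-rk=0x1 /dev/da0
 *   sg_persist --out --release --prout-type=1 --param-rk=0x1 /dev/da0
 *
 * (prout-type 1 is Write Exclusive, i.e. SPR_TYPE_WR_EX.)
 */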
8243
8244 /*
8245  * This routine handles a message from the other SC pertaining to
8246  * persistent reserve out.  All the error checking will have been done
8247  * already, so only performing the action needs to be done here to
8248  * keep the two sides in sync.
8249  */
8250 static void
8251 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8252 {
8253         struct ctl_lun *lun;
8254         struct ctl_softc *softc;
8255         int i;
8256         uint32_t targ_lun;
8257
8258         softc = control_softc;
8259
8260         mtx_lock(&softc->ctl_lock);
8261
8262         targ_lun = msg->hdr.nexus.targ_lun;
8263         if (msg->hdr.nexus.lun_map_fn != NULL)
8264                 targ_lun = msg->hdr.nexus.lun_map_fn(msg->hdr.nexus.lun_map_arg, targ_lun);
8265         lun = softc->ctl_luns[targ_lun];
8266         switch (msg->pr.pr_info.action) {
8267         case CTL_PR_REG_KEY:
8268                 if (!lun->per_res[msg->pr.pr_info.residx].registered) {
8269                         lun->per_res[msg->pr.pr_info.residx].registered = 1;
8270                         lun->pr_key_count++;
8271                 }
8272                 lun->PRGeneration++;
8273                 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key,
8274                        msg->pr.pr_info.sa_res_key,
8275                        sizeof(struct scsi_per_res_key));
8276                 break;
8277
8278         case CTL_PR_UNREG_KEY:
8279                 lun->per_res[msg->pr.pr_info.residx].registered = 0;
8280                 memset(&lun->per_res[msg->pr.pr_info.residx].res_key,
8281                        0, sizeof(struct scsi_per_res_key));
8282                 lun->pr_key_count--;
8283
8284                 /* XXX Need to see if the reservation has been released; */
8285                 /* if so, do we need to generate a UA? */
8286                 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8287                         lun->flags &= ~CTL_LUN_PR_RESERVED;
8288                         lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8289
8290                         if ((lun->res_type == SPR_TYPE_WR_EX_RO
8291                           || lun->res_type == SPR_TYPE_EX_AC_RO)
8292                          && lun->pr_key_count) {
8293                                 /*
8294                                  * If the reservation is a registrants-
8295                                  * only type, we need to generate a UA
8296                                  * for the other registered initiators.
8297                                  * The sense code should be
8298                                  * RESERVATIONS RELEASED.
8299                                  */
8300
8301                                 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8302                                         if (lun->per_res[i+
8303                                             persis_offset].registered == 0)
8304                                                 continue;
8305
8306                                         lun->pending_sense[i
8307                                                 ].ua_pending |=
8308                                                 CTL_UA_RES_RELEASE;
8309                                 }
8310                         }
8311                         lun->res_type = 0;
8312                 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8313                         if (lun->pr_key_count == 0) {
8314                                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8315                                 lun->res_type = 0;
8316                                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8317                         }
8318                 }
8319                 lun->PRGeneration++;
8320                 break;
8321
8322         case CTL_PR_RESERVE:
8323                 lun->flags |= CTL_LUN_PR_RESERVED;
8324                 lun->res_type = msg->pr.pr_info.res_type;
8325                 lun->pr_res_idx = msg->pr.pr_info.residx;
8326
8327                 break;
8328
8329         case CTL_PR_RELEASE:
8330         /*
8331          * If this isn't an exclusive access reservation, generate a UA
8332          * for all other registrants.
8333          */
8334                 if (lun->res_type != SPR_TYPE_EX_AC
8335                  && lun->res_type != SPR_TYPE_WR_EX) {
8336                         for (i = 0; i < CTL_MAX_INITIATORS; i++)
8337                                 if (lun->per_res[i+persis_offset].registered)
8338                                         lun->pending_sense[i].ua_pending |=
8339                                                 CTL_UA_RES_RELEASE;
8340                 }
8341
8342                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8343                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8344                 lun->res_type = 0;
8345                 break;
8346
8347         case CTL_PR_PREEMPT:
8348                 ctl_pro_preempt_other(lun, msg);
8349                 break;
8350         case CTL_PR_CLEAR:
8351                 lun->flags &= ~CTL_LUN_PR_RESERVED;
8352                 lun->res_type = 0;
8353                 lun->pr_key_count = 0;
8354                 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8355
8356                 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8357                         if (lun->per_res[i].registered == 0)
8358                                 continue;
8359                         if (!persis_offset
8360                          && i < CTL_MAX_INITIATORS)
8361                                 lun->pending_sense[i].ua_pending |=
8362                                         CTL_UA_RES_PREEMPT;
8363                         else if (persis_offset
8364                               && i >= persis_offset)
8365                                 lun->pending_sense[i-persis_offset].ua_pending|=
8366                                         CTL_UA_RES_PREEMPT;
8367                         memset(&lun->per_res[i].res_key, 0,
8368                                sizeof(struct scsi_per_res_key));
8369                         lun->per_res[i].registered = 0;
8370                 }
8371                 lun->PRGeneration++;
8372                 break;
8373         }
8374
8375         mtx_unlock(&softc->ctl_lock);
8376 }
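/*
 * The CTL_PR_* action codes consumed above are produced by the
 * ctl_ha_msg_send() calls in ctl_persistent_reserve_out(), so both service
 * controllers apply the same state change without repeating the error
 * checking.
 */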
8377
8378 int
8379 ctl_read_write(struct ctl_scsiio *ctsio)
8380 {
8381         struct ctl_lun *lun;
8382         struct ctl_lba_len lbalen;
8383         uint64_t lba;
8384         uint32_t num_blocks;
8385         int reladdr, fua, dpo, ebp;
8386         int retval;
8387         int isread;
8388
8389         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8390
8391         CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8392
8393         reladdr = 0;
8394         fua = 0;
8395         dpo = 0;
8396         ebp = 0;
8397
8398         retval = CTL_RETVAL_COMPLETE;
8399
8400         isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
8401               || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8402         if ((lun->flags & CTL_LUN_PR_RESERVED) && isread) {
8403                 uint32_t residx;
8404
8405                 /*
8406                  * XXX KDM need a lock here.
8407                  */
8408                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
8409                 if ((lun->res_type == SPR_TYPE_EX_AC
8410                   && residx != lun->pr_res_idx)
8411                  || ((lun->res_type == SPR_TYPE_EX_AC_RO
8412                    || lun->res_type == SPR_TYPE_EX_AC_AR)
8413                   && !lun->per_res[residx].registered)) {
8414                         ctl_set_reservation_conflict(ctsio);
8415                         ctl_done((union ctl_io *)ctsio);
8416                         return (CTL_RETVAL_COMPLETE);
8417                 }
8418         }
8419
8420         switch (ctsio->cdb[0]) {
8421         case READ_6:
8422         case WRITE_6: {
8423                 struct scsi_rw_6 *cdb;
8424
8425                 cdb = (struct scsi_rw_6 *)ctsio->cdb;
8426
8427                 lba = scsi_3btoul(cdb->addr);
8428                 /* only 5 bits are valid in the most significant address byte */
8429                 lba &= 0x1fffff;
8430                 num_blocks = cdb->length;
8431                 /*
8432                  * This is correct according to SBC-2.
8433                  */
8434                 if (num_blocks == 0)
8435                         num_blocks = 256;
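                /*
                 * e.g. the CDB 08 00 00 00 00 00 (READ(6), LBA 0,
                 * transfer length 0) therefore reads 256 blocks.
                 */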
8436                 break;
8437         }
8438         case READ_10:
8439         case WRITE_10: {
8440                 struct scsi_rw_10 *cdb;
8441
8442                 cdb = (struct scsi_rw_10 *)ctsio->cdb;
8443
8444                 if (cdb->byte2 & SRW10_RELADDR)
8445                         reladdr = 1;
8446                 if (cdb->byte2 & SRW10_FUA)
8447                         fua = 1;
8448                 if (cdb->byte2 & SRW10_DPO)
8449                         dpo = 1;
8450
8451                 if ((cdb->opcode == WRITE_10)
8452                  && (cdb->byte2 & SRW10_EBP))
8453                         ebp = 1;
8454
8455                 lba = scsi_4btoul(cdb->addr);
8456                 num_blocks = scsi_2btoul(cdb->length);
8457                 break;
8458         }
8459         case WRITE_VERIFY_10: {
8460                 struct scsi_write_verify_10 *cdb;
8461
8462                 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8463
8464                 /*
8465                  * XXX KDM we should do actual write verify support at some
8466                  * point.  This is obviously fake, we're just translating
8467                  * things to a write.  So we don't even bother checking the
8468                  * BYTCHK field, since we don't do any verification.  If
8469                  * the user asks for it, we'll just pretend we did it.
8470                  */
8471                 if (cdb->byte2 & SWV_DPO)
8472                         dpo = 1;
8473
8474                 lba = scsi_4btoul(cdb->addr);
8475                 num_blocks = scsi_2btoul(cdb->length);
8476                 break;
8477         }
8478         case READ_12:
8479         case WRITE_12: {
8480                 struct scsi_rw_12 *cdb;
8481
8482                 cdb = (struct scsi_rw_12 *)ctsio->cdb;
8483
8484                 if (cdb->byte2 & SRW12_RELADDR)
8485                         reladdr = 1;
8486                 if (cdb->byte2 & SRW12_FUA)
8487                         fua = 1;
8488                 if (cdb->byte2 & SRW12_DPO)
8489                         dpo = 1;
8490                 lba = scsi_4btoul(cdb->addr);
8491                 num_blocks = scsi_4btoul(cdb->length);
8492                 break;
8493         }
8494         case WRITE_VERIFY_12: {
8495                 struct scsi_write_verify_12 *cdb;
8496
8497                 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
8498
8499                 if (cdb->byte2 & SWV_DPO)
8500                         dpo = 1;
8501                 
8502                 lba = scsi_4btoul(cdb->addr);
8503                 num_blocks = scsi_4btoul(cdb->length);
8504
8505                 break;
8506         }
8507         case READ_16:
8508         case WRITE_16: {
8509                 struct scsi_rw_16 *cdb;
8510
8511                 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8512
8513                 if (cdb->byte2 & SRW12_RELADDR)
8514                         reladdr = 1;
8515                 if (cdb->byte2 & SRW12_FUA)
8516                         fua = 1;
8517                 if (cdb->byte2 & SRW12_DPO)
8518                         dpo = 1;
8519
8520                 lba = scsi_8btou64(cdb->addr);
8521                 num_blocks = scsi_4btoul(cdb->length);
8522                 break;
8523         }
8524         case WRITE_VERIFY_16: {
8525                 struct scsi_write_verify_16 *cdb;
8526
8527                 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
8528
8529                 if (cdb->byte2 & SWV_DPO)
8530                         dpo = 1;
8531
8532                 lba = scsi_8btou64(cdb->addr);
8533                 num_blocks = scsi_4btoul(cdb->length);
8534                 break;
8535         }
8536         default:
8537                 /*
8538                  * We got a command we don't support.  This shouldn't
8539                  * happen; commands should be filtered out above us.
8540                  */
8541                 ctl_set_invalid_opcode(ctsio);
8542                 ctl_done((union ctl_io *)ctsio);
8543
8544                 return (CTL_RETVAL_COMPLETE);
8545                 break; /* NOTREACHED */
8546         }
8547
8548         /*
8549          * XXX KDM what do we do with the DPO and FUA bits?  FUA might be
8550          * interesting for us, but if RAIDCore is in write-back mode,
8551          * getting it to do write-through for a particular transaction may
8552          * not be possible.
8553          */
8554         /*
8555          * We don't support relative addressing.  That also requires
8556          * supporting linked commands, which we don't do.
8557          */
8558         if (reladdr != 0) {
8559                 ctl_set_invalid_field(ctsio,
8560                                       /*sks_valid*/ 1,
8561                                       /*command*/ 1,
8562                                       /*field*/ 1,
8563                                       /*bit_valid*/ 1,
8564                                       /*bit*/ 0);
8565                 ctl_done((union ctl_io *)ctsio);
8566                 return (CTL_RETVAL_COMPLETE);
8567         }
8568
8569         /*
8570          * The first check is to make sure we're in bounds, the second
8571          * check is to catch wrap-around problems.  If the lba + num blocks
8572          * is less than the lba, then we've wrapped around and the block
8573          * range is invalid anyway.
8574          */
8575         if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
8576          || ((lba + num_blocks) < lba)) {
8577                 ctl_set_lba_out_of_range(ctsio);
8578                 ctl_done((union ctl_io *)ctsio);
8579                 return (CTL_RETVAL_COMPLETE);
8580         }
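        /*
         * Worked example of the second test: lba == UINT64_MAX with
         * num_blocks == 2 wraps lba + num_blocks around to 1, which is
         * less than lba, so the request is rejected even though the sum
         * alone would pass the maxlba check.
         */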
8581
8582         /*
8583          * According to SBC-3, a transfer length of 0 is not an error.
8584          * Note that this cannot happen with WRITE(6) or READ(6), since 0
8585          * translates to 256 blocks for those commands.
8586          */
8587         if (num_blocks == 0) {
8588                 ctl_set_success(ctsio);
8589                 ctl_done((union ctl_io *)ctsio);
8590                 return (CTL_RETVAL_COMPLETE);
8591         }
8592
8593         lbalen.lba = lba;
8594         lbalen.len = num_blocks;
8595         memcpy(ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen,
8596                sizeof(lbalen));
8597
8598         CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
8599
8600         retval = lun->backend->data_submit((union ctl_io *)ctsio);
8601
8602         return (retval);
8603 }
8604
8605 int
8606 ctl_report_luns(struct ctl_scsiio *ctsio)
8607 {
8608         struct scsi_report_luns *cdb;
8609         struct scsi_report_luns_data *lun_data;
8610         struct ctl_lun *lun, *request_lun;
8611         int num_luns, retval;
8612         uint32_t alloc_len, lun_datalen;
8613         int num_filled, well_known;
8614         uint32_t initidx, targ_lun_id, lun_id;
8615
8616         retval = CTL_RETVAL_COMPLETE;
8617         well_known = 0;
8618
8619         cdb = (struct scsi_report_luns *)ctsio->cdb;
8620
8621         CTL_DEBUG_PRINT(("ctl_report_luns\n"));
8622
8623         mtx_lock(&control_softc->ctl_lock);
8624         num_luns = control_softc->num_luns;
8625         mtx_unlock(&control_softc->ctl_lock);
8626
8627         switch (cdb->select_report) {
8628         case RPL_REPORT_DEFAULT:
8629         case RPL_REPORT_ALL:
8630                 break;
8631         case RPL_REPORT_WELLKNOWN:
8632                 well_known = 1;
8633                 num_luns = 0;
8634                 break;
8635         default:
8636                 ctl_set_invalid_field(ctsio,
8637                                       /*sks_valid*/ 1,
8638                                       /*command*/ 1,
8639                                       /*field*/ 2,
8640                                       /*bit_valid*/ 0,
8641                                       /*bit*/ 0);
8642                 ctl_done((union ctl_io *)ctsio);
8643                 return (retval);
8644                 break; /* NOTREACHED */
8645         }
8646
8647         alloc_len = scsi_4btoul(cdb->length);
8648         /*
8649          * The initiator has to allocate at least 16 bytes for this request,
8650          * so he can at least get the header and the first LUN.  Otherwise
8651          * we reject the request (per SPC-3 rev 14, section 6.21).
8652          */
8653         if (alloc_len < (sizeof(struct scsi_report_luns_data) +
8654             sizeof(struct scsi_report_luns_lundata))) {
8655                 ctl_set_invalid_field(ctsio,
8656                                       /*sks_valid*/ 1,
8657                                       /*command*/ 1,
8658                                       /*field*/ 6,
8659                                       /*bit_valid*/ 0,
8660                                       /*bit*/ 0);
8661                 ctl_done((union ctl_io *)ctsio);
8662                 return (retval);
8663         }
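        /*
         * e.g. an allocation length of 8 covers only the header, so it is
         * rejected above with INVALID FIELD IN CDB pointing at byte 6, the
         * first byte of the allocation length.
         */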
8664
8665         request_lun = (struct ctl_lun *)
8666                 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8667
8668         lun_datalen = sizeof(*lun_data) +
8669                 (num_luns * sizeof(struct scsi_report_luns_lundata));
8670
8671         ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
8672         lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
8673         ctsio->kern_sg_entries = 0;
8674
8675         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8676
8677         mtx_lock(&control_softc->ctl_lock);
8678         for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) {
8679                 lun_id = targ_lun_id;
8680                 if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
8681                         lun_id = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, lun_id);
8682                 if (lun_id >= CTL_MAX_LUNS)
8683                         continue;
8684                 lun = control_softc->ctl_luns[lun_id];
8685                 if (lun == NULL)
8686                         continue;
8687
8688                 if (targ_lun_id <= 0xff) {
8689                         /*
8690                          * Peripheral addressing method, bus number 0.
8691                          */
8692                         lun_data->luns[num_filled].lundata[0] =
8693                                 RPL_LUNDATA_ATYP_PERIPH;
8694                         lun_data->luns[num_filled].lundata[1] = targ_lun_id;
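                        /*
                         * e.g. LUN 5 becomes the 8-byte entry
                         * 00 05 00 00 00 00 00 00 (illustrative).
                         */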
8695                         num_filled++;
8696                 } else if (targ_lun_id <= 0x3fff) {
8697                         /*
8698                          * Flat addressing method.
8699                          */
8700                         lun_data->luns[num_filled].lundata[0] =
8701                                 RPL_LUNDATA_ATYP_FLAT |
8702                                 (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK);
8703 #ifdef OLDCTLHEADERS
8704                                 (SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) |
8705                                 (targ_lun_id & SRLD_BUS_LUN_MASK);
8706 #endif
8707                         lun_data->luns[num_filled].lundata[1] =
8708 #ifdef OLDCTLHEADERS
8709                                 targ_lun_id >> SRLD_BUS_LUN_BITS;
8710 #endif
8711                                 targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS;
8712                         num_filled++;
8713                 } else {
8714                         printf("ctl_report_luns: bogus LUN number %jd, "
8715                                "skipping\n", (intmax_t)targ_lun_id);
8716                 }
8717                 /*
8718                  * According to SPC-3, rev 14 section 6.21:
8719                  *
8720                  * "The execution of a REPORT LUNS command to any valid and
8721                  * installed logical unit shall clear the REPORTED LUNS DATA
8722                  * HAS CHANGED unit attention condition for all logical
8723                  * units of that target with respect to the requesting
8724                  * initiator. A valid and installed logical unit is one
8725                  * having a PERIPHERAL QUALIFIER of 000b in the standard
8726                  * INQUIRY data (see 6.4.2)."
8727                  *
8728                  * If request_lun is NULL, the LUN this report luns command
8729                  * was issued to is either disabled or doesn't exist. In that
8730                  * case, we shouldn't clear any pending lun change unit
8731                  * attention.
8732                  */
8733                 if (request_lun != NULL)
8734                         lun->pending_sense[initidx].ua_pending &=
8735                                 ~CTL_UA_LUN_CHANGE;
8736         }
8737         mtx_unlock(&control_softc->ctl_lock);
8738
8739         /*
8740          * It's quite possible that we've returned fewer LUNs than we allocated
8741          * space for.  Trim it.
8742          */
8743         lun_datalen = sizeof(*lun_data) +
8744                 (num_filled * sizeof(struct scsi_report_luns_lundata));
8745
8746         if (lun_datalen < alloc_len) {
8747                 ctsio->residual = alloc_len - lun_datalen;
8748                 ctsio->kern_data_len = lun_datalen;
8749                 ctsio->kern_total_len = lun_datalen;
8750         } else {
8751                 ctsio->residual = 0;
8752                 ctsio->kern_data_len = alloc_len;
8753                 ctsio->kern_total_len = alloc_len;
8754         }
8755         ctsio->kern_data_resid = 0;
8756         ctsio->kern_rel_offset = 0;
8757         ctsio->kern_sg_entries = 0;
8758
8759         /*
8760          * We set this to the actual data length, regardless of how much
8761          * space we actually have to return results.  If the user looks at
8762          * this value, he'll know whether or not he allocated enough space,
8763          * and can reissue the command if necessary.  We don't support well
8764          * known logical units, so if the user asks for that, return none.
8765          */
8766         scsi_ulto4b(lun_datalen - 8, lun_data->length);
8767
8768         /*
8769          * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
8770          * this request.
8771          */
8772         ctsio->scsi_status = SCSI_STATUS_OK;
8773
8774         ctsio->be_move_done = ctl_config_move_done;
8775         ctl_datamove((union ctl_io *)ctsio);
8776
8777         return (retval);
8778 }
8779
8780 int
8781 ctl_request_sense(struct ctl_scsiio *ctsio)
8782 {
8783         struct scsi_request_sense *cdb;
8784         struct scsi_sense_data *sense_ptr;
8785         struct ctl_lun *lun;
8786         uint32_t initidx;
8787         int have_error;
8788         scsi_sense_data_type sense_format;
8789
8790         cdb = (struct scsi_request_sense *)ctsio->cdb;
8791
8792         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8793
8794         CTL_DEBUG_PRINT(("ctl_request_sense\n"));
8795
8796         /*
8797          * Determine which sense format the user wants.
8798          */
8799         if (cdb->byte2 & SRS_DESC)
8800                 sense_format = SSD_TYPE_DESC;
8801         else
8802                 sense_format = SSD_TYPE_FIXED;
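        /*
         * The DESC bit is bit 0 of CDB byte 1: e.g. 03 01 00 00 fc 00
         * requests descriptor-format sense and 03 00 00 00 fc 00 fixed
         * format (illustrative CDBs with a 252-byte allocation length).
         */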
8803
8804         ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
8805         sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
8806         ctsio->kern_sg_entries = 0;
8807
8808         /*
8809          * struct scsi_sense_data, which is currently set to 256 bytes, is
8810          * larger than the largest allowed value for the length field in the
8811          * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
8812          */
8813         ctsio->residual = 0;
8814         ctsio->kern_data_len = cdb->length;
8815         ctsio->kern_total_len = cdb->length;
8816
8817         ctsio->kern_data_resid = 0;
8818         ctsio->kern_rel_offset = 0;
8819         ctsio->kern_sg_entries = 0;
8820
8821         /*
8822          * If we don't have a LUN, we don't have any pending sense.
8823          */
8824         if (lun == NULL)
8825                 goto no_sense;
8826
8827         have_error = 0;
8828         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
8829         /*
8830          * Check for pending sense, and then for pending unit attentions.
8831          * Pending sense gets returned first, then pending unit attentions.
8832          */
8833         mtx_lock(&lun->ctl_softc->ctl_lock);
8834         if (ctl_is_set(lun->have_ca, initidx)) {
8835                 scsi_sense_data_type stored_format;
8836
8837                 /*
8838                  * Check to see which sense format was used for the stored
8839                  * sense data.
8840                  */
8841                 stored_format = scsi_sense_type(
8842                     &lun->pending_sense[initidx].sense);
8843
8844                 /*
8845                  * If the user requested a different sense format than the
8846                  * one we stored, then we need to convert it to the other
8847                  * format.  If we're going from descriptor to fixed format
8848                  * sense data, we may lose things in translation, depending
8849                  * on what options were used.
8850                  *
8851                  * If the stored format is SSD_TYPE_NONE (i.e. invalid)
8852                  * for some reason, we'll just copy it out as-is.
8853                  */
8854                 if ((stored_format == SSD_TYPE_FIXED)
8855                  && (sense_format == SSD_TYPE_DESC))
8856                         ctl_sense_to_desc((struct scsi_sense_data_fixed *)
8857                             &lun->pending_sense[initidx].sense,
8858                             (struct scsi_sense_data_desc *)sense_ptr);
8859                 else if ((stored_format == SSD_TYPE_DESC)
8860                       && (sense_format == SSD_TYPE_FIXED))
8861                         ctl_sense_to_fixed((struct scsi_sense_data_desc *)
8862                             &lun->pending_sense[initidx].sense,
8863                             (struct scsi_sense_data_fixed *)sense_ptr);
8864                 else
8865                         memcpy(sense_ptr, &lun->pending_sense[initidx].sense,
8866                                ctl_min(sizeof(*sense_ptr),
8867                                sizeof(lun->pending_sense[initidx].sense)));
8868
8869                 ctl_clear_mask(lun->have_ca, initidx);
8870                 have_error = 1;
8871         } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) {
8872                 ctl_ua_type ua_type;
8873
8874                 ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending,
8875                                        sense_ptr, sense_format);
8876                 if (ua_type != CTL_UA_NONE) {
8877                         have_error = 1;
8878                         /* We're reporting this UA, so clear it */
8879                         lun->pending_sense[initidx].ua_pending &= ~ua_type;
8880                 }
8881         }
8882         mtx_unlock(&lun->ctl_softc->ctl_lock);
8883
8884         /*
8885          * We already have a pending error; return it.
8886          */
8887         if (have_error != 0) {
8888                 /*
8889                  * We report the SCSI status as OK, since the status of the
8890                  * request sense command itself is OK.
8891                  */
8892                 ctsio->scsi_status = SCSI_STATUS_OK;
8893
8894                 /*
8895                  * We report 0 for the sense length, because we aren't doing
8896                  * autosense in this case.  We're reporting sense as
8897                  * parameter data.
8898                  */
8899                 ctsio->sense_len = 0;
8900
8901                 ctsio->be_move_done = ctl_config_move_done;
8902                 ctl_datamove((union ctl_io *)ctsio);
8903
8904                 return (CTL_RETVAL_COMPLETE);
8905         }
8906
8907 no_sense:
8908
8909         /*
8910          * No sense information to report, so we report that everything is
8911          * okay.
8912          */
8913         ctl_set_sense_data(sense_ptr,
8914                            lun,
8915                            sense_format,
8916                            /*current_error*/ 1,
8917                            /*sense_key*/ SSD_KEY_NO_SENSE,
8918                            /*asc*/ 0x00,
8919                            /*ascq*/ 0x00,
8920                            SSD_ELEM_NONE);
8921
8922         ctsio->scsi_status = SCSI_STATUS_OK;
8923
8924         /*
8925          * We report 0 for the sense length, because we aren't doing
8926          * autosense in this case.  We're reporting sense as parameter data.
8927          */
8928         ctsio->sense_len = 0;
8929         ctsio->be_move_done = ctl_config_move_done;
8930         ctl_datamove((union ctl_io *)ctsio);
8931
8932         return (CTL_RETVAL_COMPLETE);
8933 }
8934
8935 int
8936 ctl_tur(struct ctl_scsiio *ctsio)
8937 {
8938         struct ctl_lun *lun;
8939
8940         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8941
8942         CTL_DEBUG_PRINT(("ctl_tur\n"));
8943
8944         if (lun == NULL)
8945                 return (-EINVAL);
8946
8947         ctsio->scsi_status = SCSI_STATUS_OK;
8948         ctsio->io_hdr.status = CTL_SUCCESS;
8949
8950         ctl_done((union ctl_io *)ctsio);
8951
8952         return (CTL_RETVAL_COMPLETE);
8953 }
8954
8955 #ifdef notyet
8956 static int
8957 ctl_cmddt_inquiry(struct ctl_scsiio *ctsio)
8958 {
8959
8960 }
8961 #endif
8962
8963 static int
8964 ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
8965 {
8966         struct scsi_vpd_supported_pages *pages;
8967         int sup_page_size;
8968         struct ctl_lun *lun;
8969
8970         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8971
8972         sup_page_size = sizeof(struct scsi_vpd_supported_pages) +
8973                 SCSI_EVPD_NUM_SUPPORTED_PAGES;
8974         ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
8975         pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
8976         ctsio->kern_sg_entries = 0;
8977
8978         if (sup_page_size < alloc_len) {
8979                 ctsio->residual = alloc_len - sup_page_size;
8980                 ctsio->kern_data_len = sup_page_size;
8981                 ctsio->kern_total_len = sup_page_size;
8982         } else {
8983                 ctsio->residual = 0;
8984                 ctsio->kern_data_len = alloc_len;
8985                 ctsio->kern_total_len = alloc_len;
8986         }
8987         ctsio->kern_data_resid = 0;
8988         ctsio->kern_rel_offset = 0;
8989         ctsio->kern_sg_entries = 0;
8990
8991         /*
8992          * The control device is always connected.  The disk device, on the
8993          * other hand, may not be online all the time.  Need to change this
8994          * to figure out whether the disk device is actually online or not.
8995          */
8996         if (lun != NULL)
8997                 pages->device = (SID_QUAL_LU_CONNECTED << 5) |
8998                                 lun->be_lun->lun_type;
8999         else
9000                 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9001
9002         pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES;
9003         /* Supported VPD pages */
9004         pages->page_list[0] = SVPD_SUPPORTED_PAGES;
9005         /* Serial Number */
9006         pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER;
9007         /* Device Identification */
9008         pages->page_list[2] = SVPD_DEVICE_ID;
9009
9010         ctsio->scsi_status = SCSI_STATUS_OK;
9011
9012         ctsio->be_move_done = ctl_config_move_done;
9013         ctl_datamove((union ctl_io *)ctsio);
9014
9015         return (CTL_RETVAL_COMPLETE);
9016 }
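/*
 * Illustrative only: an initiator fetches the page built above with an
 * INQUIRY CDB of 12 01 00 00 ff 00 (EVPD=1, page code 0x00); the response
 * lists pages 0x00, 0x80 and 0x83, matching the page_list entries set above.
 */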
9017
9018 static int
9019 ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9020 {
9021         struct scsi_vpd_unit_serial_number *sn_ptr;
9022         struct ctl_lun *lun;
9023 #ifndef CTL_USE_BACKEND_SN
9024         char tmpstr[32];
9025 #endif
9026
9027         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9028
9029         ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO);
9030         sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9031         ctsio->kern_sg_entries = 0;
9032
9033         if (sizeof(*sn_ptr) < alloc_len) {
9034                 ctsio->residual = alloc_len - sizeof(*sn_ptr);
9035                 ctsio->kern_data_len = sizeof(*sn_ptr);
9036                 ctsio->kern_total_len = sizeof(*sn_ptr);
9037         } else {
9038                 ctsio->residual = 0;
9039                 ctsio->kern_data_len = alloc_len;
9040                 ctsio->kern_total_len = alloc_len;
9041         }
9042         ctsio->kern_data_resid = 0;
9043         ctsio->kern_rel_offset = 0;
9044         ctsio->kern_sg_entries = 0;
9045
9046         /*
9047          * The control device is always connected.  The disk device, on the
9048          * other hand, may not be online all the time.  Need to change this
9049          * to figure out whether the disk device is actually online or not.
9050          */
9051         if (lun != NULL)
9052                 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9053                                   lun->be_lun->lun_type;
9054         else
9055                 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9056
9057         sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
9058         sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN);
9059 #ifdef CTL_USE_BACKEND_SN
9060         /*
9061          * If we don't have a LUN, we just leave the serial number as
9062          * all spaces.
9063          */
9064         memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
9065         if (lun != NULL) {
9066                 strncpy((char *)sn_ptr->serial_num,
9067                         (char *)lun->be_lun->serial_num, CTL_SN_LEN);
9068         }
9069 #else
9070         /*
9071          * Note that we're using a non-unique serial number here.
9072          */
9073         snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000");
9074         memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num));
9075         strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN,
9076                 ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4)));
9077 #endif
9078         ctsio->scsi_status = SCSI_STATUS_OK;
9079
9080         ctsio->be_move_done = ctl_config_move_done;
9081         ctl_datamove((union ctl_io *)ctsio);
9082
9083         return (CTL_RETVAL_COMPLETE);
9084 }
9085
9086
9087 static int
9088 ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9089 {
9090         struct scsi_vpd_device_id *devid_ptr;
9091         struct scsi_vpd_id_descriptor *desc, *desc1;
9092         struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */
9093         struct scsi_vpd_id_t10 *t10id;
9094         struct ctl_softc *ctl_softc;
9095         struct ctl_lun *lun;
9096         struct ctl_frontend *fe;
9097 #ifndef CTL_USE_BACKEND_SN
9098         char tmpstr[32];
9099 #endif /* CTL_USE_BACKEND_SN */
9100         int devid_len;
9101
9102         ctl_softc = control_softc;
9103
9104         mtx_lock(&ctl_softc->ctl_lock);
9105         fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
9106         mtx_unlock(&ctl_softc->ctl_lock);
9107
9108         if (fe->devid != NULL)
9109                 return ((fe->devid)(ctsio, alloc_len));
9110
9111         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9112
9113         devid_len = sizeof(struct scsi_vpd_device_id) +
9114                 sizeof(struct scsi_vpd_id_descriptor) +
9115                 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN +
9116                 sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN +
9117                 sizeof(struct scsi_vpd_id_descriptor) +
9118                 sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9119                 sizeof(struct scsi_vpd_id_descriptor) +
9120                 sizeof(struct scsi_vpd_id_trgt_port_grp_id);
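        /*
         * devid_len sums the page header plus four descriptors laid out
         * back to back: a T10 vendor ID descriptor (header + t10id +
         * CTL_DEVID_LEN bytes of vendor-specific ID), an NAA WWPN
         * descriptor (header + CTL_WWPN_LEN bytes), a relative target
         * port descriptor, and a target port group descriptor.  The
         * desc/desc1/desc2/desc3 pointers computed below walk this same
         * layout, so the two must be kept in sync.
         */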
9121
9122         ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO);
9123         devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
9124         ctsio->kern_sg_entries = 0;
9125
9126         if (devid_len < alloc_len) {
9127                 ctsio->residual = alloc_len - devid_len;
9128                 ctsio->kern_data_len = devid_len;
9129                 ctsio->kern_total_len = devid_len;
9130         } else {
9131                 ctsio->residual = 0;
9132                 ctsio->kern_data_len = alloc_len;
9133                 ctsio->kern_total_len = alloc_len;
9134         }
9135         ctsio->kern_data_resid = 0;
9136         ctsio->kern_rel_offset = 0;
9137         ctsio->kern_sg_entries = 0;
9138
9139         desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;
9140         t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
9141         desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
9142                 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN);
9143         desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] +
9144                   CTL_WWPN_LEN);
9145         desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] +
9146                  sizeof(struct scsi_vpd_id_rel_trgt_port_id));
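
#if 0
        /*
         * Illustrative sanity check, not compiled in: the last descriptor
         * plus its payload must land inside the devid_len allocation
         * computed above.  Since the pointer arithmetic above mirrors the
         * devid_len sum term for term, this holds by construction.
         */
        KASSERT((uint8_t *)&desc3->identifier[0] +
            sizeof(struct scsi_vpd_id_trgt_port_grp_id) <=
            (uint8_t *)devid_ptr + devid_len,
            ("VPD 0x83 descriptor chain overruns its allocation"));
#endif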
9147
9148         /*
9149          * The control device is always connected.  The disk device, on the
9150          * other hand, may not be online all the time.
9151          */
9152         if (lun != NULL)
9153                 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9154                                      lun->be_lun->lun_type;
9155         else
9156                 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9157
9158         devid_ptr->page_code = SVPD_DEVICE_ID;
9159
9160         scsi_ulto2b(devid_len - 4, devid_ptr->length);
9161
9162         mtx_lock(&ctl_softc->ctl_lock);
9163
9164         /*
9165          * For Fibre Channel, use the FC protocol id; otherwise use SPI.
9166          */
9167         if (fe->port_type == CTL_PORT_FC)
9168         {
9169                 desc->proto_codeset = (SCSI_PROTO_FC << 4) |
9170                                       SVPD_ID_CODESET_ASCII;
9171                 desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
9172                               SVPD_ID_CODESET_BINARY;
9173         }
9174         else
9175         {
9176                 desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
9177                                       SVPD_ID_CODESET_ASCII;
9178                 desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
9179                               SVPD_ID_CODESET_BINARY;
9180         }
9181         desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
9182         mtx_unlock(&ctl_softc->ctl_lock);
9183
9184         /*
9185          * We're using a LUN association here; i.e., this device ID is a
9186          * per-LUN identifier.
9187          */
9188         desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
9189         desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
9190         strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
9191
9192         /*
9193          * desc1 is for the WWPN, which is a port association.
9194          */
9195         desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
9196         desc1->length = CTL_WWPN_LEN;
9197         /* XXX Call Reggie's get_WWNN func here then add port # to the end */
9198         /* For testing just create the WWPN */
9199 #if 0
9200         ddb_GetWWNN((char *)desc1->identifier);
9201
9202         /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
9203         /* This is so Copancontrol will return something sane */
9204         if (ctsio->io_hdr.nexus.targ_port!=0 &&
9205             ctsio->io_hdr.nexus.targ_port!=8)
9206                 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
9207         else
9208                 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
9209 #endif
9210
9211         be64enc(desc1->identifier, fe->wwpn);
9212
9213         /*
9214          * desc2 is for the Relative Target Port (type 4h) identifier.
9215          */
9216         desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
9217                          | SVPD_ID_TYPE_RELTARG;
9218         desc2->length = 4;
9219 //#if 0
9220         /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
9221         /* This is so Copancontrol will return something sane */
9222         if (ctsio->io_hdr.nexus.targ_port!=0 &&
9223             ctsio->io_hdr.nexus.targ_port!=8)
9224                 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
9225         else
9226                 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
9227 //#endif
9228
9229         /*
9230          * desc3 is for the Target Port Group (type 5h) identifier.
9231          */
9232         desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
9233                          | SVPD_ID_TYPE_TPORTGRP;
9234         desc3->length = 4;
9235         if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
9236                 desc3->identifier[3] = 1;
9237         else
9238                 desc3->identifier[3] = 2;
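        /*
         * In other words: ports on this controller (targ_port below
         * CTL_MAX_PORTS, or any port in single-controller mode) report
         * target port group 1, while ports belonging to the peer
         * controller report group 2.
         */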
9239
9240 #ifdef CTL_USE_BACKEND_SN
9241         /*
9242          * If we've actually got a backend, copy the device id from the
9243          * per-LUN data.  Otherwise, set it to all spaces.
9244          */
9245         if (lun != NULL) {
9246                 /*
9247                  * Copy the backend's LUN ID.
9248                  */
9249                 strncpy((char *)t10id->vendor_spec_id,
9250                         (char *)lun->be_lun->device_id, CTL_DEVID_LEN);
9251         } else {
9252                 /*
9253                  * No backend, set this to spaces.
9254                  */
9255                 memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN);
9256         }
9257 #else
9258         snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d",
9259                  (lun != NULL) ?  (int)lun->lun : 0);
9260         strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN,
9261                 sizeof(tmpstr)));
9262 #endif
9263
9264         ctsio->scsi_status = SCSI_STATUS_OK;
9265
9266         ctsio->be_move_done = ctl_config_move_done;
9267         ctl_datamove((union ctl_io *)ctsio);
9268
9269         return (CTL_RETVAL_COMPLETE);
9270 }
9271
9272 static int
9273 ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
9274 {
9275         struct scsi_inquiry *cdb;
9276         int alloc_len, retval;
9277
9278         cdb = (struct scsi_inquiry *)ctsio->cdb;
9279
9280         retval = CTL_RETVAL_COMPLETE;
9281
9282         alloc_len = scsi_2btoul(cdb->length);
9283
9284         switch (cdb->page_code) {
9285         case SVPD_SUPPORTED_PAGES:
9286                 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
9287                 break;
9288         case SVPD_UNIT_SERIAL_NUMBER:
9289                 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
9290                 break;
9291         case SVPD_DEVICE_ID:
9292                 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
9293                 break;
9294         default:
9295                 ctl_set_invalid_field(ctsio,
9296                                       /*sks_valid*/ 1,
9297                                       /*command*/ 1,
9298                                       /*field*/ 2,
9299                                       /*bit_valid*/ 0,
9300                                       /*bit*/ 0);
9301                 ctl_done((union ctl_io *)ctsio);
9302                 retval = CTL_RETVAL_COMPLETE;
9303                 break;
9304         }
9305
9306         return (retval);
9307 }
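
#if 0
/*
 * Illustrative sketch, not compiled in: the 6-byte INQUIRY CDB that
 * drives the dispatch above.  Requesting page 0x83 with a 255-byte
 * allocation length, as below, would land in ctl_inquiry_evpd_devid().
 */
static void
example_build_evpd_inquiry(struct scsi_inquiry *cdb)
{
        memset(cdb, 0, sizeof(*cdb));
        cdb->opcode = INQUIRY;
        cdb->byte2 = SI_EVPD;
        cdb->page_code = SVPD_DEVICE_ID;
        scsi_ulto2b(255, cdb->length);
}
#endif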
9308
9309 static int
9310 ctl_inquiry_std(struct ctl_scsiio *ctsio)
9311 {
9312         struct scsi_inquiry_data *inq_ptr;
9313         struct scsi_inquiry *cdb;
9314         struct ctl_softc *ctl_softc;
9315         struct ctl_lun *lun;
9316         uint32_t alloc_len;
9317         int is_fc;
9318
9319         ctl_softc = control_softc;
9320
9321         /*
9322          * Figure out whether we're talking to a Fibre Channel port or not.
9323          * We treat the ioctl front end, and any SCSI adapters, as packetized
9324          * SCSI front ends.
9325          */
9326         mtx_lock(&ctl_softc->ctl_lock);
9327         if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type !=
9328             CTL_PORT_FC)
9329                 is_fc = 0;
9330         else
9331                 is_fc = 1;
9332         mtx_unlock(&ctl_softc->ctl_lock);
9333
9334         lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9335         cdb = (struct scsi_inquiry *)ctsio->cdb;
9336         alloc_len = scsi_2btoul(cdb->length);
9337
9338         /*
9339          * We malloc the full inquiry data size here and fill it
9340          * in.  If the user only asks for less, we'll give him
9341          * that much.
9342          */
9343         ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO);
9344         inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
9345         ctsio->kern_sg_entries = 0;
9346         ctsio->kern_data_resid = 0;
9347         ctsio->kern_rel_offset = 0;
9348
9349         if (sizeof(*inq_ptr) < alloc_len) {
9350                 ctsio->residual = alloc_len - sizeof(*inq_ptr);
9351                 ctsio->kern_data_len = sizeof(*inq_ptr);
9352                 ctsio->kern_total_len = sizeof(*inq_ptr);
9353         } else {
9354                 ctsio->residual = 0;
9355                 ctsio->kern_data_len = alloc_len;
9356                 ctsio->kern_total_len = alloc_len;
9357         }
9358
9359         /*
9360          * If we have a LUN configured, report it as connected.  Otherwise,
9361          * report that it is offline or no device is supported, depending 
9362          * on the value of inquiry_pq_no_lun.
9363          *
9364          * According to the spec (SPC-4 r34), the peripheral qualifier
9365          * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
9366          *
9367          * "A peripheral device having the specified peripheral device type 
9368          * is not connected to this logical unit. However, the device
9369          * server is capable of supporting the specified peripheral device
9370          * type on this logical unit."
9371          *
9372          * According to the same spec, the peripheral qualifier
9373          * SID_QUAL_BAD_LU (011b) is used in this scenario:
9374          *
9375          * "The device server is not capable of supporting a peripheral
9376          * device on this logical unit. For this peripheral qualifier the
9377          * peripheral device type shall be set to 1Fh. All other peripheral
9378          * device type values are reserved for this peripheral qualifier."
9379          *
9380          * Given the text, it would seem that we probably want to report that
9381          * the LUN is offline here.  There is no LUN connected, but we can
9382          * support a LUN at the given LUN number.
9383          *
9384          * In the real world, though, it sounds like things are a little
9385          * different:
9386          *
9387          * - Linux, when presented with a LUN with the offline peripheral
9388          *   qualifier, will create an sg driver instance for it.  So when
9389          *   you attach it to CTL, you wind up with a ton of sg driver
9390          *   instances.  (One for every LUN that Linux bothered to probe.)
9391          *   Linux does this despite the fact that it issues a REPORT LUNS
9392          *   to LUN 0 to get the inventory of supported LUNs.
9393          *
9394          * - There is other anecdotal evidence (from Emulex folks) about
9395          *   arrays that use the offline peripheral qualifier for LUNs that
9396          *   are on the "passive" path in an active/passive array.
9397          *
9398          * So the solution is to provide a hopefully reasonable default
9399          * (return bad/no LUN) and allow the user to change the behavior
9400          * with a tunable/sysctl variable.
9401          */
9402         if (lun != NULL)
9403                 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9404                                   lun->be_lun->lun_type;
9405         else if (ctl_softc->inquiry_pq_no_lun == 0)
9406                 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9407         else
9408                 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;
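        /*
         * For example (illustrative): a connected direct-access LUN
         * encodes as (SID_QUAL_LU_CONNECTED << 5) | T_DIRECT == 0x00,
         * while the "bad LUN" default above encodes as
         * (SID_QUAL_BAD_LU << 5) | T_NODEVICE == 0x7f, the classic
         * "no device at this LUN" byte.
         */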
9409
9410         /* RMB in byte 2 is 0 */
9411         inq_ptr->version = SCSI_REV_SPC3;
9412
9413         /*
9414          * According to SAM-3, even if a device only supports a single
9415          * level of LUN addressing, it should still set the HISUP bit:
9416          *
9417          * 4.9.1 Logical unit numbers overview
9418          *
9419          * All logical unit number formats described in this standard are
9420          * hierarchical in structure even when only a single level in that
9421          * hierarchy is used. The HISUP bit shall be set to one in the
9422          * standard INQUIRY data (see SPC-2) when any logical unit number
9423          * format described in this standard is used.  Non-hierarchical
9424          * formats are outside the scope of this standard.
9425          *
9426          * Therefore we set the HiSup bit here.
9427          *
9428          * The response format is 2, per SPC-3.
9429          */
9430         inq_ptr->response_format = SID_HiSup | 2;
9431
9432         inq_ptr->additional_length = sizeof(*inq_ptr) - 4;
9433         CTL_DEBUG_PRINT(("additional_length = %d\n",
9434                          inq_ptr->additional_length));
9435
9436         inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT;
9437         /* 16 bit addressing */
9438         if (is_fc == 0)
9439                 inq_ptr->spc2_flags = SPC2_SID_ADDR16;
9440         /* XXX set the SID_MultiP bit here if we're actually going to
9441            respond on multiple ports */
9442         inq_ptr->spc2_flags |= SPC2_SID_MultiP;
9443
9444         /* 16 bit data bus, synchronous transfers */
9445         /* XXX these flags don't apply for FC */
9446         if (is_fc == 0)
9447                 inq_ptr->flags = SID_WBus16 | SID_Sync;
9448         /*
9449          * XXX KDM do we want to support tagged queueing on the control
9450          * device at all?
9451          */
9452         if ((lun == NULL)
9453          || (lun->be_lun->lun_type != T_PROCESSOR))
9454                 inq_ptr->flags |= SID_CmdQue;
9455         /*
9456          * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
9457          * We have 8 bytes for the vendor name, 16 bytes for the product
9458          * name, and 4 bytes for the revision.
9459          */
9460         strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
9461         if (lun == NULL) {
9462                 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
9463         } else {
9464                 switch (lun->be_lun->lun_type) {
9465                 case T_DIRECT:
9466                         strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT);
9467                         break;
9468                 case T_PROCESSOR:
9469                         strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT);
9470                         break;
9471                 default:
9472                         strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT);
9473                         break;
9474                 }
9475         }
9476
9477         /*
9478          * XXX make this a macro somewhere so it automatically gets
9479          * incremented when we make changes.
9480          */
9481         strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
9482
9483         /*
9484          * For parallel SCSI, we support double transition and single
9485          * transition clocking.  We also support QAS (Quick Arbitration
9486          * and Selection) and Information Unit transfers on both the
9487          * control and array devices.
9488          */
9489         if (is_fc == 0)
9490                 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
9491                                     SID_SPI_IUS;
9492
9493         /* SAM-3 */
9494         scsi_ulto2b(0x0060, inq_ptr->version1);
9495         /* SPC-3 (no version claimed) XXX should we claim a version? */
9496         scsi_ulto2b(0x0300, inq_ptr->version2);
9497         if (is_fc) {
9498                 /* FCP-2 ANSI INCITS.350:2003 */
9499                 scsi_ulto2b(0x0917, inq_ptr->version3);
9500         } else {
9501                 /* SPI-4 ANSI INCITS.362:200x */
9502                 scsi_ulto2b(0x0B56, inq_ptr->version3);
9503         }
9504
9505         if (lun == NULL) {
9506                 /* SBC-2 (no version claimed) XXX should we claim a version? */
9507                 scsi_ulto2b(0x0320, inq_ptr->version4);
9508         } else {
9509                 switch (lun->be_lun->lun_type) {
9510                 case T_DIRECT:
9511                         /*
9512                          * SBC-2 (no version claimed) XXX should we claim a
9513                          * version?
9514                          */
9515                         scsi_ulto2b(0x0320, inq_ptr->version4);
9516                         break;
9517                 case T_PROCESSOR:
9518                 default:
9519                         break;
9520                 }
9521         }
9522
9523         ctsio->scsi_status = SCSI_STATUS_OK;
9524         if (ctsio->kern_data_len > 0) {
9525                 ctsio->be_move_done = ctl_config_move_done;
9526                 ctl_datamove((union ctl_io *)ctsio);
9527         } else {
9528                 ctsio->io_hdr.status = CTL_SUCCESS;
9529                 ctl_done((union ctl_io *)ctsio);
9530         }
9531
9532         return (CTL_RETVAL_COMPLETE);
9533 }
9534
9535 int
9536 ctl_inquiry(struct ctl_scsiio *ctsio)
9537 {
9538         struct scsi_inquiry *cdb;
9539         int retval;
9540
9541         cdb = (struct scsi_inquiry *)ctsio->cdb;
9542
9543         retval = 0;
9544
9545         CTL_DEBUG_PRINT(("ctl_inquiry\n"));
9546
9547         /*
9548          * Right now, we don't support the CmdDt inquiry information.
9549          * This would be nice to support in the future.  When we do
9550          * support it, we should change this test so that it checks to make
9551          * sure SI_EVPD and SI_CMDDT aren't both set at the same time.
9552          */
9553 #ifdef notyet
9554         if (((cdb->byte2 & SI_EVPD)
9555          && (cdb->byte2 & SI_CMDDT)))
9556 #endif
9557         if (cdb->byte2 & SI_CMDDT) {
9558                 /*
9559                  * Point to the SI_CMDDT bit.  We might change this
9560                  * when we support SI_CMDDT, but since both bits would be
9561                  * "wrong", this should probably just stay as-is then.
9562                  */
9563                 ctl_set_invalid_field(ctsio,
9564                                       /*sks_valid*/ 1,
9565                                       /*command*/ 1,
9566                                       /*field*/ 1,
9567                                       /*bit_valid*/ 1,
9568                                       /*bit*/ 1);
9569                 ctl_done((union ctl_io *)ctsio);
9570                 return (CTL_RETVAL_COMPLETE);
9571         }
9572         if (cdb->byte2 & SI_EVPD)
9573                 retval = ctl_inquiry_evpd(ctsio);
9574 #ifdef notyet
9575         else if (cdb->byte2 & SI_CMDDT)
9576                 retval = ctl_inquiry_cmddt(ctsio);
9577 #endif
9578         else
9579                 retval = ctl_inquiry_std(ctsio);
9580
9581         return (retval);
9582 }
9583
9584 /*
9585  * For known CDB types, parse the LBA and length.
9586  */
9587 static int
9588 ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len)
9589 {
9590         if (io->io_hdr.io_type != CTL_IO_SCSI)
9591                 return (1);
9592
9593         switch (io->scsiio.cdb[0]) {
9594         case READ_6:
9595         case WRITE_6: {
9596                 struct scsi_rw_6 *cdb;
9597
9598                 cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
9599
9600                 *lba = scsi_3btoul(cdb->addr);
9601                 /* only 5 bits are valid in the most significant address byte */
9602                 *lba &= 0x1fffff;
9603                 *len = cdb->length;
9604                 break;
9605         }
9606         case READ_10:
9607         case WRITE_10: {
9608                 struct scsi_rw_10 *cdb;
9609
9610                 cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
9611
9612                 *lba = scsi_4btoul(cdb->addr);
9613                 *len = scsi_2btoul(cdb->length);
9614                 break;
9615         }
9616         case WRITE_VERIFY_10: {
9617                 struct scsi_write_verify_10 *cdb;
9618
9619                 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
9620
9621                 *lba = scsi_4btoul(cdb->addr);
9622                 *len = scsi_2btoul(cdb->length);
9623                 break;
9624         }
9625         case READ_12:
9626         case WRITE_12: {
9627                 struct scsi_rw_12 *cdb;
9628
9629                 cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
9630
9631                 *lba = scsi_4btoul(cdb->addr);
9632                 *len = scsi_4btoul(cdb->length);
9633                 break;
9634         }
9635         case WRITE_VERIFY_12: {
9636                 struct scsi_write_verify_12 *cdb;
9637
9638                 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
9639
9640                 *lba = scsi_4btoul(cdb->addr);
9641                 *len = scsi_4btoul(cdb->length);
9642                 break;
9643         }
9644         case READ_16:
9645         case WRITE_16: {
9646                 struct scsi_rw_16 *cdb;
9647
9648                 cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
9649
9650                 *lba = scsi_8btou64(cdb->addr);
9651                 *len = scsi_4btoul(cdb->length);
9652                 break;
9653         }
9654         case WRITE_VERIFY_16: {
9655                 struct scsi_write_verify_16 *cdb;
9656
9657                 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
9658
9660                 *lba = scsi_8btou64(cdb->addr);
9661                 *len = scsi_4btoul(cdb->length);
9662                 break;
9663         }
9664         default:
9665                 return (1);
9666                 break; /* NOTREACHED */
9667         }
9668
9669         return (0);
9670 }
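
#if 0
/*
 * Illustrative sketch, not compiled in: the READ(6) decode above in
 * isolation, with hypothetical CDB bytes.  Note the 21-bit LBA mask,
 * and that SBC defines a 6-byte CDB transfer length of 0 to mean 256
 * blocks; the switch above stores the raw byte.
 */
static void
example_decode_rw6(void)
{
        uint8_t cdb[6] = { READ_6, 0x1f, 0xff, 0xff, 0x08, 0x00 };
        uint64_t lba;
        uint32_t len;

        lba = scsi_3btoul(&cdb[1]) & 0x1fffff;  /* 0x1fffff */
        len = cdb[4];                           /* 8 blocks */
        printf("lba %ju, len %u\n", (uintmax_t)lba, len);
}
#endif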
9671
9672 static ctl_action
9673 ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2)
9674 {
9675         uint64_t endlba1, endlba2;
9676
9677         endlba1 = lba1 + len1 - 1;
9678         endlba2 = lba2 + len2 - 1;
9679
9680         if ((endlba1 < lba2)
9681          || (endlba2 < lba1))
9682                 return (CTL_ACTION_PASS);
9683         else
9684                 return (CTL_ACTION_BLOCK);
9685 }
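
/*
 * Worked example (illustrative): a command spanning 8 blocks at LBA 100
 * covers LBAs 100..107.  A second command starting at LBA 108 passes
 * (endlba1 == 107 < lba2 == 108), while one starting at LBA 107
 * overlaps the first and blocks.
 */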
9686
9687 static ctl_action
9688 ctl_extent_check(union ctl_io *io1, union ctl_io *io2)
9689 {
9690         uint64_t lba1, lba2;
9691         uint32_t len1, len2;
9692         int retval;
9693
9694         retval = ctl_get_lba_len(io1, &lba1, &len1);
9695         if (retval != 0)
9696                 return (CTL_ACTION_ERROR);
9697
9698         retval = ctl_get_lba_len(io2, &lba2, &len2);
9699         if (retval != 0)
9700                 return (CTL_ACTION_ERROR);
9701
9702         return (ctl_extent_check_lba(lba1, len1, lba2, len2));
9703 }
9704
9705 static ctl_action
9706 ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io)
9707 {
9708         struct ctl_cmd_entry *pending_entry, *ooa_entry;
9709         ctl_serialize_action *serialize_row;
9710
9711         /*
9712          * The initiator attempted multiple untagged commands at the same
9713          * time.  Can't do that.
9714          */
9715         if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
9716          && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
9717          && ((pending_io->io_hdr.nexus.targ_port ==
9718               ooa_io->io_hdr.nexus.targ_port)
9719           && (pending_io->io_hdr.nexus.initid.id ==
9720               ooa_io->io_hdr.nexus.initid.id))
9721          && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
9722                 return (CTL_ACTION_OVERLAP);
9723
9724         /*
9725          * The initiator attempted to send multiple tagged commands with
9726          * the same ID.  (It's fine if different initiators have the same
9727          * tag ID.)
9728          *
9729          * Even if all of those conditions are true, we don't kill the I/O
9730          * if the command ahead of us has been aborted.  We won't end up
9731          * sending it to the FETD, and it's perfectly legal to resend a
9732          * command with the same tag number as long as the previous
9733          * instance of this tag number has been aborted somehow.
9734          */
9735         if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
9736          && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
9737          && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
9738          && ((pending_io->io_hdr.nexus.targ_port ==
9739               ooa_io->io_hdr.nexus.targ_port)
9740           && (pending_io->io_hdr.nexus.initid.id ==
9741               ooa_io->io_hdr.nexus.initid.id))
9742          && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0))
9743                 return (CTL_ACTION_OVERLAP_TAG);
9744
9745         /*
9746          * If we get a head of queue tag, SAM-3 says that we should
9747          * immediately execute it.
9748          *
9749          * What happens if this command would normally block for some other
9750          * reason?  e.g. a request sense with a head of queue tag
9751          * immediately after a write.  Normally that would block, but this
9752          * will result in its getting executed immediately...
9753          *
9754          * We currently return "pass" instead of "skip", so we'll end up
9755          * going through the rest of the queue to check for overlapped tags.
9756          *
9757          * XXX KDM check for other types of blockage first??
9758          */
9759         if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
9760                 return (CTL_ACTION_PASS);
9761
9762         /*
9763          * Ordered tags have to block until all items ahead of them
9764          * have completed.  If we get called with an ordered tag, we always
9765          * block, if something else is ahead of us in the queue.
9766          */
9767         if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
9768                 return (CTL_ACTION_BLOCK);
9769
9770         /*
9771          * Simple tags get blocked until all head of queue and ordered tags
9772          * ahead of them have completed.  I'm lumping untagged commands in
9773          * with simple tags here.  XXX KDM is that the right thing to do?
9774          */
9775         if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
9776           || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
9777          && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
9778           || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
9779                 return (CTL_ACTION_BLOCK);
9780
9781         pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]];
9782         ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]];
9783
9784         serialize_row = ctl_serialize_table[ooa_entry->seridx];
9785
9786         switch (serialize_row[pending_entry->seridx]) {
9787         case CTL_SER_BLOCK:
9788                 return (CTL_ACTION_BLOCK);
9789                 break; /* NOTREACHED */
9790         case CTL_SER_EXTENT:
9791                 return (ctl_extent_check(pending_io, ooa_io));
9792                 break; /* NOTREACHED */
9793         case CTL_SER_PASS:
9794                 return (CTL_ACTION_PASS);
9795                 break; /* NOTREACHED */
9796         case CTL_SER_SKIP:
9797                 return (CTL_ACTION_SKIP);
9798                 break;
9799         default:
9800                 panic("invalid serialization value %d",
9801                       serialize_row[pending_entry->seridx]);
9802                 break; /* NOTREACHED */
9803         }
9804
9805         return (CTL_ACTION_ERROR);
9806 }
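
/*
 * Reading the table lookup above (illustrative): with a WRITE ahead of
 * an incoming READ on the same LUN, the row for the WRITE's seridx is
 * indexed by the READ's seridx.  A CTL_SER_EXTENT entry there means the
 * two commands block each other only when their LBA ranges actually
 * overlap, per ctl_extent_check(); CTL_SER_PASS means they may run
 * concurrently regardless.
 */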
9807
9808 /*
9809  * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
9810  * Assumptions:
9811  * - pending_io is generally either incoming, or on the blocked queue
9812  * - starting I/O is the I/O we want to start the check with.
9813  */
9814 static ctl_action
9815 ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
9816               union ctl_io *starting_io)
9817 {
9818         union ctl_io *ooa_io;
9819         ctl_action action;
9820
9821         mtx_assert(&control_softc->ctl_lock, MA_OWNED);
9822
9823         /*
9824          * Run back along the OOA queue, starting with the current
9825          * blocked I/O and going through every I/O before it on the
9826          * queue.  If starting_io is NULL, we'll just end up returning
9827          * CTL_ACTION_PASS.
9828          */
9829         for (ooa_io = starting_io; ooa_io != NULL;
9830              ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
9831              ooa_links)) {
9832
9833                 /*
9834                  * This routine just checks to see whether
9835                  * cur_blocked is blocked by ooa_io, which is ahead
9836                  * of it in the queue.  It doesn't queue/dequeue
9837                  * cur_blocked.
9838                  */
9839                 action = ctl_check_for_blockage(pending_io, ooa_io);
9840                 switch (action) {
9841                 case CTL_ACTION_BLOCK:
9842                 case CTL_ACTION_OVERLAP:
9843                 case CTL_ACTION_OVERLAP_TAG:
9844                 case CTL_ACTION_SKIP:
9845                 case CTL_ACTION_ERROR:
9846                         return (action);
9847                         break; /* NOTREACHED */
9848                 case CTL_ACTION_PASS:
9849                         break;
9850                 default:
9851                         panic("invalid action %d", action);
9852                         break;  /* NOTREACHED */
9853                 }
9854         }
9855
9856         return (CTL_ACTION_PASS);
9857 }
9858
9859 /*
9860  * Assumptions:
9861  * - An I/O has just completed, and has been removed from the per-LUN OOA
9862  *   queue, so some items on the blocked queue may now be unblocked.
9863  */
9864 static int
9865 ctl_check_blocked(struct ctl_lun *lun)
9866 {
9867         union ctl_io *cur_blocked, *next_blocked;
9868
9869         mtx_assert(&control_softc->ctl_lock, MA_OWNED);
9870
9871         /*
9872          * Run forward from the head of the blocked queue, checking each
9873          * entry against the I/Os prior to it on the OOA queue to see if
9874          * there is still any blockage.
9875          *
9876          * We cannot use the TAILQ_FOREACH() macro, because it can't cope
9877          * with our removing an entry from the list while it is traversing
9878          * it.
9879          */
9880         for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
9881              cur_blocked != NULL; cur_blocked = next_blocked) {
9882                 union ctl_io *prev_ooa;
9883                 ctl_action action;
9884
9885                 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
9886                                                           blocked_links);
9887
9888                 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
9889                                                       ctl_ooaq, ooa_links);
9890
9891                 /*
9892                  * If cur_blocked happens to be the first item in the OOA
9893                  * queue now, prev_ooa will be NULL, and the action
9894                  * returned will just be CTL_ACTION_PASS.
9895                  */
9896                 action = ctl_check_ooa(lun, cur_blocked, prev_ooa);
9897
9898                 switch (action) {
9899                 case CTL_ACTION_BLOCK:
9900                         /* Nothing to do here, still blocked */
9901                         break;
9902                 case CTL_ACTION_OVERLAP:
9903                 case CTL_ACTION_OVERLAP_TAG:
9904                         /*
9905                          * This shouldn't happen!  In theory we've already
9906                          * checked this command for overlap...
9907                          */
9908                         break;
9909                 case CTL_ACTION_PASS:
9910                 case CTL_ACTION_SKIP: {
9911                         struct ctl_softc *softc;
9912                         struct ctl_cmd_entry *entry;
9913                         uint32_t initidx;
9914                         uint8_t opcode;
9915                         int isc_retval;
9916
9917                         /*
9918                          * The skip case shouldn't happen; this transaction
9919                          * should never have made it onto the blocked queue.
9920                          */
9921                         /*
9922                          * This I/O is no longer blocked, we can remove it
9923                          * from the blocked queue.  Since this is a TAILQ
9924                          * (doubly linked list), we can do O(1) removals
9925                          * from any place on the list.
9926                          */
9927                         TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
9928                                      blocked_links);
9929                         cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
9930
9931                         if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
9932                                 /*
9933                                  * Need to send IO back to original side to
9934                                  * run
9935                                  */
9936                                 union ctl_ha_msg msg_info;
9937
9938                                 msg_info.hdr.original_sc =
9939                                         cur_blocked->io_hdr.original_sc;
9940                                 msg_info.hdr.serializing_sc = cur_blocked;
9941                                 msg_info.hdr.msg_type = CTL_MSG_R2R;
9942                                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
9943                                      &msg_info, sizeof(msg_info), 0)) >
9944                                      CTL_HA_STATUS_SUCCESS) {
9945                                         printf("CTL:Check Blocked error from "
9946                                                "ctl_ha_msg_send %d\n",
9947                                                isc_retval);
9948                                 }
9949                                 break;
9950                         }
9951                         opcode = cur_blocked->scsiio.cdb[0];
9952                         entry = &ctl_cmd_table[opcode];
9953                         softc = control_softc;
9954
9955                         initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus);
9956
9957                         /*
9958                          * Check this I/O for LUN state changes that may
9959                          * have happened while this command was blocked.
9960                          * The LUN state may have been changed by a command
9961                          * ahead of us in the queue, so we need to re-check
9962                          * for any states that can be caused by SCSI
9963                          * commands.
9964                          */
9965                         if (ctl_scsiio_lun_check(softc, lun, entry,
9966                                                  &cur_blocked->scsiio) == 0) {
9967                                 cur_blocked->io_hdr.flags |=
9968                                                       CTL_FLAG_IS_WAS_ON_RTR;
9969                                 STAILQ_INSERT_TAIL(&lun->ctl_softc->rtr_queue,
9970                                                    &cur_blocked->io_hdr, links);
9971                                 /*
9972                                  * In the non CTL_DONE_THREAD case, we need
9973                                  * to wake up the work thread here.  When
9974                                  * we're processing completed requests from
9975                                  * the work thread context, we'll pop back
9976                                  * around and end up pulling things off the
9977                                  * RtR queue.  When we aren't processing
9978                                  * things from the work thread context,
9979                                  * though, we won't ever check the RtR queue.
9980                                  * So we need to wake up the thread to clear
9981                                  * things off the queue.  Otherwise this
9982                                  * transaction will just sit on the RtR queue
9983                                  * until a new I/O comes in.  (Which may or
9984                                  * may not happen...)
9985                                  */
9986 #ifndef CTL_DONE_THREAD
9987                                 ctl_wakeup_thread();
9988 #endif
9989                         } else
9990                                 ctl_done_lock(cur_blocked, /*have_lock*/ 1);
9991                         break;
9992                 }
9993                 default:
9994                         /*
9995                          * This probably shouldn't happen -- we shouldn't
9996                          * get CTL_ACTION_ERROR, or anything else.
9997                          */
9998                         break;
9999                 }
10000         }
10001
10002         return (CTL_RETVAL_COMPLETE);
10003 }
10004
10005 /*
10006  * This routine (with one exception) checks LUN flags that can be set by
10007  * commands ahead of us in the OOA queue.  These flags have to be checked
10008  * when a command initially comes in, and when we pull a command off the
10009  * blocked queue and are preparing to execute it.  The reason we have to
10010  * check these flags for commands on the blocked queue is that the LUN
10011  * state may have been changed by a command ahead of us while we're on the
10012  * blocked queue.
10013  *
10014  * Ordering is somewhat important with these checks, so please pay
10015  * careful attention to the placement of any new checks.
10016  */
10017 static int
10018 ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
10019                      struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
10020 {
10021         int retval;
10022
10023         retval = 0;
10024
10025         /*
10026          * If this shelf is a secondary shelf controller, we have to reject
10027          * any media access commands.
10028          */
10029 #if 0
10030         /* No longer needed for HA */
10031         if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0)
10032          && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) {
10033                 ctl_set_lun_standby(ctsio);
10034                 retval = 1;
10035                 goto bailout;
10036         }
10037 #endif
10038
10039         /*
10040          * Check for a reservation conflict.  If this command isn't allowed
10041          * even on reserved LUNs, and if this initiator isn't the one who
10042          * reserved us, reject the command with a reservation conflict.
10043          */
10044         if ((lun->flags & CTL_LUN_RESERVED)
10045          && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
10046                 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id)
10047                  || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port)
10048                  || (ctsio->io_hdr.nexus.targ_target.id !=
10049                      lun->rsv_nexus.targ_target.id)) {
10050                         ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
10051                         ctsio->io_hdr.status = CTL_SCSI_ERROR;
10052                         retval = 1;
10053                         goto bailout;
10054                 }
10055         }
10056
10057         if ((lun->flags & CTL_LUN_PR_RESERVED)
10058          && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) {
10059                 uint32_t residx;
10060
10061                 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
10062                 /*
10063                  * If we aren't registered, or if this is a reservation
10064                  * holder type reservation and we aren't the reservation
10065                  * holder, set a conflict.
10066                  * NOTE: Commands which might be allowed on write exclusive
10067                  * type reservations are checked in the particular command
10068                  * for a conflict.  READ and START STOP UNIT are the only ones.
10069                  */
10070                 if (!lun->per_res[residx].registered
10071                  || (residx != lun->pr_res_idx && lun->res_type < 4)) {
10072                         ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
10073                         ctsio->io_hdr.status = CTL_SCSI_ERROR;
10074                         retval = 1;
10075                         goto bailout;
10076                 }
10077
10078         }
10079
10080         if ((lun->flags & CTL_LUN_OFFLINE)
10081          && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
10082                 ctl_set_lun_not_ready(ctsio);
10083                 retval = 1;
10084                 goto bailout;
10085         }
10086
10087         /*
10088          * If the LUN is stopped, see if this particular command is allowed
10089          * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
10090          */
10091         if ((lun->flags & CTL_LUN_STOPPED)
10092          && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
10093                 /* "Logical unit not ready, initializing cmd. required" */
10094                 ctl_set_lun_stopped(ctsio);
10095                 retval = 1;
10096                 goto bailout;
10097         }
10098
10099         if ((lun->flags & CTL_LUN_INOPERABLE)
10100          && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
10101                 /* "Medium format corrupted" */
10102                 ctl_set_medium_format_corrupted(ctsio);
10103                 retval = 1;
10104                 goto bailout;
10105         }
10106
10107 bailout:
10108         return (retval);
10109
10110 }
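
#if 0
/*
 * Illustrative sketch, not compiled in: how a caller typically reacts
 * to the checks above.  A non-zero return means a SCSI status or sense
 * data has already been set on the ctsio, so the I/O should be
 * completed rather than queued.
 */
        if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
                mtx_unlock(&ctl_softc->ctl_lock);
                ctl_done((union ctl_io *)ctsio);
                return (retval);
        }
#endif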
10111
10112 static void
10113 ctl_failover_io(union ctl_io *io, int have_lock)
10114 {
10115         ctl_set_busy(&io->scsiio);
10116         ctl_done_lock(io, have_lock);
10117 }
10118
10119 static void
10120 ctl_failover(void)
10121 {
10122         struct ctl_lun *lun;
10123         struct ctl_softc *ctl_softc;
10124         union ctl_io *next_io, *pending_io;
10125         union ctl_io *io;
10126         int lun_idx;
10127         int i;
10128
10129         ctl_softc = control_softc;
10130
10131         mtx_lock(&ctl_softc->ctl_lock);
10132         /*
10133          * Remove any cmds from the other SC from the rtr queue.  These
10134          * will obviously only be for LUNs for which we're the primary.
10135          * We can't send status or get/send data for these commands.
10136          * Since they haven't been executed yet, we can just remove them.
10137          * We'll either abort them or delete them below, depending on
10138          * which HA mode we're in.
10139          */
10140         for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue);
10141              io != NULL; io = next_io) {
10142                 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
10143                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
10144                         STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr,
10145                                       ctl_io_hdr, links);
10146         }
10147
10148         for (lun_idx = 0; lun_idx < ctl_softc->num_luns; lun_idx++) {
10149                 lun = ctl_softc->ctl_luns[lun_idx];
10150                 if (lun == NULL)
10151                         continue;
10152
10153                 /*
10154                  * Processor LUNs are primary on both sides.
10155                  * XXX will this always be true?
10156                  */
10157                 if (lun->be_lun->lun_type == T_PROCESSOR)
10158                         continue;
10159
10160                 if ((lun->flags & CTL_LUN_PRIMARY_SC)
10161                  && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
10162                         printf("FAILOVER: primary lun %d\n", lun_idx);
10163                         /*
10164                          * Remove all commands from the other SC: first from
10165                          * the blocked queue, then from the OOA queue.  Once
10166                          * we have removed them, call ctl_check_blocked() to
10167                          * see if there is anything that can now run.
10168                          */
10169                         for (io = (union ctl_io *)TAILQ_FIRST(
10170                              &lun->blocked_queue); io != NULL; io = next_io) {
10171
10172                                 next_io = (union ctl_io *)TAILQ_NEXT(
10173                                     &io->io_hdr, blocked_links);
10174
10175                                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
10176                                         TAILQ_REMOVE(&lun->blocked_queue,
10177                                                      &io->io_hdr,blocked_links);
10178                                         io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
10179                                         TAILQ_REMOVE(&lun->ooa_queue,
10180                                                      &io->io_hdr, ooa_links);
10181
10182                                         ctl_free_io(io);
10183                                 }
10184                         }
10185
10186                         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
10187                              io != NULL; io = next_io) {
10188
10189                                 next_io = (union ctl_io *)TAILQ_NEXT(
10190                                     &io->io_hdr, ooa_links);
10191
10192                                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
10193
10194                                         TAILQ_REMOVE(&lun->ooa_queue,
10195                                                 &io->io_hdr,
10196                                                 ooa_links);
10197
10198                                         ctl_free_io(io);
10199                                 }
10200                         }
10201                         ctl_check_blocked(lun);
10202                 } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
10203                         && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
10204
10205                         printf("FAILOVER: primary lun %d\n", lun_idx);
10206                         /*
10207                          * Abort all commands from the other SC.  We can't
10208                          * send status back for them now.  These should get
10209                          * cleaned up when they are completed or come out
10210                          * for a datamove operation.
10211                          */
10212                         for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
10213                              io != NULL; io = next_io) {
10214                                 next_io = (union ctl_io *)TAILQ_NEXT(
10215                                         &io->io_hdr, ooa_links);
10216
10217                                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
10218                                         io->io_hdr.flags |= CTL_FLAG_ABORT;
10219                         }
10220                 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
10221                         && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) {
10222
10223                         printf("FAILOVER: secondary lun %d\n", lun_idx);
10224
10225                         lun->flags |= CTL_LUN_PRIMARY_SC;
10226
10227                         /*
10228                          * We send all I/O that was sent to this controller
10229                          * and redirected to the other side back with
10230                          * busy status, and have the initiator retry it.
10231                          * Figuring out how much data has been transferred,
10232                          * etc. and picking up where we left off would be 
10233                          * very tricky.
10234                          *
10235                          * XXX KDM need to remove I/O from the blocked
10236                          * queue as well!
10237                          */
10238                         for (pending_io = (union ctl_io *)TAILQ_FIRST(
10239                              &lun->ooa_queue); pending_io != NULL;
10240                              pending_io = next_io) {
10241
10242                                 next_io =  (union ctl_io *)TAILQ_NEXT(
10243                                         &pending_io->io_hdr, ooa_links);
10244
10245                                 pending_io->io_hdr.flags &=
10246                                         ~CTL_FLAG_SENT_2OTHER_SC;
10247
10248                                 if (pending_io->io_hdr.flags &
10249                                     CTL_FLAG_IO_ACTIVE) {
10250                                         pending_io->io_hdr.flags |=
10251                                                 CTL_FLAG_FAILOVER;
10252                                 } else {
10253                                         ctl_set_busy(&pending_io->scsiio);
10254                                         ctl_done_lock(pending_io,
10255                                                       /*have_lock*/1);
10256                                 }
10257                         }
10258
10259                         /*
10260                          * Build Unit Attention
10261                          */
10262                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10263                                 lun->pending_sense[i].ua_pending |=
10264                                                      CTL_UA_ASYM_ACC_CHANGE;
10265                         }
10266                 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
10267                         && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
10268                         printf("FAILOVER: secondary lun %d\n", lun_idx);
10269                         /*
10270                          * If the first I/O on the OOA queue is not on the
10271                          * RtR queue, add it.
10272                          */
10273                         lun->flags |= CTL_LUN_PRIMARY_SC;
10274
10275                         pending_io = (union ctl_io *)TAILQ_FIRST(
10276                             &lun->ooa_queue);
10277                         if (pending_io == NULL) {
10278                                 printf("Nothing on OOA queue\n");
10279                                 continue;
10280                         }
10281
10282                         pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
10283                         if ((pending_io->io_hdr.flags &
10284                              CTL_FLAG_IS_WAS_ON_RTR) == 0) {
10285                                 pending_io->io_hdr.flags |=
10286                                     CTL_FLAG_IS_WAS_ON_RTR;
10287                                 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
10288                                                    &pending_io->io_hdr, links);
10289                         }
10290 #if 0
10291                         else
10292                         {
10293                                 printf("Tag 0x%04x is running\n",
10294                                       pending_io->scsiio.tag_num);
10295                         }
10296 #endif
10297
10298                         next_io = (union ctl_io *)TAILQ_NEXT(
10299                             &pending_io->io_hdr, ooa_links);
10300                         for (pending_io = next_io; pending_io != NULL;
10301                              pending_io = next_io) {
10302                                 pending_io->io_hdr.flags &=
10303                                     ~CTL_FLAG_SENT_2OTHER_SC;
10304                                 next_io = (union ctl_io *)TAILQ_NEXT(
10305                                         &pending_io->io_hdr, ooa_links);
10306                                 if (pending_io->io_hdr.flags &
10307                                     CTL_FLAG_IS_WAS_ON_RTR) {
10308 #if 0
10309                                         printf("Tag 0x%04x is running\n",
10310                                                 pending_io->scsiio.tag_num);
10311 #endif
10312                                         continue;
10313                                 }
10314
10315                                 switch (ctl_check_ooa(lun, pending_io,
10316                                     (union ctl_io *)TAILQ_PREV(
10317                                     &pending_io->io_hdr, ctl_ooaq,
10318                                     ooa_links))) {
10319
10320                                 case CTL_ACTION_BLOCK:
10321                                         TAILQ_INSERT_TAIL(&lun->blocked_queue,
10322                                                           &pending_io->io_hdr,
10323                                                           blocked_links);
10324                                         pending_io->io_hdr.flags |=
10325                                             CTL_FLAG_BLOCKED;
10326                                         break;
10327                                 case CTL_ACTION_PASS:
10328                                 case CTL_ACTION_SKIP:
10329                                         pending_io->io_hdr.flags |=
10330                                             CTL_FLAG_IS_WAS_ON_RTR;
10331                                         STAILQ_INSERT_TAIL(
10332                                             &ctl_softc->rtr_queue,
10333                                             &pending_io->io_hdr, links);
10334                                         break;
10335                                 case CTL_ACTION_OVERLAP:
10336                                         ctl_set_overlapped_cmd(
10337                                             (struct ctl_scsiio *)pending_io);
10338                                         ctl_done_lock(pending_io,
10339                                                       /*have_lock*/ 1);
10340                                         break;
10341                                 case CTL_ACTION_OVERLAP_TAG:
10342                                         ctl_set_overlapped_tag(
10343                                             (struct ctl_scsiio *)pending_io,
10344                                             pending_io->scsiio.tag_num & 0xff);
10345                                         ctl_done_lock(pending_io,
10346                                                       /*have_lock*/ 1);
10347                                         break;
10348                                 case CTL_ACTION_ERROR:
10349                                 default:
10350                                         ctl_set_internal_failure(
10351                                                 (struct ctl_scsiio *)pending_io,
10352                                                 /*sks_valid*/ 0,
10353                                                 /*retry_count*/ 0);
10354                                         ctl_done_lock(pending_io,
10355                                                       /*have_lock*/ 1);
10356                                         break;
10357                                 }
10358                         }
10359
10360                         /*
10361                          * Build Unit Attention
10362                          */
10363                         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10364                                 lun->pending_sense[i].ua_pending |=
10365                                                      CTL_UA_ASYM_ACC_CHANGE;
10366                         }
10367                 } else {
10368                         panic("Unhandled HA mode failover, LUN flags = %#x, "
10369                               "ha_mode = %#x", lun->flags, ctl_softc->ha_mode);
10370                 }
10371         }
10372         ctl_pause_rtr = 0;
10373         mtx_unlock(&ctl_softc->ctl_lock);
10374 }
10375
10376 static int
10377 ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio)
10378 {
10379         struct ctl_lun *lun;
10380         struct ctl_cmd_entry *entry;
10381         uint8_t opcode;
10382         uint32_t initidx, targ_lun;
10383         int retval;
10384
10385         retval = 0;
10386
10387         lun = NULL;
10388
10389         opcode = ctsio->cdb[0];
10390
10391         mtx_lock(&ctl_softc->ctl_lock);
10392
10393         targ_lun = ctsio->io_hdr.nexus.targ_lun;
10394         if (ctsio->io_hdr.nexus.lun_map_fn != NULL)
10395                 targ_lun = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, targ_lun);
10396         if ((targ_lun < CTL_MAX_LUNS)
10397          && (ctl_softc->ctl_luns[targ_lun] != NULL)) {
10398                 lun = ctl_softc->ctl_luns[targ_lun];
10399                 /*
10400                  * If the LUN is invalid, pretend that it doesn't exist.
10401                  * It will go away as soon as all pending I/O has been
10402                  * completed.
10403                  */
10404                 if (lun->flags & CTL_LUN_DISABLED) {
10405                         lun = NULL;
10406                 } else {
10407                         ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
10408                         ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
10409                                 lun->be_lun;
10410                         if (lun->be_lun->lun_type == T_PROCESSOR) {
10411                                 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
10412                         }
10413                 }
10414         } else {
10415                 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
10416                 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
10417         }
10418
10419         entry = &ctl_cmd_table[opcode];
10420
10421         ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
10422         ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
10423
10424         /*
10425          * Check to see whether we can send this command to LUNs that don't
10426          * exist.  This should pretty much only be the case for inquiry
10427          * and request sense.  Further checks, below, require having a
10428          * LUN, so without one we can't validate the command any further.
10429          * Just put it on the rtr queue.
10430          */
10431         if (lun == NULL) {
10432                 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
10433                         goto queue_rtr;
10434
10435                 ctl_set_unsupported_lun(ctsio);
10436                 mtx_unlock(&ctl_softc->ctl_lock);
10437                 ctl_done((union ctl_io *)ctsio);
10438                 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
10439                 goto bailout;
10440         } else {
10441                 /*
10442                  * Every I/O goes into the OOA queue for a particular LUN, and
10443                  * stays there until completion.
10444                  */
10445                 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
10446
10447                 /*
10448                  * Make sure we support this particular command on this LUN.
10449                  * e.g., we don't support writes to the control LUN.
10450                  */
10451                 switch (lun->be_lun->lun_type) {
10452                 case T_PROCESSOR:
10453                         if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0)
10454                          && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
10455                               == 0)) {
10456                                 ctl_set_invalid_opcode(ctsio);
10457                                 mtx_unlock(&ctl_softc->ctl_lock);
10458                                 ctl_done((union ctl_io *)ctsio);
10459                                 goto bailout;
10460                         }
10461                         break;
10462                 case T_DIRECT:
10463                         if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0)
10464                          && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS)
10465                               == 0)) {
10466                                 ctl_set_invalid_opcode(ctsio);
10467                                 mtx_unlock(&ctl_softc->ctl_lock);
10468                                 ctl_done((union ctl_io *)ctsio);
10469                                 goto bailout;
10470                         }
10471                         break;
10472                 default:
10473                         printf("Unsupported CTL LUN type %d\n",
10474                                lun->be_lun->lun_type);
10475                         panic("Unsupported CTL LUN type %d\n",
10476                               lun->be_lun->lun_type);
10477                         break; /* NOTREACHED */
10478                 }
10479         }
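        /*
         * Editor's note: the T_PROCESSOR and T_DIRECT branches above apply
         * one rule: a command is allowed if its entry carries either the
         * LUN-type-specific flag or CTL_CMD_FLAG_OK_ON_ALL_LUNS.  A hedged
         * sketch with invented flag names, not the CTL implementation:
         */
#if 0
#define SK_OK_ON_TYPE           0x01    /* e.g. "OK on processor LUN" */
#define SK_OK_ON_ALL_LUNS       0x02

        static int
        sketch_cmd_allowed(int entry_flags, int type_flag)
        {
                return ((entry_flags & (type_flag | SK_OK_ON_ALL_LUNS)) != 0);
        }
#endif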
10480
10481         initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
10482
10483         /*
10484          * If we've got a request sense, it'll clear the contingent
10485          * allegiance condition.  Otherwise, if we have a CA condition for
10486          * this initiator, clear it, because it sent down a command other
10487          * than request sense.
10488          */
10489         if ((opcode != REQUEST_SENSE)
10490          && (ctl_is_set(lun->have_ca, initidx)))
10491                 ctl_clear_mask(lun->have_ca, initidx);
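        /*
         * Editor's note: a self-contained userland sketch of the
         * per-initiator bitmask idiom behind the ctl_is_set()/
         * ctl_clear_mask() calls above.  The 32-bit word layout and names
         * are assumptions for illustration, not the definitive CTL helpers.
         */
#if 0
#include <stdint.h>

#define SKETCH_MASK_WORDS(n)    (((n) + 31) / 32)

        static int
        sketch_mask_is_set(const uint32_t *mask, uint32_t bit)
        {
                return ((mask[bit / 32] & (1u << (bit % 32))) != 0);
        }

        static void
        sketch_mask_clear(uint32_t *mask, uint32_t bit)
        {
                mask[bit / 32] &= ~(1u << (bit % 32));
        }
#endif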
10492
10493         /*
10494          * If the command has this flag set, it handles its own unit
10495          * attention reporting, so we shouldn't do anything.  Otherwise we
10496          * check for any pending unit attentions, and send them back to the
10497          * initiator.  We only do this when a command initially comes in,
10498          * not when we pull it off the blocked queue.
10499          *
10500          * According to SAM-3, section 5.3.2, the order that things get
10501          * presented back to the host is basically unit attentions caused
10502          * by some sort of reset event, busy status, reservation conflicts
10503          * or task set full, and finally any other status.
10504          *
10505          * One issue here is that some of the unit attentions we report
10506          * don't fall into the "reset" category (e.g. "reported luns data
10507          * has changed").  So reporting it here, before the reservation
10508          * check, may be technically wrong.  I guess the only thing to do
10509          * would be to check for and report the reset events here, and then
10510          * check for the other unit attention types after we check for a
10511          * reservation conflict.
10512          *
10513          * XXX KDM need to fix this
10514          */
10515         if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
10516                 ctl_ua_type ua_type;
10517
10518                 ua_type = lun->pending_sense[initidx].ua_pending;
10519                 if (ua_type != CTL_UA_NONE) {
10520                         scsi_sense_data_type sense_format;
10521
10522                         /*
10523                          * The LUN cannot be NULL here; it was already
10524                          * dereferenced above to fetch ua_pending.
10525                          */
10526                         sense_format = (lun->flags & CTL_LUN_SENSE_DESC) ?
10527                             SSD_TYPE_DESC : SSD_TYPE_FIXED;
10528
10529                         ua_type = ctl_build_ua(ua_type, &ctsio->sense_data,
10530                                                sense_format);
10531                         if (ua_type != CTL_UA_NONE) {
10532                                 ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
10533                                 ctsio->io_hdr.status = CTL_SCSI_ERROR |
10534                                                        CTL_AUTOSENSE;
10535                                 ctsio->sense_len = SSD_FULL_SIZE;
10536                                 lun->pending_sense[initidx].ua_pending &=
10537                                         ~ua_type;
10538                                 mtx_unlock(&ctl_softc->ctl_lock);
10539                                 ctl_done((union ctl_io *)ctsio);
10540                                 goto bailout;
10541                         }
10542                 }
10543         }
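        /*
         * Editor's note: for reference, a hedged sketch of the fixed-format
         * sense data (SSD_TYPE_FIXED) that a reset-class unit attention
         * (ASC/ASCQ 29h/00h) produces, with offsets per SPC-3.  This is
         * illustrative only, not the ctl_build_ua() implementation; len is
         * assumed to be at least 18 bytes.
         */
#if 0
#include <string.h>
#include <stdint.h>

        static void
        sketch_fill_reset_ua(uint8_t *sense, size_t len)
        {
                memset(sense, 0, len);
                sense[0] = 0x70;        /* current errors, fixed format */
                sense[2] = 0x06;        /* sense key: UNIT ATTENTION */
                sense[7] = 10;          /* additional sense length */
                sense[12] = 0x29;       /* ASC: power on/reset occurred */
                sense[13] = 0x00;       /* ASCQ */
        }
#endif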
10544
10545
10546         if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) {
10547                 mtx_unlock(&ctl_softc->ctl_lock);
10548                 ctl_done((union ctl_io *)ctsio);
10549                 goto bailout;
10550         }
10551
10552         /*
10553          * XXX CHD this is where we want to send IO to other side if
10554          * this LUN is secondary on this SC. We will need to make a copy
10555          * of the IO and flag the IO on this side as SENT_2OTHER and the flag
10556          * the copy we send as FROM_OTHER.
10557          * We also need to stuff the address of the original IO so we can
10558          * find it easily. Something similar will need be done on the other
10559          * side so when we are done we can find the copy.
10560          */
10561         if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
10562                 union ctl_ha_msg msg_info;
10563                 int isc_retval;
10564
10565                 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
10566
10567                 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
10568                 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
10569 #if 0
10570                 printf("1. ctsio %p\n", ctsio);
10571 #endif
10572                 msg_info.hdr.serializing_sc = NULL;
10573                 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
10574                 msg_info.scsi.tag_num = ctsio->tag_num;
10575                 msg_info.scsi.tag_type = ctsio->tag_type;
10576                 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
10577
10578                 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
10579
10580                 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10581                     (void *)&msg_info, sizeof(msg_info), 0)) >
10582                     CTL_HA_STATUS_SUCCESS) {
10583                         printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
10584                                isc_retval);
10585                         printf("CTL:opcode is %x\n", opcode);
10586                 } else {
10587 #if 0
10588                         printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
10589 #endif
10590                 }
10591
10592                 /*
10593                  * XXX KDM this I/O is off the incoming queue, but hasn't
10594                  * been inserted on any other queue.  We may need to come
10595                  * up with a holding queue while we wait for serialization
10596                  * so that we have an idea of what we're waiting for from
10597                  * the other side.
10598                  */
10599                 goto bailout_unlock;
10600         }
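        /*
         * Editor's note: a minimal sketch of the cross-controller
         * bookkeeping described in the XXX CHD comment above: the sender
         * stashes a pointer to its original I/O in the message so the
         * peer's replies can be matched back to it.  Types and names are
         * illustrative, not the actual union ctl_ha_msg layout.
         */
#if 0
        struct sketch_ha_hdr {
                void    *original_sc;   /* sender's I/O; echoed in replies */
                void    *serializing_sc;/* peer's shadow copy, once created */
        };
#endif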
10601
10602         switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
10603                               (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
10604                               ctl_ooaq, ooa_links))) {
10605         case CTL_ACTION_BLOCK:
10606                 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
10607                 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
10608                                   blocked_links);
10609                 goto bailout_unlock;
10610                 break; /* NOTREACHED */
10611         case CTL_ACTION_PASS:
10612         case CTL_ACTION_SKIP:
10613                 goto queue_rtr;
10614                 break; /* NOTREACHED */
10615         case CTL_ACTION_OVERLAP:
10616                 ctl_set_overlapped_cmd(ctsio);
10617                 mtx_unlock(&ctl_softc->ctl_lock);
10618                 ctl_done((union ctl_io *)ctsio);
10619                 goto bailout;
10620                 break; /* NOTREACHED */
10621         case CTL_ACTION_OVERLAP_TAG:
10622                 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
10623                 mtx_unlock(&ctl_softc->ctl_lock);
10624                 ctl_done((union ctl_io *)ctsio);
10625                 goto bailout;
10626                 break; /* NOTREACHED */
10627         case CTL_ACTION_ERROR:
10628         default:
10629                 ctl_set_internal_failure(ctsio,
10630                                          /*sks_valid*/ 0,
10631                                          /*retry_count*/ 0);
10632                 mtx_unlock(&ctl_softc->ctl_lock);
10633                 ctl_done((union ctl_io *)ctsio);
10634                 goto bailout;
10635                 break; /* NOTREACHED */
10636         }
10637
10638         goto bailout_unlock;
10639
10640 queue_rtr:
10641         ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
10642         STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue, &ctsio->io_hdr, links);
10643
10644 bailout_unlock:
10645         mtx_unlock(&ctl_softc->ctl_lock);
10646
10647 bailout:
10648         return (retval);
10649 }
10650
10651 static int
10652 ctl_scsiio(struct ctl_scsiio *ctsio)
10653 {
10654         int retval;
10655         struct ctl_cmd_entry *entry;
10656
10657         retval = CTL_RETVAL_COMPLETE;
10658
10659         CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
10660
10661         entry = &ctl_cmd_table[ctsio->cdb[0]];
10662
10663         /*
10664          * If this I/O has been aborted, just send it straight to
10665          * ctl_done() without executing it.
10666          */
10667         if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
10668                 ctl_done((union ctl_io *)ctsio);
10669                 goto bailout;
10670         }
10671
10672         /*
10673          * All the checks should have been handled by ctl_scsiio_precheck().
10674          * We should be clear now to just execute the I/O.
10675          */
10676         retval = entry->execute(ctsio);
10677
10678 bailout:
10679         return (retval);
10680 }
10681
10682 /*
10683  * Since we only implement one target right now, a bus reset simply resets
10684  * our single target.
10685  */
10686 static int
10687 ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io)
10688 {
10689         return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET));
10690 }
10691
10692 static int
10693 ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
10694                  ctl_ua_type ua_type)
10695 {
10696         struct ctl_lun *lun;
10697         int retval;
10698
10699         if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
10700                 union ctl_ha_msg msg_info;
10701
10702                 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
10703                 msg_info.hdr.nexus = io->io_hdr.nexus;
10704                 if (ua_type == CTL_UA_TARG_RESET)
10705                         msg_info.task.task_action = CTL_TASK_TARGET_RESET;
10706                 else
10707                         msg_info.task.task_action = CTL_TASK_BUS_RESET;
10708                 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
10709                 msg_info.hdr.original_sc = NULL;
10710                 msg_info.hdr.serializing_sc = NULL;
10711                 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10712                     (void *)&msg_info, sizeof(msg_info), 0)) {
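                        /* XXX: a failed send to the peer SC is ignored here. */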
10713                 }
10714         }
10715         retval = 0;
10716
10717         STAILQ_FOREACH(lun, &ctl_softc->lun_list, links)
10718                 retval += ctl_lun_reset(lun, io, ua_type);
10719
10720         return (retval);
10721 }
10722
10723 /*
10724  * The LUN should always be set.  The I/O is optional, and is used to
10725  * distinguish between I/Os sent by this initiator, and by other
10726  * initiators.  We set unit attention for initiators other than this one.
10727  * SAM-3 is vague on this point.  It does say that a unit attention should
10728  * be established for other initiators when a LUN is reset (see section
10729  * 5.7.3), but it doesn't specifically say that the unit attention should
10730  * be established for this particular initiator when a LUN is reset.  Here
10731  * is the relevant text, from SAM-3 rev 8:
10732  *
10733  * 5.7.2 When a SCSI initiator port aborts its own tasks
10734  *
10735  * When a SCSI initiator port causes its own task(s) to be aborted, no
10736  * notification that the task(s) have been aborted shall be returned to
10737  * the SCSI initiator port other than the completion response for the
10738  * command or task management function action that caused the task(s) to
10739  * be aborted and notification(s) associated with related effects of the
10740  * action (e.g., a reset unit attention condition).
10741  *
10742  * XXX KDM for now, we're setting unit attention for all initiators.
10743  */
10744 static int
10745 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
10746 {
10747         union ctl_io *xio;
10748 #if 0
10749         uint32_t initindex;
10750 #endif
10751         int i;
10752
10753         /*
10754          * Run through the OOA queue and abort each I/O.
10755          */
10756 #if 0
10757         TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
10758 #endif
10759         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
10760              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
10761                 xio->io_hdr.flags |= CTL_FLAG_ABORT;
10762         }
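        /*
         * Editor's note: a self-contained userland sketch of the TAILQ
         * sweep above, which flags every outstanding I/O as aborted.  The
         * element type and flag value are invented for illustration.
         */
#if 0
#include <sys/queue.h>

        struct sketch_io {
                int                     flags;
#define SKETCH_FLAG_ABORT       0x01
                TAILQ_ENTRY(sketch_io)  links;
        };
        TAILQ_HEAD(sketch_ooaq, sketch_io);

        static void
        sketch_abort_all(struct sketch_ooaq *q)
        {
                struct sketch_io *io;

                TAILQ_FOREACH(io, q, links)
                        io->flags |= SKETCH_FLAG_ABORT;
        }
#endif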
10763
10764         /*
10765          * This version sets unit attention for every initiator.
10766          */
10767 #if 0
10768         initindex = ctl_get_initindex(&io->io_hdr.nexus);
10769         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10770                 if (initindex == i)
10771                         continue;
10772                 lun->pending_sense[i].ua_pending |= ua_type;
10773         }
10774 #endif
10775
10776         /*
10777          * A reset (any kind, really) clears reservations established with
10778          * RESERVE/RELEASE.  It does not clear reservations established
10779          * with PERSISTENT RESERVE OUT, but we don't support that at the
10780          * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
10781          * reservations made with the RESERVE/RELEASE commands, because
10782          * those commands are obsolete in SPC-3.
10783          */
10784         lun->flags &= ~CTL_LUN_RESERVED;
10785
10786         for (i = 0; i < CTL_MAX_INITIATORS; i++) {
10787                 ctl_clear_mask(lun->have_ca, i);
10788                 lun->pending_sense[i].ua_pending |= ua_type;
10789         }
10790
10791         return (0);
10792 }
10793
10794 static int
10795 ctl_abort_task(union ctl_io *io)
10796 {
10797         union ctl_io *xio;
10798         struct ctl_lun *lun;
10799         struct ctl_softc *ctl_softc;
10800 #if 0
10801         struct sbuf sb;
10802         char printbuf[128];
10803 #endif
10804         int found;
10805         uint32_t targ_lun;
10806
10807         ctl_softc = control_softc;
10808         found = 0;
10809
10810         /*
10811          * Look up the LUN.
10812          */
10813         targ_lun = io->io_hdr.nexus.targ_lun;
10814         if (io->io_hdr.nexus.lun_map_fn != NULL)
10815                 targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
10816         if ((targ_lun < CTL_MAX_LUNS)
10817          && (ctl_softc->ctl_luns[targ_lun] != NULL))
10818                 lun = ctl_softc->ctl_luns[targ_lun];
10819         else
10820                 goto bailout;
10821
10822 #if 0
10823         printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
10824                lun->lun, io->taskio.tag_num, io->taskio.tag_type);
10825 #endif
10826
10827         /*
10828          * Run through the OOA queue and attempt to find the given I/O.
10829          * The target port, initiator ID, tag type and tag number have to
10830          * match the values that we got from the initiator.  If we have an
10831          * untagged command to abort, simply abort the first untagged command
10832          * we come to.  We only allow one untagged command at a time of course.
10833          */
10834 #if 0
10835         TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
10836 #endif
10837         for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
10838              xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
10839 #if 0
10840                 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
10841
10842                 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
10843                             lun->lun, xio->scsiio.tag_num,
10844                             xio->scsiio.tag_type,
10845                             (xio->io_hdr.blocked_links.tqe_prev
10846                             == NULL) ? "" : " BLOCKED",
10847                             (xio->io_hdr.flags &
10848                             CTL_FLAG_DMA_INPROG) ? " DMA" : "",
10849                             (xio->io_hdr.flags &
10850                             CTL_FLAG_ABORT) ? " ABORT" : "",
10851                             (xio->io_hdr.flags &
10852                             CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
10853                 ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
10854                 sbuf_finish(&sb);
10855                 printf("%s\n", sbuf_data(&sb));
10856 #endif
10857
10858                 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port)
10859                  && (xio->io_hdr.nexus.initid.id ==
10860                      io->io_hdr.nexus.initid.id)) {
10861                         /*
10862                          * If the abort says that the task is untagged, the
10863                          * task in the queue must be untagged.  Otherwise,
10864                          * we just check to see whether the tag numbers
10865                          * match.  This is because the QLogic firmware
10866                          * doesn't pass back the tag type in an abort
10867                          * request.
10868                          */
10869 #if 0
10870                         if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
10871                           && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
10872                          || (xio->scsiio.tag_num == io->taskio.tag_num)) {
10873 #endif
10874                         /*
10875                          * XXX KDM we've got problems with FC, because it
10876                          * doesn't send down a tag type with aborts.  So we
10877                          * can only really go by the tag number...
10878                          * This may cause problems with parallel SCSI.
10879                          * Need to figure that out!!
10880                          */
10881                         if (xio->scsiio.tag_num == io->taskio.tag_num) {
10882                                 xio->io_hdr.flags |= CTL_FLAG_ABORT;
10883                                 found = 1;
10884                                 if ((io->io_hdr.flags &
10885                                      CTL_FLAG_FROM_OTHER_SC) == 0 &&
10886                                     !(lun->flags & CTL_LUN_PRIMARY_SC)) {
10887                                         union ctl_ha_msg msg_info;
10888
10889                                         io->io_hdr.flags |=
10890                                                         CTL_FLAG_SENT_2OTHER_SC;
10891                                         msg_info.hdr.nexus = io->io_hdr.nexus;
10892                                         msg_info.task.task_action =
10893                                                 CTL_TASK_ABORT_TASK;
10894                                         msg_info.task.tag_num =
10895                                                 io->taskio.tag_num;
10896                                         msg_info.task.tag_type =
10897                                                 io->taskio.tag_type;
10898                                         msg_info.hdr.msg_type =
10899                                                 CTL_MSG_MANAGE_TASKS;
10900                                         msg_info.hdr.original_sc = NULL;
10901                                         msg_info.hdr.serializing_sc = NULL;
10902 #if 0
10903                                         printf("Sent Abort to other side\n");
10904 #endif
10905                                         if (CTL_HA_STATUS_SUCCESS !=
10906                                                 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
10907                                                 (void *)&msg_info,
10908                                                 sizeof(msg_info), 0)) {
10909                                         }
10910                                 }
10911 #if 0
10912                                 printf("ctl_abort_task: found I/O to abort\n");
10913 #endif
10914                                 break;
10915                         }
10916                 }
10917         }
10918
10919 bailout:
10920
10921         if (found == 0) {
10922                 /*
10923                  * This isn't really an error.  It's entirely possible for
10924                  * the abort and command completion to cross on the wire.
10925                  * This is more of an informative/diagnostic error.
10926                  */
10927 #if 0
10928                 printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
10929                        "%d:%d:%d:%d tag %d type %d\n",
10930                        io->io_hdr.nexus.initid.id,
10931                        io->io_hdr.nexus.targ_port,
10932                        io->io_hdr.nexus.targ_target.id,
10933                        io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
10934                        io->taskio.tag_type);
10935 #endif
10936                 return (1);
10937         } else
10938                 return (0);
10939 }
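/*
 * Editor's note: the search above matches on nexus (target port plus
 * initiator ID) and tag number only, since some firmware (e.g. QLogic, per
 * the comment in the loop) does not convey a tag type with aborts.  A
 * hedged sketch of that narrower match, with invented types:
 */
#if 0
#include <stdint.h>

struct sketch_nexus {
        int             targ_port;
        uint64_t        initid;
};

static int
sketch_abort_matches(const struct sketch_nexus *a,
    const struct sketch_nexus *b, uint32_t tag_a, uint32_t tag_b)
{
        return (a->targ_port == b->targ_port && a->initid == b->initid &&
            tag_a == tag_b);
}
#endif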
10940
10941 /*
10942  * This routine cannot block!  It must be callable from an interrupt
10943  * handler as well as from the work thread.
10944  */
10945 static void
10946 ctl_run_task_queue(struct ctl_softc *ctl_softc)
10947 {
10948         union ctl_io *io, *next_io;
10949
10950         mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
10951
10952         CTL_DEBUG_PRINT(("ctl_run_task_queue\n"));
10953
10954         for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->task_queue);
10955              io != NULL; io = next_io) {
10956                 int retval;
10957                 const char *task_desc;
10958
10959                 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
10960
10961                 retval = 0;
10962
10963                 switch (io->io_hdr.io_type) {
10964                 case CTL_IO_TASK: {
10965                         task_desc = ctl_scsi_task_string(&io->taskio);
10966                         if (task_desc != NULL) {
10967 #ifdef NEEDTOPORT
10968                                 csevent_log(CSC_CTL | CSC_SHELF_SW |
10969                                             CTL_TASK_REPORT,
10970                                             csevent_LogType_Trace,
10971                                             csevent_Severity_Information,
10972                                             csevent_AlertLevel_Green,
10973                                             csevent_FRU_Firmware,
10974                                             csevent_FRU_Unknown,
10975                                             "CTL: received task: %s",task_desc);
10976 #endif
10977                         } else {
10978 #ifdef NEEDTOPORT
10979                                 csevent_log(CSC_CTL | CSC_SHELF_SW |
10980                                             CTL_TASK_REPORT,
10981                                             csevent_LogType_Trace,
10982                                             csevent_Severity_Information,
10983                                             csevent_AlertLevel_Green,
10984                                             csevent_FRU_Firmware,
10985                                             csevent_FRU_Unknown,
10986                                             "CTL: received unknown task "
10987                                             "type: %d (%#x)",
10988                                             io->taskio.task_action,
10989                                             io->taskio.task_action);
10990 #endif
10991                         }
10992                         switch (io->taskio.task_action) {
10993                         case CTL_TASK_ABORT_TASK:
10994                                 retval = ctl_abort_task(io);
10995                                 break;
10996                         case CTL_TASK_ABORT_TASK_SET:
10997                                 break;
10998                         case CTL_TASK_CLEAR_ACA:
10999                                 break;
11000                         case CTL_TASK_CLEAR_TASK_SET:
11001                                 break;
11002                         case CTL_TASK_LUN_RESET: {
11003                                 struct ctl_lun *lun;
11004                                 uint32_t targ_lun;
11005                                 /* XXX: a local retval would shadow the one tested below */
11006
11007                                 targ_lun = io->io_hdr.nexus.targ_lun;
11008                                 if (io->io_hdr.nexus.lun_map_fn != NULL)
11009                                         targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
11010
11011                                 if ((targ_lun < CTL_MAX_LUNS)
11012                                  && (ctl_softc->ctl_luns[targ_lun] != NULL))
11013                                         lun = ctl_softc->ctl_luns[targ_lun];
11014                                 else {
11015                                         retval = 1;
11016                                         break;
11017                                 }
11018
11019                                 if (!(io->io_hdr.flags &
11020                                     CTL_FLAG_FROM_OTHER_SC)) {
11021                                         union ctl_ha_msg msg_info;
11022
11023                                         io->io_hdr.flags |=
11024                                                 CTL_FLAG_SENT_2OTHER_SC;
11025                                         msg_info.hdr.msg_type =
11026                                                 CTL_MSG_MANAGE_TASKS;
11027                                         msg_info.hdr.nexus = io->io_hdr.nexus;
11028                                         msg_info.task.task_action =
11029                                                 CTL_TASK_LUN_RESET;
11030                                         msg_info.hdr.original_sc = NULL;
11031                                         msg_info.hdr.serializing_sc = NULL;
11032                                         if (CTL_HA_STATUS_SUCCESS !=
11033                                             ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11034                                             (void *)&msg_info,
11035                                             sizeof(msg_info), 0)) {
11036                                         }
11037                                 }
11038
11039                                 retval = ctl_lun_reset(lun, io,
11040                                                        CTL_UA_LUN_RESET);
11041                                 break;
11042                         }
11043                         case CTL_TASK_TARGET_RESET:
11044                                 retval = ctl_target_reset(ctl_softc, io,
11045                                                           CTL_UA_TARG_RESET);
11046                                 break;
11047                         case CTL_TASK_BUS_RESET:
11048                                 retval = ctl_bus_reset(ctl_softc, io);
11049                                 break;
11050                         case CTL_TASK_PORT_LOGIN:
11051                                 break;
11052                         case CTL_TASK_PORT_LOGOUT:
11053                                 break;
11054                         default:
11055                                 printf("ctl_run_task_queue: got unknown task "
11056                                        "management event %d\n",
11057                                        io->taskio.task_action);
11058                                 break;
11059                         }
11060                         if (retval == 0)
11061                                 io->io_hdr.status = CTL_SUCCESS;
11062                         else
11063                                 io->io_hdr.status = CTL_ERROR;
11064
11065                         STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
11066                                       ctl_io_hdr, links);
11067                         /*
11068                          * This will queue this I/O to the done queue, but the
11069                          * work thread won't be able to process it until we
11070                          * return and the lock is released.
11071                          */
11072                         ctl_done_lock(io, /*have_lock*/ 1);
11073                         break;
11074                 }
11075                 default: {
11076
11077                         printf("%s: invalid I/O type %d msg %d cdb %x"
11078                                " iptl: %ju:%d:%ju:%d tag 0x%04x\n",
11079                                __func__, io->io_hdr.io_type,
11080                                io->io_hdr.msg_type, io->scsiio.cdb[0],
11081                                (uintmax_t)io->io_hdr.nexus.initid.id,
11082                                io->io_hdr.nexus.targ_port,
11083                                (uintmax_t)io->io_hdr.nexus.targ_target.id,
11084                                io->io_hdr.nexus.targ_lun /* XXX */,
11085                                (io->io_hdr.io_type == CTL_IO_TASK) ?
11086                                io->taskio.tag_num : io->scsiio.tag_num);
11087                         STAILQ_REMOVE(&ctl_softc->task_queue, &io->io_hdr,
11088                                       ctl_io_hdr, links);
11089                         ctl_free_io(io);
11090                         break;
11091                 }
11092                 }
11093         }
11094
11095         ctl_softc->flags &= ~CTL_FLAG_TASK_PENDING;
11096 }
11097
11098 /*
11099  * For HA operation.  Handle commands that come in from the other
11100  * controller.
11101  */
11102 static void
11103 ctl_handle_isc(union ctl_io *io)
11104 {
11105         int free_io;
11106         struct ctl_lun *lun;
11107         struct ctl_softc *ctl_softc;
11108         uint32_t targ_lun;
11109
11110         ctl_softc = control_softc;
11111
11112         targ_lun = io->io_hdr.nexus.targ_lun;
11113         if (io->io_hdr.nexus.lun_map_fn != NULL)
11114                 targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
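        /* XXX: unlike ctl_scsiio_precheck(), targ_lun is not bounds-checked. */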
11115         lun = ctl_softc->ctl_luns[targ_lun];
11116
11117         switch (io->io_hdr.msg_type) {
11118         case CTL_MSG_SERIALIZE:
11119                 free_io = ctl_serialize_other_sc_cmd(&io->scsiio,
11120                                                      /*have_lock*/ 0);
11121                 break;
11122         case CTL_MSG_R2R: {
11123                 uint8_t opcode;
11124                 struct ctl_cmd_entry *entry;
11125
11126                 /*
11127                  * This is only used in SER_ONLY mode.
11128                  */
11129                 free_io = 0;
11130                 opcode = io->scsiio.cdb[0];
11131                 entry = &ctl_cmd_table[opcode];
11132                 mtx_lock(&ctl_softc->ctl_lock);
11133                 if (ctl_scsiio_lun_check(ctl_softc, lun,
11134                     entry, (struct ctl_scsiio *)io) != 0) {
11135                         ctl_done_lock(io, /*have_lock*/ 1);
11136                         mtx_unlock(&ctl_softc->ctl_lock);
11137                         break;
11138                 }
11139                 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11140                 STAILQ_INSERT_TAIL(&ctl_softc->rtr_queue,
11141                                    &io->io_hdr, links);
11142                 mtx_unlock(&ctl_softc->ctl_lock);
11143                 break;
11144         }
11145         case CTL_MSG_FINISH_IO:
11146                 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
11147                         free_io = 0;
11148                         ctl_done_lock(io, /*have_lock*/ 0);
11149                 } else {
11150                         free_io = 1;
11151                         mtx_lock(&ctl_softc->ctl_lock);
11152                         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
11153                                      ooa_links);
11154                         STAILQ_REMOVE(&ctl_softc->task_queue,
11155                                       &io->io_hdr, ctl_io_hdr, links);
11156                         ctl_check_blocked(lun);
11157                         mtx_unlock(&ctl_softc->ctl_lock);
11158                 }
11159                 break;
11160         case CTL_MSG_PERS_ACTION:
11161                 ctl_hndl_per_res_out_on_other_sc(
11162                         (union ctl_ha_msg *)&io->presio.pr_msg);
11163                 free_io = 1;
11164                 break;
11165         case CTL_MSG_BAD_JUJU:
11166                 free_io = 0;
11167                 ctl_done_lock(io, /*have_lock*/ 0);
11168                 break;
11169         case CTL_MSG_DATAMOVE:
11170                 /* Only used in XFER mode */
11171                 free_io = 0;
11172                 ctl_datamove_remote(io);
11173                 break;
11174         case CTL_MSG_DATAMOVE_DONE:
11175                 /* Only used in XFER mode */
11176                 free_io = 0;
11177                 io->scsiio.be_move_done(io);
11178                 break;
11179         default:
11180                 free_io = 1;
11181                 printf("%s: Invalid message type %d\n",
11182                        __func__, io->io_hdr.msg_type);
11183                 break;
11184         }
11185         if (free_io)
11186                 ctl_free_io(io);
11187
11188 }
11189
11190
11191 /*
11192  * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
11193  * there is no match.
11194  */
11195 static ctl_lun_error_pattern
11196 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
11197 {
11198         struct ctl_cmd_entry *entry;
11199         ctl_lun_error_pattern filtered_pattern, pattern;
11200         uint8_t opcode;
11201
11202         pattern = desc->error_pattern;
11203
11204         /*
11205          * XXX KDM we need more data passed into this function to match a
11206          * custom pattern, and we actually need to implement custom pattern
11207          * matching.
11208          */
11209         if (pattern & CTL_LUN_PAT_CMD)
11210                 return (CTL_LUN_PAT_CMD);
11211
11212         if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
11213                 return (CTL_LUN_PAT_ANY);
11214
11215         opcode = ctsio->cdb[0];
11216         entry = &ctl_cmd_table[opcode];
11217
11218         filtered_pattern = entry->pattern & pattern;
11219
11220         /*
11221          * If the user requested specific flags in the pattern (e.g.
11222          * CTL_LUN_PAT_RANGE), make sure the command supports all of those
11223          * flags.
11224          *
11225          * If the user did not specify any flags, it doesn't matter whether
11226          * or not the command supports the flags.
11227          */
11228         if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
11229              (pattern & ~CTL_LUN_PAT_MASK))
11230                 return (CTL_LUN_PAT_NONE);
11231
11232         /*
11233          * If the user asked for a range check, see if the requested LBA
11234          * range overlaps with this command's LBA range.
11235          */
11236         if (filtered_pattern & CTL_LUN_PAT_RANGE) {
11237                 uint64_t lba1;
11238                 uint32_t len1;
11239                 ctl_action action;
11240                 int retval;
11241
11242                 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
11243                 if (retval != 0)
11244                         return (CTL_LUN_PAT_NONE);
11245
11246                 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
11247                                               desc->lba_range.len);
11248                 /*
11249                  * A "pass" means that the LBA ranges don't overlap, so
11250                  * this doesn't match the user's range criteria.
11251                  */
11252                 if (action == CTL_ACTION_PASS)
11253                         return (CTL_LUN_PAT_NONE);
11254         }
11255
11256         return (filtered_pattern);
11257 }
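/*
 * Editor's note: the CTL_LUN_PAT_RANGE check above reduces to a half-open
 * interval overlap test: [lba1, lba1+len1) and [lba2, lba2+len2) overlap
 * iff each range starts before the other one ends.  A minimal sketch, not
 * the ctl_extent_check_lba() implementation:
 */
#if 0
#include <stdint.h>

static int
sketch_lba_overlap(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2)
{
        return (lba1 < lba2 + len2 && lba2 < lba1 + len1);
}
#endif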
11258
11259 static void
11260 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
11261 {
11262         struct ctl_error_desc *desc, *desc2;
11263
11264         mtx_assert(&control_softc->ctl_lock, MA_OWNED);
11265
11266         STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
11267                 ctl_lun_error_pattern pattern;
11268                 /*
11269                  * Check to see whether this particular command matches
11270                  * the pattern in the descriptor.
11271                  */
11272                 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
11273                 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
11274                         continue;
11275
11276                 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
11277                 case CTL_LUN_INJ_ABORTED:
11278                         ctl_set_aborted(&io->scsiio);
11279                         break;
11280                 case CTL_LUN_INJ_MEDIUM_ERR:
11281                         ctl_set_medium_error(&io->scsiio);
11282                         break;
11283                 case CTL_LUN_INJ_UA:
11284                         /* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
11285                          * OCCURRED */
11286                         ctl_set_ua(&io->scsiio, 0x29, 0x00);
11287                         break;
11288                 case CTL_LUN_INJ_CUSTOM:
11289                         /*
11290                          * We're assuming the user knows what he is doing.
11291                          * Just copy the sense information without doing
11292                          * checks.
11293                          */
11294                         bcopy(&desc->custom_sense, &io->scsiio.sense_data,
11295                               ctl_min(sizeof(desc->custom_sense),
11296                                       sizeof(io->scsiio.sense_data)));
11297                         io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
11298                         io->scsiio.sense_len = SSD_FULL_SIZE;
11299                         io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11300                         break;
11301                 case CTL_LUN_INJ_NONE:
11302                 default:
11303                         /*
11304                          * If this is an error injection type we don't know
11305                          * about, clear the continuous flag (if it is set)
11306                          * so it will get deleted below.
11307                          */
11308                         desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
11309                         break;
11310                 }
11311                 /*
11312                  * By default, each error injection action is a one-shot
11313                  */
11314                 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
11315                         continue;
11316
11317                 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
11318
11319                 free(desc, M_CTL);
11320         }
11321 }
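/*
 * Editor's note: a self-contained userland sketch of the one-shot versus
 * continuous pattern above.  The safe traversal macro is required because
 * one-shot descriptors are unlinked and freed mid-walk.  Names are invented.
 */
#if 0
#include <sys/queue.h>
#include <stdlib.h>

struct sketch_desc {
        int                             continuous;
        STAILQ_ENTRY(sketch_desc)       links;
};
STAILQ_HEAD(sketch_desc_list, sketch_desc);

static void
sketch_fire_and_expire(struct sketch_desc_list *list)
{
        struct sketch_desc *desc, *desc2;

        STAILQ_FOREACH_SAFE(desc, list, links, desc2) {
                /* ... apply the injected error here ... */
                if (desc->continuous)
                        continue;
                STAILQ_REMOVE(list, desc, sketch_desc, links);
                free(desc);
        }
}
#endif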
11322
11323 #ifdef CTL_IO_DELAY
11324 static void
11325 ctl_datamove_timer_wakeup(void *arg)
11326 {
11327         union ctl_io *io;
11328
11329         io = (union ctl_io *)arg;
11330
11331         ctl_datamove(io);
11332 }
11333 #endif /* CTL_IO_DELAY */
11334
11335 void
11336 ctl_datamove(union ctl_io *io)
11337 {
11338         void (*fe_datamove)(union ctl_io *io);
11339
11340         mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
11341
11342         CTL_DEBUG_PRINT(("ctl_datamove\n"));
11343
11344 #ifdef CTL_TIME_IO
11345         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
11346                 char str[256];
11347                 char path_str[64];
11348                 struct sbuf sb;
11349
11350                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
11351                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
11352
11353                 sbuf_cat(&sb, path_str);
11354                 switch (io->io_hdr.io_type) {
11355                 case CTL_IO_SCSI:
11356                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
11357                         sbuf_printf(&sb, "\n");
11358                         sbuf_cat(&sb, path_str);
11359                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
11360                                     io->scsiio.tag_num, io->scsiio.tag_type);
11361                         break;
11362                 case CTL_IO_TASK:
11363                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
11364                                     "Tag Type: %d\n", io->taskio.task_action,
11365                                     io->taskio.tag_num, io->taskio.tag_type);
11366                         break;
11367                 default:
11368                         printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
11369                         panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
11370                         break;
11371                 }
11372                 sbuf_cat(&sb, path_str);
11373                 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
11374                             (intmax_t)time_uptime - io->io_hdr.start_time);
11375                 sbuf_finish(&sb);
11376                 printf("%s", sbuf_data(&sb));
11377         }
11378 #endif /* CTL_TIME_IO */
11379
11380         mtx_lock(&control_softc->ctl_lock);
11381 #ifdef CTL_IO_DELAY
11382         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
11383                 struct ctl_lun *lun;
11384
11385                 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
11386
11387                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
11388         } else {
11389                 struct ctl_lun *lun;
11390
11391                 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
11392                 if ((lun != NULL)
11393                  && (lun->delay_info.datamove_delay > 0)) {
11394                         struct callout *callout;
11395
11396                         callout = (struct callout *)&io->io_hdr.timer_bytes;
11397                         callout_init(callout, /*mpsafe*/ 1);
11398                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
11399                         callout_reset(callout,
11400                                       lun->delay_info.datamove_delay * hz,
11401                                       ctl_datamove_timer_wakeup, io);
11402                         if (lun->delay_info.datamove_type ==
11403                             CTL_DELAY_TYPE_ONESHOT)
11404                                 lun->delay_info.datamove_delay = 0;
11405                         mtx_unlock(&control_softc->ctl_lock);
11406                         return;
11407                 }
11408         }
11409 #endif
11410         /*
11411          * If we have any pending task management commands, process them
11412          * first.  This is necessary to eliminate a race condition with the
11413          * FETD:
11414          *
11415          * - FETD submits a task management command, like an abort.
11416          * - Back end calls fe_datamove() to move the data for the aborted
11417          *   command.  The FETD can't really accept it, but if it did, it
11418          *   would end up transmitting data for a command that the initiator
11419          *   told us to abort.
11420          *
11421          * We close the race by processing all pending task management
11422          * commands here (we can't block!), and then check this I/O to see
11423          * if it has been aborted.  If so, return it to the back end with
11424          * bad status, so the back end can return an error.  When the back
11425          * end returns an error, we can return the aborted command to the
11426          * FETD, so it can clean up its resources.
11427          */
11428         if (control_softc->flags & CTL_FLAG_TASK_PENDING)
11429                 ctl_run_task_queue(control_softc);
11430
11431         /*
11432          * This command has been aborted.  Set the port status, so we fail
11433          * the data move.
11434          */
11435         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
11436                 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
11437                        io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
11438                        io->io_hdr.nexus.targ_port,
11439                        (uintmax_t)io->io_hdr.nexus.targ_target.id,
11440                        io->io_hdr.nexus.targ_lun);
11441                 io->io_hdr.status = CTL_CMD_ABORTED;
11442                 io->io_hdr.port_status = 31337;
11443                 mtx_unlock(&control_softc->ctl_lock);
11444                 /*
11445                  * Note that the backend, in this case, will get the
11446                  * callback in its context.  In other cases it may get
11447                  * called in the frontend's interrupt thread context.
11448                  */
11449                 io->scsiio.be_move_done(io);
11450                 return;
11451         }
11452
11453         /*
11454          * If we're in XFER mode and this I/O is from the other shelf
11455          * controller, we need to send the DMA to the other side to
11456          * actually transfer the data to/from the host.  In serialize only
11457          * mode the transfer happens below CTL and ctl_datamove() is only
11458          * called on the machine that originally received the I/O.
11459          */
11460         if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
11461          && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11462                 union ctl_ha_msg msg;
11463                 uint32_t sg_entries_sent;
11464                 int do_sg_copy;
11465                 int i;
11466
11467                 memset(&msg, 0, sizeof(msg));
11468                 msg.hdr.msg_type = CTL_MSG_DATAMOVE;
11469                 msg.hdr.original_sc = io->io_hdr.original_sc;
11470                 msg.hdr.serializing_sc = io;
11471                 msg.hdr.nexus = io->io_hdr.nexus;
11472                 msg.dt.flags = io->io_hdr.flags;
11473                 /*
11474                  * We convert everything into a S/G list here.  We can't
11475                  * pass by reference, only by value between controllers.
11476                  * So we can't pass a pointer to the S/G list, only as many
11477                  * S/G entries as we can fit in here.  If it's possible for
11478                  * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
11479                  * then we need to break this up into multiple transfers.
11480                  */
11481                 if (io->scsiio.kern_sg_entries == 0) {
11482                         msg.dt.kern_sg_entries = 1;
11483                         /*
11484                          * If this is in cached memory, flush the cache
11485                          * before we send the DMA request to the other
11486                          * controller.  We want to do this in either the
11487                          * read or the write case.  The read case is
11488                          * straightforward.  In the write case, we want to
11489                          * make sure nothing is in the local cache that
11490                          * could overwrite the DMAed data.
11491                          */
11492                         if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
11493                                 /*
11494                                  * XXX KDM use bus_dmamap_sync() here.
11495                                  */
11496                         }
11497
11498                         /*
11499                          * Convert to a physical address if this is a
11500                          * virtual address.
11501                          */
11502                         if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
11503                                 msg.dt.sg_list[0].addr =
11504                                         io->scsiio.kern_data_ptr;
11505                         } else {
11506                                 /*
11507                                  * XXX KDM use busdma here!
11508                                  */
11509 #if 0
11510                                 msg.dt.sg_list[0].addr = (void *)
11511                                         vtophys(io->scsiio.kern_data_ptr);
11512 #endif
11513                         }
11514
11515                         msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
11516                         do_sg_copy = 0;
11517                 } else {
11518                         struct ctl_sg_entry *sgl;
11519
11520                         do_sg_copy = 1;
11521                         msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
11522                         sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
11523                         if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
11524                                 /*
11525                                  * XXX KDM use bus_dmamap_sync() here.
11526                                  */
11527                         }
11528                 }
11529
11530                 msg.dt.kern_data_len = io->scsiio.kern_data_len;
11531                 msg.dt.kern_total_len = io->scsiio.kern_total_len;
11532                 msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
11533                 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
11534                 msg.dt.sg_sequence = 0;
11535
11536                 /*
11537                  * Loop until we've sent all of the S/G entries.  On the
11538                  * other end, we'll recompose these S/G entries into one
11539                  * contiguous list before handing it off for the actual transfer.
11540                  */
11541                 for (sg_entries_sent = 0; sg_entries_sent <
11542                      msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
11543                         msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
11544                                 sizeof(msg.dt.sg_list[0])),
11545                                 msg.dt.kern_sg_entries - sg_entries_sent);
11546
11547                         if (do_sg_copy != 0) {
11548                                 struct ctl_sg_entry *sgl;
11549                                 int j;
11550
11551                                 sgl = (struct ctl_sg_entry *)
11552                                         io->scsiio.kern_data_ptr;
11553                                 /*
11554                                  * If this is in cached memory, flush the cache
11555                                  * before we send the DMA request to the other
11556                                  * controller.  We want to do this in either
11557                                  * the read or the write case.  The read
11558                                  * case is straightforward.  In the write
11559                                  * case, we want to make sure nothing is
11560                                  * in the local cache that could overwrite
11561                                  * the DMAed data.
11562                                  */
11563
11564                                 for (i = sg_entries_sent, j = 0;
11565                                      j < msg.dt.cur_sg_entries; i++, j++) {
11566                                         if ((io->io_hdr.flags &
11567                                              CTL_FLAG_NO_DATASYNC) == 0) {
11568                                                 /*
11569                                                  * XXX KDM use bus_dmamap_sync()
11570                                                  */
11571                                         }
11572                                         if ((io->io_hdr.flags &
11573                                              CTL_FLAG_BUS_ADDR) == 0) {
11574                                                 /*
11575                                                  * XXX KDM use busdma.
11576                                                  */
11577 #if 0
11578                                                 msg.dt.sg_list[j].addr =(void *)
11579                                                        vtophys(sgl[i].addr);
11580 #endif
11581                                         } else {
11582                                                 msg.dt.sg_list[j].addr =
11583                                                         sgl[i].addr;
11584                                         }
11585                                         msg.dt.sg_list[j].len = sgl[i].len;
11586                                 }
11587                         }
11588
11589                         sg_entries_sent += msg.dt.cur_sg_entries;
11590                         if (sg_entries_sent >= msg.dt.kern_sg_entries)
11591                                 msg.dt.sg_last = 1;
11592                         else
11593                                 msg.dt.sg_last = 0;
11594
11595                         /*
11596                          * XXX KDM drop and reacquire the lock here?
11597                          */
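                        /*
                         * Status values greater than CTL_HA_STATUS_SUCCESS
                         * are assumed to indicate a send failure (SUCCESS
                         * is taken to be the smallest status value), which
                         * is why the comparison below is "greater than".
                         */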
11598                         if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
11599                             sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
11600                                 /*
11601                                  * XXX do something here.
11602                                  */
11603                         }
11604
11605                         msg.dt.sent_sg_entries = sg_entries_sent;
11606                 }
11607                 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11608                 if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
11609                         ctl_failover_io(io, /*have_lock*/ 1);
11610
11611         } else {
11612
11613                 /*
11614                  * Lookup the fe_datamove() function for this particular
11615                  * front end.
11616                  */
11617                 fe_datamove =
11618                     control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
11619                 mtx_unlock(&control_softc->ctl_lock);
11620
11621                 fe_datamove(io);
11622         }
11623 }
11624
11625 static void
11626 ctl_send_datamove_done(union ctl_io *io, int have_lock)
11627 {
11628         union ctl_ha_msg msg;
11629         int isc_status;
11630
11631         memset(&msg, 0, sizeof(msg));
11632
11633         msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
11634         msg.hdr.original_sc = io;
11635         msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
11636         msg.hdr.nexus = io->io_hdr.nexus;
11637         msg.hdr.status = io->io_hdr.status;
11638         msg.scsi.tag_num = io->scsiio.tag_num;
11639         msg.scsi.tag_type = io->scsiio.tag_type;
11640         msg.scsi.scsi_status = io->scsiio.scsi_status;
11641         memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
11642                sizeof(io->scsiio.sense_data));
11643         msg.scsi.sense_len = io->scsiio.sense_len;
11644         msg.scsi.sense_residual = io->scsiio.sense_residual;
11645         msg.scsi.fetd_status = io->io_hdr.port_status;
11646         msg.scsi.residual = io->scsiio.residual;
11647         io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11648
11649         if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
11650                 ctl_failover_io(io, /*have_lock*/ have_lock);
11651                 return;
11652         }
11653
11654         isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
11655         if (isc_status > CTL_HA_STATUS_SUCCESS) {
11656                 /* XXX do something if this fails */
11657         }
11658
11659 }
11660
11661 /*
11662  * The DMA to the remote side is done, now we need to tell the other side
11663  * we're done so it can continue with its data movement.
11664  */
11665 static void
11666 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
11667 {
11668         union ctl_io *io;
11669
11670         io = rq->context;
11671
11672         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
11673                 printf("%s: ISC DMA write failed with error %d\n", __func__,
11674                        rq->ret);
11675                 ctl_set_internal_failure(&io->scsiio,
11676                                          /*sks_valid*/ 1,
11677                                          /*retry_count*/ rq->ret);
11678         }
11679
11680         ctl_dt_req_free(rq);
11681
11682         /*
11683          * In this case, we had to malloc the memory locally.  Free it.
11684          */
11685         if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
11686                 int i;
11687                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
11688                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
11689         }
11690         /*
11691          * The data is in local and remote memory, so now we need to send
11692          * status (good or bad) back to the other side.
11693          */
11694         ctl_send_datamove_done(io, /*have_lock*/ 0);
11695 }
11696
11697 /*
11698  * We've moved the data from the host/controller into local memory.  Now we
11699  * need to push it over to the remote controller's memory.
11700  */
11701 static int
11702 ctl_datamove_remote_dm_write_cb(union ctl_io *io)
11703 {
11704         int retval;
11705
11706         retval = 0;
11707
11708         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
11709                                           ctl_datamove_remote_write_cb);
11710
11711         return (retval);
11712 }
11713
11714 static void
11715 ctl_datamove_remote_write(union ctl_io *io)
11716 {
11717         int retval;
11718         void (*fe_datamove)(union ctl_io *io);
11719
11720         /*
11721          * - Get the data from the host/HBA into local memory.
11722          * - DMA memory from the local controller to the remote controller.
11723          * - Send status back to the remote controller.
11724          */
11725
11726         retval = ctl_datamove_remote_sgl_setup(io);
11727         if (retval != 0)
11728                 return;
11729
11730         /* Switch the pointer over so the FETD knows what to do */
11731         io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
11732
11733         /*
11734          * Use a custom move done callback, since we need to send completion
11735          * back to the other controller, not to the backend on this side.
11736          */
11737         io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
11738
11739         fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
11740
11741         fe_datamove(io);
11742
11743         return;
11744
11745 }
11746
11747 static int
11748 ctl_datamove_remote_dm_read_cb(union ctl_io *io)
11749 {
11750 #if 0
11751         char str[256];
11752         char path_str[64];
11753         struct sbuf sb;
11754 #endif
11755
11756         /*
11757          * In this case, we had to malloc the memory locally.  Free it.
11758          */
11759         if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
11760                 int i;
11761                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
11762                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
11763         }
11764
11765 #if 0
11766         scsi_path_string(io, path_str, sizeof(path_str));
11767         sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
11768         sbuf_cat(&sb, path_str);
11769         scsi_command_string(&io->scsiio, NULL, &sb);
11770         sbuf_printf(&sb, "\n");
11771         sbuf_cat(&sb, path_str);
11772         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
11773                     io->scsiio.tag_num, io->scsiio.tag_type);
11774         sbuf_cat(&sb, path_str);
11775         sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
11776                     io->io_hdr.flags, io->io_hdr.status);
11777         sbuf_finish(&sb);
11778         printf("%s", sbuf_data(&sb));
11779 #endif
11780
11781
11782         /*
11783          * The read is done, now we need to send status (good or bad) back
11784          * to the other side.
11785          */
11786         ctl_send_datamove_done(io, /*have_lock*/ 0);
11787
11788         return (0);
11789 }
11790
11791 static void
11792 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
11793 {
11794         union ctl_io *io;
11795         void (*fe_datamove)(union ctl_io *io);
11796
11797         io = rq->context;
11798
11799         if (rq->ret != CTL_HA_STATUS_SUCCESS) {
11800                 printf("%s: ISC DMA read failed with error %d\n", __func__,
11801                        rq->ret);
11802                 ctl_set_internal_failure(&io->scsiio,
11803                                          /*sks_valid*/ 1,
11804                                          /*retry_count*/ rq->ret);
11805         }
11806
11807         ctl_dt_req_free(rq);
11808
11809         /* Switch the pointer over so the FETD knows what to do */
11810         io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
11811
11812         /*
11813          * Use a custom move done callback, since we need to send completion
11814          * back to the other controller, not to the backend on this side.
11815          */
11816         io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
11817
11818         /* XXX KDM add checks like the ones in ctl_datamove? */
11819
11820         fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
11821
11822         fe_datamove(io);
11823 }
11824
11825 static int
11826 ctl_datamove_remote_sgl_setup(union ctl_io *io)
11827 {
11828         struct ctl_sg_entry *local_sglist, *remote_sglist;
11829         struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
11830         struct ctl_softc *softc;
11831         int retval;
11832         int i;
11833
11834         retval = 0;
11835         softc = control_softc;
11836
11837         local_sglist = io->io_hdr.local_sglist;
11838         local_dma_sglist = io->io_hdr.local_dma_sglist;
11839         remote_sglist = io->io_hdr.remote_sglist;
11840         remote_dma_sglist = io->io_hdr.remote_dma_sglist;
11841
11842         if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
11843                 for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
11844                         local_sglist[i].len = remote_sglist[i].len;
11845
11846                         /*
11847                          * XXX Detect the situation where the RS-level I/O
11848                          * redirector on the other side has already read the
11849                          * data off of the AOR RS on this side, and
11850                          * transferred it to remote (mirror) memory on the
11851                          * other side.  Since we already have the data in
11852                          * memory here, we just need to use it.
11853                          *
11854                          * XXX KDM this can probably be removed once we
11855                          * get the cache device code in and take the
11856                          * current AOR implementation out.
11857                          */
11858 #ifdef NEEDTOPORT
11859                         if ((remote_sglist[i].addr >=
11860                              (void *)vtophys(softc->mirr->addr))
11861                          && (remote_sglist[i].addr <
11862                              ((void *)vtophys(softc->mirr->addr) +
11863                              CacheMirrorOffset))) {
11864                                 local_sglist[i].addr = remote_sglist[i].addr -
11865                                         CacheMirrorOffset;
11866                                 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
11867                                      CTL_FLAG_DATA_IN)
11868                                         io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
11869                         } else {
11870                                 local_sglist[i].addr = remote_sglist[i].addr +
11871                                         CacheMirrorOffset;
11872                         }
11873 #endif
11874 #if 0
11875                         printf("%s: local %p, remote %p, len %d\n",
11876                                __func__, local_sglist[i].addr,
11877                                remote_sglist[i].addr, local_sglist[i].len);
11878 #endif
11879                 }
11880         } else {
11881                 uint32_t len_to_go;
11882
11883                 /*
11884                  * In this case, we don't have automatically allocated
11885                  * memory for this I/O on this controller.  This typically
11886                  * happens with internal CTL I/O -- e.g. inquiry, mode
11887                  * sense, etc.  Anything coming from RAIDCore will have
11888                  * a mirror area available.
11889                  */
11890                 len_to_go = io->scsiio.kern_data_len;
11891
11892                 /*
11893                  * Clear the no datasync flag, we have to use malloced
11894                  * buffers.
11895                  */
11896                 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
11897
11898                 /*
11899                  * The difficult thing here is that the size of the various
11900                  * S/G segments may be different than the size from the
11901                  * remote controller.  That'll make it harder when DMAing
11902                  * the data back to the other side.
11903                  */
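                /*
                 * For example, with the 128KB cap used below, a 300KB
                 * transfer is carved into local segments of 128KB, 128KB
                 * and 44KB, each DMA length rounded up to an 8 byte
                 * multiple, regardless of how the remote side split the
                 * same 300KB.
                 */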
11904                 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
11905                      sizeof(io->io_hdr.remote_sglist[0])) &&
11906                      (len_to_go > 0); i++) {
11907                         local_sglist[i].len = ctl_min(len_to_go, 131072);
11908                         CTL_SIZE_8B(local_dma_sglist[i].len,
11909                                     local_sglist[i].len);
11910                         local_sglist[i].addr =
11911                                 malloc(local_dma_sglist[i].len, M_CTL, M_WAITOK);
11912
11913                         local_dma_sglist[i].addr = local_sglist[i].addr;
11914
11915                         if (local_sglist[i].addr == NULL) {
11916                                 int j;
11917
11918                                 printf("malloc failed for %zu bytes!\n",
11919                                        local_dma_sglist[i].len);
11920                                 for (j = 0; j < i; j++) {
11921                                         free(local_sglist[j].addr, M_CTL);
11922                                 }
11923                                 ctl_set_internal_failure(&io->scsiio,
11924                                                          /*sks_valid*/ 1,
11925                                                          /*retry_count*/ 4857);
11926                                 retval = 1;
11927                                 goto bailout_error;
11928                                 
11929                         }
11930                         /* XXX KDM do we need a sync here? */
11931
11932                         len_to_go -= local_sglist[i].len;
11933                 }
11934                 /*
11935                  * Reset the number of S/G entries accordingly.  The
11936                  * original number of S/G entries is available in
11937                  * rem_sg_entries.
11938                  */
11939                 io->scsiio.kern_sg_entries = i;
11940
11941 #if 0
11942                 printf("%s: kern_sg_entries = %d\n", __func__,
11943                        io->scsiio.kern_sg_entries);
11944                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
11945                         printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
11946                                local_sglist[i].addr, local_sglist[i].len,
11947                                local_dma_sglist[i].len);
11948 #endif
11949         }
11950
11951
11952         return (retval);
11953
11954 bailout_error:
11955
11956         ctl_send_datamove_done(io, /*have_lock*/ 0);
11957
11958         return (retval);
11959 }
11960
11961 static int
11962 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
11963                          ctl_ha_dt_cb callback)
11964 {
11965         struct ctl_ha_dt_req *rq;
11966         struct ctl_sg_entry *remote_sglist, *local_sglist;
11967         struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
11968         uint32_t local_used, remote_used, total_used;
11969         int retval;
11970         int i, j;
11971
11972         retval = 0;
11973
11974         rq = ctl_dt_req_alloc();
11975
11976         /*
11977          * If we failed to allocate the request, and the I/O doesn't
11978          * already have an error status set, report busy status.  This
11979          * is just a resource allocation failure.
11980          */
11981         if ((rq == NULL)
11982          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
11983                 ctl_set_busy(&io->scsiio);
11984
11985         if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
11986
11987                 if (rq != NULL)
11988                         ctl_dt_req_free(rq);
11989
11990                 /*
11991                  * The data move failed.  We need to return status back
11992                  * to the other controller.  No point in trying to DMA
11993                  * data to the remote controller.
11994                  */
11995
11996                 ctl_send_datamove_done(io, /*have_lock*/ 0);
11997
11998                 retval = 1;
11999
12000                 goto bailout;
12001         }
12002
12003         local_sglist = io->io_hdr.local_sglist;
12004         local_dma_sglist = io->io_hdr.local_dma_sglist;
12005         remote_sglist = io->io_hdr.remote_sglist;
12006         remote_dma_sglist = io->io_hdr.remote_dma_sglist;
12007         local_used = 0;
12008         remote_used = 0;
12009         total_used = 0;
12010
12011         if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
12012                 rq->ret = CTL_HA_STATUS_SUCCESS;
12013                 rq->context = io;
12014                 callback(rq);
12015                 goto bailout;
12016         }
12017
12018         /*
12019          * Pull/push the data over the wire from/to the other controller.
12020          * This takes into account the possibility that the local and
12021          * remote sglists may not be identical in terms of the size of
12022          * the elements and the number of elements.
12023          *
12024          * One fundamental assumption here is that the length allocated for
12025          * both the local and remote sglists is identical.  Otherwise, we've
12026          * essentially got a coding error of some sort.
12027          */
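        /*
         * Example (hypothetical sizes): local segments of 128KB and 128KB
         * against remote segments of 64KB and 192KB produce transfers of
         * 64KB, 64KB and 128KB; cur_len is always the minimum of what
         * remains in the current local and remote segments.
         */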
12028         for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
12029                 int isc_ret;
12030                 uint32_t cur_len, dma_length;
12031                 uint8_t *tmp_ptr;
12032
12033                 rq->id = CTL_HA_DATA_CTL;
12034                 rq->command = command;
12035                 rq->context = io;
12036
12037                 /*
12038                  * Both pointers should be aligned.  But it is possible
12039                  * that the allocation length is not.  They should both
12040                  * also have enough slack left over at the end, though,
12041                  * to round up to the next 8 byte boundary.
12042                  */
12043                 cur_len = ctl_min(local_sglist[i].len - local_used,
12044                                   remote_sglist[j].len - remote_used);
12045
12046                 /*
12047                  * In this case, we have a size issue and need to decrease
12048                  * the size, except in the case where we actually have less
12049                  * than 8 bytes left.  In that case, we need to increase
12050                  * the DMA length to get the last bit.
12051                  */
12052                 if ((cur_len & 0x7) != 0) {
12053                         if (cur_len > 0x7) {
12054                                 cur_len = cur_len - (cur_len & 0x7);
12055                                 dma_length = cur_len;
12056                         } else {
12057                                 CTL_SIZE_8B(dma_length, cur_len);
12058                         }
12059
12060                 } else
12061                         dma_length = cur_len;
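                /*
                 * e.g. cur_len = 13 -> cur_len = 8, dma_length = 8;
                 *      cur_len = 5  -> cur_len unchanged, dma_length = 8.
                 */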
12062
12063                 /*
12064                  * If we had to allocate memory for this I/O, instead of using
12065                  * the non-cached mirror memory, we'll need to flush the cache
12066                  * before trying to DMA to the other controller.
12067                  *
12068                  * We could end up doing this multiple times for the same
12069                  * segment if we have a larger local segment than remote
12070                  * segment.  That shouldn't be an issue.
12071                  */
12072                 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12073                         /*
12074                          * XXX KDM use bus_dmamap_sync() here.
12075                          */
12076                 }
12077
12078                 rq->size = dma_length;
12079
12080                 tmp_ptr = (uint8_t *)local_sglist[i].addr;
12081                 tmp_ptr += local_used;
12082
12083                 /* Use physical addresses when talking to ISC hardware */
12084                 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
12085                         /* XXX KDM use busdma */
12086 #if 0
12087                         rq->local = vtophys(tmp_ptr);
12088 #endif
12089                 } else
12090                         rq->local = tmp_ptr;
12091
12092                 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
12093                 tmp_ptr += remote_used;
12094                 rq->remote = tmp_ptr;
12095
12096                 rq->callback = NULL;
12097
12098                 local_used += cur_len;
12099                 if (local_used >= local_sglist[i].len) {
12100                         i++;
12101                         local_used = 0;
12102                 }
12103
12104                 remote_used += cur_len;
12105                 if (remote_used >= remote_sglist[j].len) {
12106                         j++;
12107                         remote_used = 0;
12108                 }
12109                 total_used += cur_len;
12110
12111                 if (total_used >= io->scsiio.kern_data_len)
12112                         rq->callback = callback;
12113
12114                 if ((rq->size & 0x7) != 0) {
12115                         printf("%s: warning: size %d is not on 8b boundary\n",
12116                                __func__, rq->size);
12117                 }
12118                 if (((uintptr_t)rq->local & 0x7) != 0) {
12119                         printf("%s: warning: local %p not on 8b boundary\n",
12120                                __func__, rq->local);
12121                 }
12122                 if (((uintptr_t)rq->remote & 0x7) != 0) {
12123                         printf("%s: warning: remote %p not on 8b boundary\n",
12124                                __func__, rq->remote);
12125                 }
12126 #if 0
12127                 printf("%s: %s: local %#x remote %#x size %d\n", __func__,
12128                        (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
12129                        rq->local, rq->remote, rq->size);
12130 #endif
12131
12132                 isc_ret = ctl_dt_single(rq);
12133                 if (isc_ret == CTL_HA_STATUS_WAIT)
12134                         continue;
12135
12136                 if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
12137                         rq->ret = CTL_HA_STATUS_SUCCESS;
12138                 } else {
12139                         rq->ret = isc_ret;
12140                 }
12141                 callback(rq);
12142                 goto bailout;
12143         }
12144
12145 bailout:
12146         return (retval);
12147
12148 }
12149
12150 static void
12151 ctl_datamove_remote_read(union ctl_io *io)
12152 {
12153         int retval;
12154         int i;
12155
12156         /*
12157          * This will send an error to the other controller in the case of a
12158          * failure.
12159          */
12160         retval = ctl_datamove_remote_sgl_setup(io);
12161         if (retval != 0)
12162                 return;
12163
12164         retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
12165                                           ctl_datamove_remote_read_cb);
12166         if ((retval != 0)
12167          && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
12168                 /*
12169                  * Make sure we free memory if there was an error.  The
12170                  * ctl_datamove_remote_xfer() function will send the
12171                  * datamove done message, or call the callback with an
12172                  * error if there is a problem.
12173                  */
12174                 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12175                         free(io->io_hdr.local_sglist[i].addr, M_CTL);
12176         }
12177
12178         return;
12179 }
12180
12181 /*
12182  * Process a datamove request from the other controller.  This is used for
12183  * XFER mode only, not SER_ONLY mode.  For writes, we DMA into local memory
12184  * first.  Once that is complete, the data gets DMAed into the remote
12185  * controller's memory.  For reads, we DMA from the remote controller's
12186  * memory into our memory first, and then move it out to the FETD.
12187  */
12188 static void
12189 ctl_datamove_remote(union ctl_io *io)
12190 {
12191         struct ctl_softc *softc;
12192
12193         softc = control_softc;
12194
12195         mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
12196
12197         /*
12198          * Note that we look for an aborted I/O here, but don't do some of
12199          * the other checks that ctl_datamove() normally does.  We don't
12200          * need to run the task queue, because this I/O is on the ISC
12201          * queue, which is executed by the work thread after the task queue.
12202          * We don't need to run the datamove delay code, since that should
12203          * have been done if need be on the other controller.
12204          */
12205         mtx_lock(&softc->ctl_lock);
12206
12207         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12208
12209                 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
12210                        io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
12211                        io->io_hdr.nexus.targ_port,
12212                        io->io_hdr.nexus.targ_target.id,
12213                        io->io_hdr.nexus.targ_lun);
12214                 io->io_hdr.status = CTL_CMD_ABORTED;
12215                 io->io_hdr.port_status = 31338;
12216
12217                 mtx_unlock(&softc->ctl_lock);
12218
12219                 ctl_send_datamove_done(io, /*have_lock*/ 0);
12220
12221                 return;
12222         }
12223
12224         if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
12225                 mtx_unlock(&softc->ctl_lock);
12226                 ctl_datamove_remote_write(io);
12227         } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
12228                 mtx_unlock(&softc->ctl_lock);
12229                 ctl_datamove_remote_read(io);
12230         } else {
12231                 union ctl_ha_msg msg;
12232                 struct scsi_sense_data *sense;
12233                 uint8_t sks[3];
12234                 int retry_count;
12235
12236                 memset(&msg, 0, sizeof(msg));
12237
12238                 msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
12239                 msg.hdr.status = CTL_SCSI_ERROR;
12240                 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
12241
12242                 retry_count = 4243;
12243
12244                 sense = &msg.scsi.sense_data;
12245                 sks[0] = SSD_SCS_VALID;
12246                 sks[1] = (retry_count >> 8) & 0xff;
12247                 sks[2] = retry_count & 0xff;
12248
12249                 /* "Internal target failure" */
12250                 scsi_set_sense_data(sense,
12251                                     /*sense_format*/ SSD_TYPE_NONE,
12252                                     /*current_error*/ 1,
12253                                     /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
12254                                     /*asc*/ 0x44,
12255                                     /*ascq*/ 0x00,
12256                                     /*type*/ SSD_ELEM_SKS,
12257                                     /*size*/ sizeof(sks),
12258                                     /*data*/ sks,
12259                                     SSD_ELEM_NONE);
12260
12261                 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12262                 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12263                         ctl_failover_io(io, /*have_lock*/ 1);
12264                         mtx_unlock(&softc->ctl_lock);
12265                         return;
12266                 }
12267
12268                 mtx_unlock(&softc->ctl_lock);
12269
12270                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
12271                     CTL_HA_STATUS_SUCCESS) {
12272                         /* XXX KDM what to do if this fails? */
12273                 }
12274                 return;
12275         }
12276         
12277 }
12278
12279 static int
12280 ctl_process_done(union ctl_io *io, int have_lock)
12281 {
12282         struct ctl_lun *lun;
12283         struct ctl_softc *ctl_softc;
12284         void (*fe_done)(union ctl_io *io);
12285         uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
12286
12287         CTL_DEBUG_PRINT(("ctl_process_done\n"));
12288
12289         fe_done =
12290             control_softc->ctl_ports[targ_port]->fe_done;
12291
12292 #ifdef CTL_TIME_IO
12293         if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12294                 char str[256];
12295                 char path_str[64];
12296                 struct sbuf sb;
12297
12298                 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12299                 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12300
12301                 sbuf_cat(&sb, path_str);
12302                 switch (io->io_hdr.io_type) {
12303                 case CTL_IO_SCSI:
12304                         ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12305                         sbuf_printf(&sb, "\n");
12306                         sbuf_cat(&sb, path_str);
12307                         sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12308                                     io->scsiio.tag_num, io->scsiio.tag_type);
12309                         break;
12310                 case CTL_IO_TASK:
12311                         sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12312                                     "Tag Type: %d\n", io->taskio.task_action,
12313                                     io->taskio.tag_num, io->taskio.tag_type);
12314                         break;
12315                 default:
12316                         printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12317                         panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12318                         break;
12319                 }
12320                 sbuf_cat(&sb, path_str);
12321                 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
12322                             (intmax_t)time_uptime - io->io_hdr.start_time);
12323                 sbuf_finish(&sb);
12324                 printf("%s", sbuf_data(&sb));
12325         }
12326 #endif /* CTL_TIME_IO */
12327
12328         switch (io->io_hdr.io_type) {
12329         case CTL_IO_SCSI:
12330                 break;
12331         case CTL_IO_TASK:
12332                 ctl_io_error_print(io, NULL);
12333                 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
12334                         ctl_free_io(io);
12335                 else
12336                         fe_done(io);
12337                 return (CTL_RETVAL_COMPLETE);
12338                 break;
12339         default:
12340                 printf("ctl_process_done: invalid io type %d\n",
12341                        io->io_hdr.io_type);
12342                 panic("ctl_process_done: invalid io type %d\n",
12343                       io->io_hdr.io_type);
12344                 break; /* NOTREACHED */
12345         }
12346
12347         lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12348         if (lun == NULL) {
12349                 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
12350                                  io->io_hdr.nexus.targ_lun));
12351                 fe_done(io);
12352                 goto bailout;
12353         }
12354         ctl_softc = lun->ctl_softc;
12355
12356         /*
12357          * Remove this from the OOA queue.
12358          */
12359         if (have_lock == 0)
12360                 mtx_lock(&ctl_softc->ctl_lock);
12361
12362         /*
12363          * Check to see if we have any errors to inject here.  We only
12364          * inject errors for commands that don't already have errors set.
12365          */
12366         if ((STAILQ_FIRST(&lun->error_list) != NULL)
12367          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))
12368                 ctl_inject_error(lun, io);
12369
12370         /*
12371          * XXX KDM how do we treat commands that aren't completed
12372          * successfully?
12373          *
12374          * XXX KDM should we also track I/O latency?
12375          */
12376         if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
12377                 uint32_t blocksize;
12378 #ifdef CTL_TIME_IO
12379                 struct bintime cur_bt;
12380 #endif
12381
12382                 if ((lun->be_lun != NULL)
12383                  && (lun->be_lun->blocksize != 0))
12384                         blocksize = lun->be_lun->blocksize;
12385                 else
12386                         blocksize = 512;
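                /*
                 * e.g. with a 4096 byte blocksize, a READ(10) of 8 blocks
                 * is accounted below as 32768 bytes of read traffic for
                 * this port.
                 */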
12387
12388                 switch (io->io_hdr.io_type) {
12389                 case CTL_IO_SCSI: {
12390                         int isread;
12391                         struct ctl_lba_len lbalen;
12392
12393                         isread = 0;
12394                         switch (io->scsiio.cdb[0]) {
12395                         case READ_6:
12396                         case READ_10:
12397                         case READ_12:
12398                         case READ_16:
12399                                 isread = 1;
12400                                 /* FALLTHROUGH */
12401                         case WRITE_6:
12402                         case WRITE_10:
12403                         case WRITE_12:
12404                         case WRITE_16:
12405                         case WRITE_VERIFY_10:
12406                         case WRITE_VERIFY_12:
12407                         case WRITE_VERIFY_16:
12408                                 memcpy(&lbalen, io->io_hdr.ctl_private[
12409                                        CTL_PRIV_LBA_LEN].bytes, sizeof(lbalen));
12410
12411                                 if (isread) {
12412                                         lun->stats.ports[targ_port].bytes[CTL_STATS_READ] +=
12413                                                 lbalen.len * blocksize;
12414                                         lun->stats.ports[targ_port].operations[CTL_STATS_READ]++;
12415
12416 #ifdef CTL_TIME_IO
12417                                         bintime_add(
12418                                            &lun->stats.ports[targ_port].dma_time[CTL_STATS_READ],
12419                                            &io->io_hdr.dma_bt);
12420                                         lun->stats.ports[targ_port].num_dmas[CTL_STATS_READ] +=
12421                                                 io->io_hdr.num_dmas;
12422                                         getbintime(&cur_bt);
12423                                         bintime_sub(&cur_bt,
12424                                                     &io->io_hdr.start_bt);
12425
12426                                         bintime_add(
12427                                             &lun->stats.ports[targ_port].time[CTL_STATS_READ],
12428                                             &cur_bt);
12429
12430 #if 0
12431                                         cs_prof_gettime(&cur_ticks);
12432                                         lun->stats.time[CTL_STATS_READ] +=
12433                                                 cur_ticks -
12434                                                 io->io_hdr.start_ticks;
12435 #endif
12436 #if 0
12437                                         lun->stats.time[CTL_STATS_READ] +=
12438                                                 jiffies - io->io_hdr.start_time;
12439 #endif
12440 #endif /* CTL_TIME_IO */
12441                                 } else {
12442                                         lun->stats.ports[targ_port].bytes[CTL_STATS_WRITE] +=
12443                                                 lbalen.len * blocksize;
12444                                         lun->stats.ports[targ_port].operations[
12445                                                 CTL_STATS_WRITE]++;
12446
12447 #ifdef CTL_TIME_IO
12448                                         bintime_add(
12449                                           &lun->stats.ports[targ_port].dma_time[CTL_STATS_WRITE],
12450                                           &io->io_hdr.dma_bt);
12451                                         lun->stats.ports[targ_port].num_dmas[CTL_STATS_WRITE] +=
12452                                                 io->io_hdr.num_dmas;
12453                                         getbintime(&cur_bt);
12454                                         bintime_sub(&cur_bt,
12455                                                     &io->io_hdr.start_bt);
12456
12457                                         bintime_add(
12458                                             &lun->stats.ports[targ_port].time[CTL_STATS_WRITE],
12459                                             &cur_bt);
12460 #if 0
12461                                         cs_prof_gettime(&cur_ticks);
12462                                         lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
12463                                                 cur_ticks -
12464                                                 io->io_hdr.start_ticks;
12465                                         lun->stats.ports[targ_port].time[CTL_STATS_WRITE] +=
12466                                                 jiffies - io->io_hdr.start_time;
12467 #endif
12468 #endif /* CTL_TIME_IO */
12469                                 }
12470                                 break;
12471                         default:
12472                                 lun->stats.ports[targ_port].operations[CTL_STATS_NO_IO]++;
12473
12474 #ifdef CTL_TIME_IO
12475                                 bintime_add(
12476                                   &lun->stats.ports[targ_port].dma_time[CTL_STATS_NO_IO],
12477                                   &io->io_hdr.dma_bt);
12478                                 lun->stats.ports[targ_port].num_dmas[CTL_STATS_NO_IO] +=
12479                                         io->io_hdr.num_dmas;
12480                                 getbintime(&cur_bt);
12481                                 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
12482
12483                                 bintime_add(&lun->stats.ports[targ_port].time[CTL_STATS_NO_IO],
12484                                             &cur_bt);
12485
12486 #if 0
12487                                 cs_prof_gettime(&cur_ticks);
12488                                 lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
12489                                         cur_ticks -
12490                                         io->io_hdr.start_ticks;
12491                                 lun->stats.ports[targ_port].time[CTL_STATS_NO_IO] +=
12492                                         jiffies - io->io_hdr.start_time;
12493 #endif
12494 #endif /* CTL_TIME_IO */
12495                                 break;
12496                         }
12497                         break;
12498                 }
12499                 default:
12500                         break;
12501                 }
12502         }
12503
12504         TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12505
12506         /*
12507          * Run through the blocked queue on this LUN and see if anything
12508          * has become unblocked, now that this transaction is done.
12509          */
12510         ctl_check_blocked(lun);
12511
12512         /*
12513          * If the LUN has been invalidated, free it if there is nothing
12514          * left on its OOA queue.
12515          */
12516         if ((lun->flags & CTL_LUN_INVALID)
12517          && (TAILQ_FIRST(&lun->ooa_queue) == NULL))
12518                 ctl_free_lun(lun);
12519
12520         /*
12521          * If this command has been aborted, make sure we set the status
12522          * properly.  The FETD is responsible for freeing the I/O and doing
12523          * whatever it needs to do to clean up its state.
12524          */
12525         if (io->io_hdr.flags & CTL_FLAG_ABORT)
12526                 io->io_hdr.status = CTL_CMD_ABORTED;
12527
12528         /*
12529          * We print out status for every task management command.  For SCSI
12530          * commands, we filter out any unit attention errors; they happen
12531          * on every boot, and would clutter up the log.  Note:  task
12532          * management commands aren't printed here, they are printed above,
12533          * since they should never even make it down here.
12534          */
12535         switch (io->io_hdr.io_type) {
12536         case CTL_IO_SCSI: {
12537                 int error_code, sense_key, asc, ascq;
12538
12539                 sense_key = 0;
12540
12541                 if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
12542                  && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
12543                         /*
12544                          * Since this is just for printing, no need to
12545                          * show errors here.
12546                          */
12547                         scsi_extract_sense_len(&io->scsiio.sense_data,
12548                                                io->scsiio.sense_len,
12549                                                &error_code,
12550                                                &sense_key,
12551                                                &asc,
12552                                                &ascq,
12553                                                /*show_errors*/ 0);
12554                 }
12555
12556                 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
12557                  && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR)
12558                   || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND)
12559                   || (sense_key != SSD_KEY_UNIT_ATTENTION))) {
12560
12561                         if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){
12562                                 ctl_softc->skipped_prints++;
12563                                 if (have_lock == 0)
12564                                         mtx_unlock(&ctl_softc->ctl_lock);
12565                         } else {
12566                                 uint32_t skipped_prints;
12567
12568                                 skipped_prints = ctl_softc->skipped_prints;
12569
12570                                 ctl_softc->skipped_prints = 0;
12571                                 ctl_softc->last_print_jiffies = time_uptime;
12572
12573                                 if (have_lock == 0)
12574                                         mtx_unlock(&ctl_softc->ctl_lock);
12575                                 if (skipped_prints > 0) {
12576 #ifdef NEEDTOPORT
12577                                         csevent_log(CSC_CTL | CSC_SHELF_SW |
12578                                             CTL_ERROR_REPORT,
12579                                             csevent_LogType_Trace,
12580                                             csevent_Severity_Information,
12581                                             csevent_AlertLevel_Green,
12582                                             csevent_FRU_Firmware,
12583                                             csevent_FRU_Unknown,
12584                                             "High CTL error volume, %d prints "
12585                                             "skipped", skipped_prints);
12586 #endif
12587                                 }
12588                                 ctl_io_error_print(io, NULL);
12589                         }
12590                 } else {
12591                         if (have_lock == 0)
12592                                 mtx_unlock(&ctl_softc->ctl_lock);
12593                 }
12594                 break;
12595         }
12596         case CTL_IO_TASK:
12597                 if (have_lock == 0)
12598                         mtx_unlock(&ctl_softc->ctl_lock);
12599                 ctl_io_error_print(io, NULL);
12600                 break;
12601         default:
12602                 if (have_lock == 0)
12603                         mtx_unlock(&ctl_softc->ctl_lock);
12604                 break;
12605         }
12606
12607         /*
12608          * Tell the FETD or the other shelf controller we're done with this
12609          * command.  Note that only SCSI commands get to this point.  Task
12610          * management commands are completed above.
12611          *
12612          * We only send status to the other controller if we're in XFER
12613          * mode.  In SER_ONLY mode, the I/O is done on the controller that
12614          * received the I/O (from CTL's perspective), and so the status is
12615          * generated there.
12616          * 
12617          * XXX KDM if we hold the lock here, we could cause a deadlock
12618          * if the frontend comes back in in this context to queue
12619          * something.
12620          */
12621         if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER)
12622          && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12623                 union ctl_ha_msg msg;
12624
12625                 memset(&msg, 0, sizeof(msg));
12626                 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
12627                 msg.hdr.original_sc = io->io_hdr.original_sc;
12628                 msg.hdr.nexus = io->io_hdr.nexus;
12629                 msg.hdr.status = io->io_hdr.status;
12630                 msg.scsi.scsi_status = io->scsiio.scsi_status;
12631                 msg.scsi.tag_num = io->scsiio.tag_num;
12632                 msg.scsi.tag_type = io->scsiio.tag_type;
12633                 msg.scsi.sense_len = io->scsiio.sense_len;
12634                 msg.scsi.sense_residual = io->scsiio.sense_residual;
12635                 msg.scsi.residual = io->scsiio.residual;
12636                 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12637                        sizeof(io->scsiio.sense_data));
12638                 /*
12639                  * We copy this whether or not this is an I/O-related
12640                  * command.  Otherwise, we'd have to go and check to see
12641                  * whether it's a read/write command, and it really isn't
12642                  * worth it.
12643                  */
12644                 memcpy(&msg.scsi.lbalen,
12645                        &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
12646                        sizeof(msg.scsi.lbalen));
12647
12648                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12649                                 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
12650                         /* XXX do something here */
12651                 }
12652
12653                 ctl_free_io(io);
12654         } else 
12655                 fe_done(io);
12656
12657 bailout:
12658
12659         return (CTL_RETVAL_COMPLETE);
12660 }
12661
12662 /*
12663  * Front end should call this if it doesn't do autosense.  When the request
12664  * sense comes back in from the initiator, we'll dequeue this and send it.
12665  */
12666 int
12667 ctl_queue_sense(union ctl_io *io)
12668 {
12669         struct ctl_lun *lun;
12670         struct ctl_softc *ctl_softc;
12671         uint32_t initidx, targ_lun;
12672
12673         ctl_softc = control_softc;
12674
12675         CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
12676
12677         /*
12678          * LUN lookup will likely move to the ctl_work_thread() once we
12679          * have our new queueing infrastructure (that doesn't put things on
12680          * a per-LUN queue initially).  That is so that we can handle
12681          * things like an INQUIRY to a LUN that we don't have enabled.  We
12682          * can't deal with that right now.
12683          */
12684         mtx_lock(&ctl_softc->ctl_lock);
12685
12686         /*
12687          * If we don't have a LUN for this, just toss the sense
12688          * information.
12689          */
12690         targ_lun = io->io_hdr.nexus.targ_lun;
12691         if (io->io_hdr.nexus.lun_map_fn != NULL)
12692                 targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun);
12693         if ((targ_lun < CTL_MAX_LUNS)
12694          && (ctl_softc->ctl_luns[targ_lun] != NULL))
12695                 lun = ctl_softc->ctl_luns[targ_lun];
12696         else
12697                 goto bailout;
12698
12699         initidx = ctl_get_initindex(&io->io_hdr.nexus);
12700
12701         /*
12702          * Already have CA set for this LUN...toss the sense information.
12703          */
12704         if (ctl_is_set(lun->have_ca, initidx))
12705                 goto bailout;
12706
12707         memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
12708                ctl_min(sizeof(lun->pending_sense[initidx].sense),
12709                sizeof(io->scsiio.sense_data)));
12710         ctl_set_mask(lun->have_ca, initidx);
12711
12712 bailout:
12713         mtx_unlock(&ctl_softc->ctl_lock);
12714
12715         ctl_free_io(io);
12716
12717         return (CTL_RETVAL_COMPLETE);
12718 }
12719
12720 /*
12721  * Primary command inlet from frontend ports.  All SCSI and task I/O
12722  * requests must go through this function.
12723  */
12724 int
12725 ctl_queue(union ctl_io *io)
12726 {
12727         struct ctl_softc *ctl_softc;
12728
12729         CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
12730
12731         ctl_softc = control_softc;
12732
12733 #ifdef CTL_TIME_IO
12734         io->io_hdr.start_time = time_uptime;
12735         getbintime(&io->io_hdr.start_bt);
12736 #endif /* CTL_TIME_IO */
12737
12738         mtx_lock(&ctl_softc->ctl_lock);
12739
12740         switch (io->io_hdr.io_type) {
12741         case CTL_IO_SCSI:
12742                 STAILQ_INSERT_TAIL(&ctl_softc->incoming_queue, &io->io_hdr,
12743                                    links);
12744                 break;
12745         case CTL_IO_TASK:
12746                 STAILQ_INSERT_TAIL(&ctl_softc->task_queue, &io->io_hdr, links);
12747                 /*
12748                  * Set the task pending flag.  This is necessary to close a
12749                  * race condition with the FETD:
12750                  *
12751                  * - FETD submits a task management command, like an abort.
12752                  * - Back end calls fe_datamove() to move the data for the
12753                  *   aborted command.  The FETD can't really accept it, but
12754                  *   if it did, it would end up transmitting data for a
12755                  *   command that the initiator told us to abort.
12756                  *
12757                  * We close the race condition by setting the flag here,
12758                  * and checking it in ctl_datamove(), before calling the
12759                  * FETD's fe_datamove routine.  If we've got a task
12760                  * pending, we run the task queue and then check to see
12761                  * whether our particular I/O has been aborted.
12762                  */
12763                 ctl_softc->flags |= CTL_FLAG_TASK_PENDING;
12764                 break;
12765         default:
12766                 mtx_unlock(&ctl_softc->ctl_lock);
12767                 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
12768                 return (-EINVAL);
12769                 break; /* NOTREACHED */
12770         }
12771         mtx_unlock(&ctl_softc->ctl_lock);
12772
12773         ctl_wakeup_thread();
12774
12775         return (CTL_RETVAL_COMPLETE);
12776 }
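
#if 0
/*
 * Hypothetical usage sketch (editorial illustration, not part of the
 * driver): a frontend port would allocate an I/O from its pool, fill in
 * the nexus and CDB, and hand it to ctl_queue().  The pool pointer,
 * nexus values, and tag number below are assumptions for illustration.
 */
static int
example_fetd_submit(void *io_pool)
{
        union ctl_io *io;

        io = ctl_alloc_io(io_pool);
        if (io == NULL)
                return (ENOMEM);
        io->io_hdr.io_type = CTL_IO_SCSI;
        io->io_hdr.nexus.targ_port = 0;         /* assumed port number */
        io->io_hdr.nexus.targ_lun = 0;          /* assumed LUN */
        io->scsiio.tag_num = 1;                 /* assumed tag */
        io->scsiio.tag_type = CTL_TAG_SIMPLE;
        io->scsiio.cdb_len = 6;
        io->scsiio.cdb[0] = INQUIRY;            /* example: standard INQUIRY */
        return (ctl_queue(io));
}
#endif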
12777
12778 #ifdef CTL_IO_DELAY
12779 static void
12780 ctl_done_timer_wakeup(void *arg)
12781 {
12782         union ctl_io *io;
12783
12784         io = (union ctl_io *)arg;
12785         ctl_done_lock(io, /*have_lock*/ 0);
12786 }
12787 #endif /* CTL_IO_DELAY */
12788
12789 void
12790 ctl_done_lock(union ctl_io *io, int have_lock)
12791 {
12792         struct ctl_softc *ctl_softc;
12793 #ifndef CTL_DONE_THREAD
12794         union ctl_io *xio;
12795 #endif /* !CTL_DONE_THREAD */
12796
12797         ctl_softc = control_softc;
12798
12799         if (have_lock == 0)
12800                 mtx_lock(&ctl_softc->ctl_lock);
12801
12802         /*
12803          * Enable this to catch duplicate completion issues.
12804          */
12805 #if 0
12806         if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
12807                 printf("%s: type %d msg %d cdb %x iptl: "
12808                        "%d:%d:%d:%d tag 0x%04x "
12809                        "flag %#x status %x\n",
12810                         __func__,
12811                         io->io_hdr.io_type,
12812                         io->io_hdr.msg_type,
12813                         io->scsiio.cdb[0],
12814                         io->io_hdr.nexus.initid.id,
12815                         io->io_hdr.nexus.targ_port,
12816                         io->io_hdr.nexus.targ_target.id,
12817                         io->io_hdr.nexus.targ_lun,
12818                         (io->io_hdr.io_type ==
12819                         CTL_IO_TASK) ?
12820                         io->taskio.tag_num :
12821                         io->scsiio.tag_num,
12822                         io->io_hdr.flags,
12823                         io->io_hdr.status);
12824         } else
12825                 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
12826 #endif
12827
12828         /*
12829          * This is an internal copy of an I/O, and should not go through
12830          * the normal done processing logic.
12831          */
12832         if (io->io_hdr.flags & CTL_FLAG_INT_COPY) {
12833                 if (have_lock == 0)
12834                         mtx_unlock(&ctl_softc->ctl_lock);
12835                 return;
12836         }
12837
12838         /*
12839          * We need to send a message to the serializing shelf to finish
12840          * the I/O as well.  We don't send a finish message to the other
12841          * shelf if this is a task management command.  Task management
12842          * commands aren't serialized in the OOA queue; they are simply
12843          * executed on both shelf controllers, against the commands that
12844          * originated on the issuing controller.
12845          */
12846         if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
12847          && (io->io_hdr.io_type != CTL_IO_TASK)) {
12848                 union ctl_ha_msg msg_io;
12849
12850                 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
12851                 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
12852                 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
12853                     sizeof(msg_io), 0) != CTL_HA_STATUS_SUCCESS) {
12854                 }
12855                 /* A failed send is ignored; continue on to finish the I/O. */
12856         }
12857 #ifdef CTL_IO_DELAY
12858         if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12859                 /*
12860                  * The delay has already been injected once; clear the
12861                  * flag so normal completion can proceed.
12862                  */
12863                 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12864         } else {
12865                 struct ctl_lun *lun;
12866
12867                 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12868
12869                 if ((lun != NULL)
12870                  && (lun->delay_info.done_delay > 0)) {
12871                         struct callout *callout;
12872
12873                         callout = (struct callout *)&io->io_hdr.timer_bytes;
12874                         callout_init(callout, /*mpsafe*/ 1);
12875                         io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12876                         callout_reset(callout,
12877                                       lun->delay_info.done_delay * hz,
12878                                       ctl_done_timer_wakeup, io);
12879                         if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
12880                                 lun->delay_info.done_delay = 0;
12881                         if (have_lock == 0)
12882                                 mtx_unlock(&ctl_softc->ctl_lock);
12883                         return;
12884                 }
12885         }
12886 #endif /* CTL_IO_DELAY */
12887
12888         STAILQ_INSERT_TAIL(&ctl_softc->done_queue, &io->io_hdr, links);
12889
12890 #ifdef CTL_DONE_THREAD
12891         if (have_lock == 0)
12892                 mtx_unlock(&ctl_softc->ctl_lock);
12893
12894         ctl_wakeup_thread();
12895 #else /* CTL_DONE_THREAD */
12896         for (xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue);
12897              xio != NULL;
12898              xio = (union ctl_io *)STAILQ_FIRST(&ctl_softc->done_queue)) {
12899
12900                 STAILQ_REMOVE_HEAD(&ctl_softc->done_queue, links);
12901
12902                 ctl_process_done(xio, /*have_lock*/ 1);
12903         }
12904         if (have_lock == 0)
12905                 mtx_unlock(&ctl_softc->ctl_lock);
12906 #endif /* CTL_DONE_THREAD */
12907 }
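
/*
 * How done_delay above normally gets armed: userland issues the CTL_DELAY_IO
 * ioctl (the "ctladm delay" subcommand wraps it), which fills in
 * lun->delay_info.  A minimal userland sketch, assuming the /dev/cam/ctl
 * node and the ctl_io_delay_info layout from ctl_ioctl.h; note the ioctl
 * reports CTL_DELAY_STATUS_NOT_IMPLEMENTED unless the kernel was built
 * with CTL_IO_DELAY.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <cam/ctl/ctl_ioctl.h>

static int
ctl_arm_done_delay(uint32_t lun_id, uint32_t secs)
{
	struct ctl_io_delay_info delay;
	int fd, error;

	fd = open("/dev/cam/ctl", O_RDWR);
	if (fd == -1)
		return (-1);
	memset(&delay, 0, sizeof(delay));
	delay.lun_id = lun_id;
	delay.delay_secs = secs;
	delay.delay_type = CTL_DELAY_TYPE_ONESHOT;	/* cleared after one I/O */
	delay.delay_loc = CTL_DELAY_LOC_DONE;		/* inject in ctl_done_lock() */
	error = ioctl(fd, CTL_DELAY_IO, &delay);
	close(fd);
	if (error == -1 || delay.status != CTL_DELAY_STATUS_OK)
		return (-1);
	return (0);
}
#endif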
12908
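/*
 * Convenience wrapper used to hand a completed I/O back to CTL; it simply
 * calls ctl_done_lock() and lets it take the CTL lock itself.
 */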
12909 void
12910 ctl_done(union ctl_io *io)
12911 {
12912         ctl_done_lock(io, /*have_lock*/ 0);
12913 }
12914
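/*
 * Hand an inter-shelf communication (ISC) I/O directly to the LUN's
 * backend for execution.
 */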
12915 int
12916 ctl_isc(struct ctl_scsiio *ctsio)
12917 {
12918         struct ctl_lun *lun;
12919         int retval;
12920
12921         lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12922
12923         CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
12924
12925         CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
12926
12927         retval = lun->backend->data_submit((union ctl_io *)ctsio);
12928
12929         return (retval);
12930 }
12931
12932
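/*
 * Main CTL worker thread.  Drains the task, ISC, done, RtR, and incoming
 * queues in that priority order, then sleeps until ctl_wakeup_thread()
 * signals more work.
 */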
12933 static void
12934 ctl_work_thread(void *arg)
12935 {
12936         struct ctl_softc *softc;
12937         union ctl_io *io;
12938         struct ctl_be_lun *be_lun;
12939         int retval;
12940
12941         CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
12942
12943         softc = (struct ctl_softc *)arg;
12944         if (softc == NULL)
12945                 return;
12946
12947         mtx_lock(&softc->ctl_lock);
12948         for (;;) {
12949                 retval = 0;
12950
12951                 /*
12952                  * We handle the queues in this order:
12953                  * - task management
12954                  * - ISC
12955                  * - done queue (to free up resources, unblock other commands)
12956                  * - RtR queue
12957                  * - incoming queue
12958                  *
12959                  * If all of those queues are empty, we sleep until new
12960                  * work arrives and wakes us up.
12961                  */
12962                 io = (union ctl_io *)STAILQ_FIRST(&softc->task_queue);
12963                 if (io != NULL) {
12964                         ctl_run_task_queue(softc);
12965                         continue;
12966                 }
12967                 io = (union ctl_io *)STAILQ_FIRST(&softc->isc_queue);
12968                 if (io != NULL) {
12969                         STAILQ_REMOVE_HEAD(&softc->isc_queue, links);
12970                         ctl_handle_isc(io);
12971                         continue;
12972                 }
12973                 io = (union ctl_io *)STAILQ_FIRST(&softc->done_queue);
12974                 if (io != NULL) {
12975                         STAILQ_REMOVE_HEAD(&softc->done_queue, links);
12976                         /* clear any blocked commands, call fe_done */
12977                         mtx_unlock(&softc->ctl_lock);
12978                         /*
12979                          * XXX KDM
12980                          * Call this without a lock for now.  This will
12981                          * depend on whether there is any way the FETD can
12982                          * sleep or deadlock if called with the CTL lock
12983                          * held.
12984                          */
12985                         retval = ctl_process_done(io, /*have_lock*/ 0);
12986                         mtx_lock(&softc->ctl_lock);
12987                         continue;
12988                 }
12989                 if (!ctl_pause_rtr) {
12990                         io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
12991                         if (io != NULL) {
12992                                 STAILQ_REMOVE_HEAD(&softc->rtr_queue, links);
12993                                 mtx_unlock(&softc->ctl_lock);
12994                                 goto execute;
12995                         }
12996                 }
12997                 io = (union ctl_io *)STAILQ_FIRST(&softc->incoming_queue);
12998                 if (io != NULL) {
12999                         STAILQ_REMOVE_HEAD(&softc->incoming_queue, links);
13000                         mtx_unlock(&softc->ctl_lock);
13001                         ctl_scsiio_precheck(softc, &io->scsiio);
13002                         mtx_lock(&softc->ctl_lock);
13003                         continue;
13004                 }
13005                 /*
13006                  * We might want to move this to a separate thread, so that
13007                  * configuration requests (in this case LUN creations)
13008                  * won't impact the I/O path.
13009                  */
13010                 be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
13011                 if (be_lun != NULL) {
13012                         STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
13013                         mtx_unlock(&softc->ctl_lock);
13014                         ctl_create_lun(be_lun);
13015                         mtx_lock(&softc->ctl_lock);
13016                         continue;
13017                 }
13018
13019                 /* XXX KDM use the PDROP flag?? */
13020                 /* Sleep until we have something to do. */
13021                 mtx_sleep(softc, &softc->ctl_lock, PRIBIO, "ctl_work", 0);
13022
13023                 /* Back to the top of the loop to see what woke us up. */
13024                 continue;
13025
13026 execute:
13027                 retval = ctl_scsiio(&io->scsiio);
13028                 switch (retval) {
13029                 case CTL_RETVAL_COMPLETE:
13030                         break;
13031                 default:
13032                         /*
13033                          * Probably need to make sure this doesn't happen.
13034                          */
13035                         break;
13036                 }
13037                 mtx_lock(&softc->ctl_lock);
13038         }
13039 }
13040
13041 void
13042 ctl_wakeup_thread(void)
13043 {
13044         struct ctl_softc *softc;
13045
13046         softc = control_softc;
13047
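        /* Pairs with the mtx_sleep() on softc in ctl_work_thread(). */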
13048         wakeup(softc);
13049 }
13050
13051 /* Initialization and failover */
13052
13053 void
13054 ctl_init_isc_msg(void)
13055 {
13056         printf("CTL: Still calling this thing\n");
13057 }
13058
13059 /*
13060  * Init component
13061  *      Initializes the component into the configuration defined by
13062  *      bootMode (see hasc-sv.c).
13063  *      returns hasc_Status:
13064  *              OK
13065  *              ERROR - fatal error
13066  */
13067 static ctl_ha_comp_status
13068 ctl_isc_init(struct ctl_ha_component *c)
13069 {
13070         ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13071
13072         c->status = ret;
13073         return (ret);
13074 }
13075
13076 /* Start component
13077  *      Starts the component in the requested state.  If the component
13078  *      starts successfully, it must set its own state to the requested
13079  *      state.  When the requested state is HASC_STATE_HA, the component
13080  *      may refine it by adding the _SLAVE or _MASTER flags.
13081  *      Currently allowed state transitions are:
13082  *      UNKNOWN->HA             - initial startup
13083  *      UNKNOWN->SINGLE         - initial startup when no partner detected
13084  *      HA->SINGLE              - failover
13085  * returns ctl_ha_comp_status:
13086  *              OK      - component successfully started in requested state
13087  *              FAILED  - could not start the requested state, failover may
13088  *                        be possible
13089  *              ERROR   - fatal error detected, no future startup possible
13090  */
13091 static ctl_ha_comp_status
13092 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
13093 {
13094         ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13095
13096         printf("%s: go\n", __func__);
13097
13098         // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
13099         if (c->state == CTL_HA_STATE_UNKNOWN) {
13100                 ctl_is_single = 0;
13101                 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
13102                     != CTL_HA_STATUS_SUCCESS) {
13103                         printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
13104                         ret = CTL_HA_COMP_STATUS_ERROR;
13105                 }
13106         } else if (CTL_HA_STATE_IS_HA(c->state)
13107                 && CTL_HA_STATE_IS_SINGLE(state)) {
13108                 // HA->SINGLE transition
13109                 ctl_failover();
13110                 ctl_is_single = 1;
13111         } else {
13112                 printf("ctl_isc_start:Invalid state transition %X->%X\n",
13113                        c->state, state);
13114                 ret = CTL_HA_COMP_STATUS_ERROR;
13115         }
13116         if (CTL_HA_STATE_IS_SINGLE(state))
13117                 ctl_is_single = 1;
13118
13119         c->state = state;
13120         c->status = ret;
13121         return (ret);
13122 }
13123
13124 /*
13125  * Quiesce component
13126  * The component must clear any error conditions (set status to OK) and
13127  * prepare itself for another Start call.
13128  * returns ctl_ha_comp_status:
13129  *      OK
13130  *      ERROR
13131  */
13132 static ctl_ha_comp_status
13133 ctl_isc_quiesce(struct ctl_ha_component *c)
13134 {
13135         ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13136
13137         ctl_pause_rtr = 1;
13138         c->status = ret;
13139         return (ret);
13140 }
13141
13142 struct ctl_ha_component ctl_ha_component_ctlisc =
13143 {
13144         .name = "CTL ISC",
13145         .state = CTL_HA_STATE_UNKNOWN,
13146         .init = ctl_isc_init,
13147         .start = ctl_isc_start,
13148         .quiesce = ctl_isc_quiesce
13149 };
13150
13151 /*
13152  *  vim: ts=8
13153  */