/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
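
/*
 * Third-Party Copy (EXTENDED COPY) support for CTL: implementation of the
 * SPC-3/SPC-4 EXTENDED COPY and RECEIVE COPY RESULTS commands.  Parameter
 * lists are parsed into tpc_list structures and processed segment by
 * segment by tpc_process().
 */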

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

#define	TPC_MAX_CSCDS	64	/* Maximum number of CSCD descriptors. */
#define	TPC_MAX_SEGS	64	/* Maximum number of segment descriptors. */
#define	TPC_MAX_SEG	0	/* Maximum segment length: 0 -- unlimited. */
#define	TPC_MAX_LIST	8192	/* Maximum number of bytes of parameters. */
#define	TPC_MAX_INLINE	0	/* Maximum number of inline data bytes. */
#define	TPC_MAX_LISTS	255	/* Maximum number of concurrent lists. */
#define	TPC_MAX_IO_SIZE	(1024 * 1024)	/* Maximum size of a single I/O. */

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;	/* The outstanding CTL I/O. */
	uint64_t		 lun;	/* LUN the I/O is addressed to. */
	struct tpc_list		*list;	/* Parent TPC list. */
	struct runl		 run;	/* I/Os to run after this one. */
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	int			 curseg;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	TAILQ_ENTRY(tpc_list)	 links;
};

void
ctl_tpc_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_shutdown(struct ctl_lun *lun)
{
	struct tpc_list *list;

	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}
}
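
/*
 * Fill in the Third-party Copy VPD page (SVPD_SCSI_TPC), reporting the
 * supported commands, parameter limits, supported segment and CSCD
 * descriptor types, and general copy limits to the initiator.
 */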
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 7, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 7;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 3;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 4;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctsio->scsi_status = SCSI_STATUS_OK;
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
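
/*
 * RECEIVE COPY OPERATING PARAMETERS: report the same limits as the VPD
 * page above in the format defined for the RCOP service action.
 */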
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	struct ctl_lun *lun;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (retval);
}
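
/*
 * RECEIVE COPY STATUS (LID1): look up the list by its one-byte list
 * identifier and report completion status and progress counters.
 * A completed list is released once its status has been fetched.
 */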
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (retval);
}
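
/*
 * RECEIVE COPY FAILURE DETAILS: report the saved sense data of a
 * completed and failed copy operation, then release the list.
 */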
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if (list->completed && (list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4, data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (retval);
}
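
/*
 * RECEIVE COPY STATUS (LID4): same as LID1 above, but with a four-byte
 * list identifier and additional progress and sense information.
 */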
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (retval);
}
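
/*
 * COPY OPERATION ABORT: mark the specified list as aborted; the running
 * segment handlers will notice the flag and terminate the operation.
 */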
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id)
			break;
	}
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}
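
/*
 * Resolve a CSCD descriptor index into a LUN number and block size.
 * Index 0xffff refers to the LUN the command was received on.
 */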
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
}
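
/*
 * Process one block-to-block copy segment: resolve source and destination,
 * validate block sizes, then build chains of READ/WRITE I/Os of at most
 * TPC_MAX_IO_SIZE bytes each, where each write runs after its read.
 */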
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run, *prun;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock;

	if (list->stage == 1) {
complete:
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else {
			list->curbytes += list->segbytes;
			return (CTL_RETVAL_COMPLETE);
		}
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	while (donebytes < numbytes) {
		roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		if (tior->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba + donebytes / srcblock,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		if (tiow->io == NULL) {
			list->error = 1;
			goto complete;
		}
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba + donebytes / dstblock,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
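
/*
 * Process a verify segment: if the TUR bit is set, send TEST UNIT READY
 * to the resolved LUN; otherwise the segment completes immediately.
 */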
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
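
/*
 * Process a register key segment: issue PERSISTENT RESERVE OUT (REGISTER)
 * with the given reservation keys to the resolved destination LUN.
 */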
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
complete:
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	if (tio->io == NULL) {
		list->error = 1;
		goto complete;
	}
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
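
/*
 * Main state machine: process the segments of a list one by one until one
 * of them queues I/O (processing resumes from tpc_done()), fails, or the
 * list is exhausted.  Finally report status and release or retain the
 * list depending on its list ID usage mode.
 */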
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
	while (list->curseg < list->nseg) {
		seg = list->seg[list->curseg];
		switch (seg->type_code) {
		case EC_SEG_B2B:
			retval = tpc_process_b2b(list);
			break;
		case EC_SEG_VERIFY:
			retval = tpc_process_verify(list);
			break;
		case EC_SEG_REGISTER_KEY:
			retval = tpc_process_register_key(list);
			break;
		default:
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
		list->curseg++;
		list->stage = 0;
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
		break;
	}

	return (error_action);
}

static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}

	return (error_action);
}
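
/*
 * Completion callback for all I/Os issued by the segment handlers: retry
 * recoverable errors, then either launch the dependent I/Os queued behind
 * this one or, when the last outstanding I/O of the list completes,
 * re-enter tpc_process().
 */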
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
//	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
//		ctl_io_error_print(io, NULL);
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);

	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}
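
/*
 * EXTENDED COPY (LID1): fetch the parameter list from the initiator,
 * validate its lengths, parse the CSCD and segment descriptors into a
 * tpc_list, and start processing it.
 */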
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		TAILQ_FOREACH(tlist, &lun->tpc_lists, links) {
			if ((tlist->flags & EC_LIST_ID_USAGE_MASK) !=
			     EC_LIST_ID_USAGE_NONE &&
			    tlist->list_id == list->list_id)
				break;
		}
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
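
/*
 * EXTENDED COPY (LID4): identical to LID1 above except for the parameter
 * list header format and the four-byte list identifier.
 */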
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl ||
	    leninl > TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		TAILQ_FOREACH(tlist, &lun->tpc_lists, links) {
			if ((tlist->flags & EC_LIST_ID_USAGE_MASK) !=
			     EC_LIST_ID_USAGE_NONE &&
			    tlist->list_id == list->list_id)
				break;
		}
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}