2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/types.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/condvar.h>
40 #include <sys/malloc.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
46 #include <machine/atomic.h>
49 #include <cam/scsi/scsi_all.h>
50 #include <cam/scsi/scsi_da.h>
51 #include <cam/ctl/ctl_io.h>
52 #include <cam/ctl/ctl.h>
53 #include <cam/ctl/ctl_frontend.h>
54 #include <cam/ctl/ctl_util.h>
55 #include <cam/ctl/ctl_backend.h>
56 #include <cam/ctl/ctl_ioctl.h>
57 #include <cam/ctl/ctl_ha.h>
58 #include <cam/ctl/ctl_private.h>
59 #include <cam/ctl/ctl_debug.h>
60 #include <cam/ctl/ctl_scsi_all.h>
61 #include <cam/ctl/ctl_tpc.h>
62 #include <cam/ctl/ctl_error.h>
64 #define TPC_MAX_CSCDS 64
65 #define TPC_MAX_SEGS 64
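#define TPC_MAX_SEG 0	/* Maximum segment length advertised (0 = no limit); definition assumed, the macro is referenced below. */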
67 #define TPC_MAX_LIST 8192
68 #define TPC_MAX_INLINE 0
69 #define TPC_MAX_LISTS 255
70 #define TPC_MAX_IO_SIZE (1024 * 1024)
71 #define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16)
72 #define TPC_MIN_TOKEN_TIMEOUT 1
73 #define TPC_DFL_TOKEN_TIMEOUT 60
74 #define TPC_MAX_TOKEN_TIMEOUT 600
76 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
typedef enum {
	TPC_ERR_RETRY = 0x000,
	TPC_ERR_FAIL = 0x001,
	TPC_ERR_MASK = 0x0ff,
	TPC_ERR_NO_DECREMENT = 0x100
} tpc_error_action;
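/*
 * Per-I/O, ROD token and copy-list state.  Each EXTENDED COPY or token
 * command builds a tpc_list; every backend read or write it issues is
 * tracked by a tpc_io, chained through "run" lists so that dependent
 * writes are started only after the reads that feed them complete.
 */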
86 TAILQ_HEAD(runl, tpc_io);
93 struct tpc_list *list;
95 TAILQ_ENTRY(tpc_io) rlinks;
96 TAILQ_ENTRY(tpc_io) links;
104 struct scsi_range_desc *range;
109 TAILQ_ENTRY(tpc_token) links;
113 uint8_t service_action;
119 struct scsi_ec_cscd *cscd;
120 struct scsi_ec_segment *seg[TPC_MAX_SEGS];
125 struct tpc_token *token;
126 struct scsi_range_desc *range;
128 off_t offset_into_rod;
142 TAILQ_HEAD(, tpc_io) allio;
143 struct scsi_sense_data fwd_sense_data;
144 uint8_t fwd_sense_len;
145 uint8_t fwd_scsi_status;
148 struct scsi_sense_data sense_data;
151 struct ctl_scsiio *ctsio;
154 uint8_t res_token[512];
155 TAILQ_ENTRY(tpc_list) links;
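/*
 * Periodic callout: frees completed copy lists whose status was never
 * fetched and ROD tokens that exceeded their inactivity timeout, then
 * reschedules itself to run again one second later.
 */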
159 tpc_timeout(void *arg)
161 struct ctl_softc *softc = arg;
163 struct tpc_token *token, *ttoken;
164 struct tpc_list *list, *tlist;
166 /* Free completed lists with expired timeout. */
167 STAILQ_FOREACH(lun, &softc->lun_list, links) {
168 mtx_lock(&lun->lun_lock);
169 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
176 mtx_unlock(&lun->lun_lock);
179 /* Free inactive ROD tokens with expired timeout. */
180 mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
189 mtx_unlock(&softc->tpc_lock);
190 callout_schedule(&softc->tpc_timeout, hz);
194 ctl_tpc_init(struct ctl_softc *softc)
197 mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
198 TAILQ_INIT(&softc->tpc_tokens);
199 callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
200 callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
204 ctl_tpc_shutdown(struct ctl_softc *softc)
206 struct tpc_token *token;
208 callout_drain(&softc->tpc_timeout);
210 /* Free ROD tokens. */
211 mtx_lock(&softc->tpc_lock);
212 while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
213 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
214 free(token->params, M_CTL);
217 mtx_unlock(&softc->tpc_lock);
218 mtx_destroy(&softc->tpc_lock);
222 ctl_tpc_lun_init(struct ctl_lun *lun)
225 TAILQ_INIT(&lun->tpc_lists);
229 ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
231 struct tpc_list *list, *tlist;
233 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
234 if (initidx != -1 && list->init_idx != initidx)
236 if (!list->completed)
238 TAILQ_REMOVE(&lun->tpc_lists, list, links);
244 ctl_tpc_lun_shutdown(struct ctl_lun *lun)
246 struct ctl_softc *softc = lun->ctl_softc;
247 struct tpc_list *list;
248 struct tpc_token *token, *ttoken;
250 /* Free lists for this LUN. */
251 while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
252 TAILQ_REMOVE(&lun->tpc_lists, list, links);
253 KASSERT(list->completed,
254 ("Not completed TPC (%p) on shutdown", list));
258 /* Free ROD tokens for this LUN. */
259 mtx_lock(&softc->tpc_lock);
260 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
261 if (token->lun != lun->lun || token->active)
263 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
264 free(token->params, M_CTL);
267 mtx_unlock(&softc->tpc_lock);
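/*
 * Build the Third Party Copy VPD page (SPC-4 page 0x8F): Block Device ROD
 * Limits, Supported Commands, Parameter Data, Supported Descriptors,
 * Supported CSCD Descriptor IDs, ROD Token Features, Supported ROD Tokens
 * and General Copy Operations descriptors.
 */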
271 ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
273 struct ctl_lun *lun = CTL_LUN(ctsio);
274 struct scsi_vpd_tpc *tpc_ptr;
275 struct scsi_vpd_tpc_descriptor *d_ptr;
276 struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
277 struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
278 struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
279 struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
280 struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
281 struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
282 struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
283 struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
284 struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
285 struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
286 struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
289 data_len = sizeof(struct scsi_vpd_tpc) +
290 sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
291 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
292 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
293 sizeof(struct scsi_vpd_tpc_descriptor_pd) +
294 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
295 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
296 sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
297 sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
298 sizeof(struct scsi_vpd_tpc_descriptor_srt) +
299 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
300 sizeof(struct scsi_vpd_tpc_descriptor_gco);
302 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
303 tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
304 ctsio->kern_rel_offset = 0;
305 ctsio->kern_sg_entries = 0;
306 ctsio->kern_data_len = min(data_len, alloc_len);
307 ctsio->kern_total_len = ctsio->kern_data_len;
	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
318 tpc_ptr->page_code = SVPD_SCSI_TPC;
319 scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
321 /* Block Device ROD Limits */
322 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
323 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
324 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
325 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
326 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
327 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
328 bdrl_ptr->maximum_inactivity_timeout);
329 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
330 bdrl_ptr->default_inactivity_timeout);
331 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
332 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
334 /* Supported commands */
335 d_ptr = (struct scsi_vpd_tpc_descriptor *)
336 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
337 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
338 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
339 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
340 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
341 scd_ptr = &sc_ptr->descr[0];
342 scd_ptr->opcode = EXTENDED_COPY;
343 scd_ptr->sa_length = 5;
344 scd_ptr->supported_service_actions[0] = EC_EC_LID1;
345 scd_ptr->supported_service_actions[1] = EC_EC_LID4;
346 scd_ptr->supported_service_actions[2] = EC_PT;
347 scd_ptr->supported_service_actions[3] = EC_WUT;
348 scd_ptr->supported_service_actions[4] = EC_COA;
349 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
350 &scd_ptr->supported_service_actions[scd_ptr->sa_length];
351 scd_ptr->opcode = RECEIVE_COPY_STATUS;
352 scd_ptr->sa_length = 6;
353 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
354 scd_ptr->supported_service_actions[1] = RCS_RCFD;
355 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
356 scd_ptr->supported_service_actions[3] = RCS_RCOP;
357 scd_ptr->supported_service_actions[4] = RCS_RRTI;
358 scd_ptr->supported_service_actions[5] = RCS_RART;
360 /* Parameter data. */
361 d_ptr = (struct scsi_vpd_tpc_descriptor *)
362 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
363 pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
364 scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
365 scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
366 scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
367 scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
368 scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
369 scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);
371 /* Supported Descriptors */
372 d_ptr = (struct scsi_vpd_tpc_descriptor *)
373 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
374 sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
375 scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
376 scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
377 sd_ptr->list_length = 4;
378 sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
379 sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
380 sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
381 sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;
383 /* Supported CSCD Descriptor IDs */
384 d_ptr = (struct scsi_vpd_tpc_descriptor *)
385 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
386 sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
387 scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
388 scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
389 scsi_ulto2b(2, sdid_ptr->list_length);
390 scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);
392 /* ROD Token Features */
393 d_ptr = (struct scsi_vpd_tpc_descriptor *)
394 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
395 rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
396 scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
397 scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
398 rtf_ptr->remote_tokens = 0;
399 scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
400 scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
401 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
402 rtf_ptr->maximum_token_inactivity_timeout);
403 scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
404 rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
405 &rtf_ptr->type_specific_features;
406 rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
407 scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
408 scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
409 scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
410 scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
411 scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
412 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
413 rtfb_ptr->optimal_bytes_from_token_per_segment);
415 /* Supported ROD Tokens */
416 d_ptr = (struct scsi_vpd_tpc_descriptor *)
417 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
418 srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
419 scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
420 scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
421 scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
422 srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
423 &srt_ptr->rod_type_descriptors;
424 scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
425 srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
426 scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
429 srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
430 scsi_ulto2b(0, srtd_ptr->preference_indicator);
432 /* General Copy Operations */
433 d_ptr = (struct scsi_vpd_tpc_descriptor *)
434 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
435 gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
436 scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
437 scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
438 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
439 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
440 scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
441 gco_ptr->data_segment_granularity = 0;
442 gco_ptr->inline_data_granularity = 0;
444 ctl_set_success(ctsio);
445 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
446 ctsio->be_move_done = ctl_config_move_done;
447 ctl_datamove((union ctl_io *)ctsio);
449 return (CTL_RETVAL_COMPLETE);
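/*
 * RECEIVE COPY OPERATING PARAMETERS: report the copy manager limits
 * (CSCD/segment counts, list sizes, concurrency) and the implemented
 * segment descriptor type codes.
 */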
453 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
455 struct scsi_receive_copy_operating_parameters *cdb;
456 struct scsi_receive_copy_operating_parameters_data *data;
458 int alloc_len, total_len;
	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));
462 cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
464 retval = CTL_RETVAL_COMPLETE;
466 total_len = sizeof(*data) + 4;
467 alloc_len = scsi_4btoul(cdb->length);
469 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
470 ctsio->kern_sg_entries = 0;
471 ctsio->kern_rel_offset = 0;
472 ctsio->kern_data_len = min(total_len, alloc_len);
473 ctsio->kern_total_len = ctsio->kern_data_len;
475 data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
476 scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
477 data->snlid = RCOP_SNLID;
478 scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
479 scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
480 scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
481 scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
482 scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
483 scsi_ulto4b(0, data->held_data_limit);
484 scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
485 scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
486 data->maximum_concurrent_copies = TPC_MAX_LISTS;
487 data->data_segment_granularity = 0;
488 data->inline_data_granularity = 0;
489 data->held_data_granularity = 0;
490 data->implemented_descriptor_list_length = 4;
491 data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
492 data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
493 data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
494 data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
496 ctl_set_success(ctsio);
497 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
498 ctsio->be_move_done = ctl_config_move_done;
499 ctl_datamove((union ctl_io *)ctsio);
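/*
 * Look up a copy list by list identifier and initiator index; lists created
 * with LIST ID USAGE "none" are never matched.
 */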
503 static struct tpc_list *
504 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
506 struct tpc_list *list;
508 mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		     list->init_idx == init_idx)
			break;
	}
	return (list);
}
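/*
 * RECEIVE COPY STATUS (LID1): report progress or final status of a copy
 * list; a completed list is removed once its status has been returned.
 */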
519 ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
521 struct ctl_lun *lun = CTL_LUN(ctsio);
522 struct scsi_receive_copy_status_lid1 *cdb;
523 struct scsi_receive_copy_status_lid1_data *data;
524 struct tpc_list *list;
525 struct tpc_list list_copy;
527 int alloc_len, total_len;
530 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
532 cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
533 retval = CTL_RETVAL_COMPLETE;
535 list_id = cdb->list_identifier;
536 mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);
554 total_len = sizeof(*data);
555 alloc_len = scsi_4btoul(cdb->length);
557 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
558 ctsio->kern_sg_entries = 0;
559 ctsio->kern_rel_offset = 0;
560 ctsio->kern_data_len = min(total_len, alloc_len);
561 ctsio->kern_total_len = ctsio->kern_data_len;
563 data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
564 scsi_ulto4b(sizeof(*data) - 4, data->available_data);
565 if (list_copy.completed) {
566 if (list_copy.error || list_copy.abort)
567 data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
572 scsi_ulto2b(list_copy.curseg, data->segments_processed);
573 if (list_copy.curbytes <= UINT32_MAX) {
574 data->transfer_count_units = RCS_TC_BYTES;
575 scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
578 scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
581 ctl_set_success(ctsio);
582 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
583 ctsio->be_move_done = ctl_config_move_done;
584 ctl_datamove((union ctl_io *)ctsio);
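/*
 * RECEIVE COPY FAILURE DETAILS: return the saved sense data of a completed
 * (failed) copy list and release it.
 */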
589 ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
591 struct ctl_lun *lun = CTL_LUN(ctsio);
592 struct scsi_receive_copy_failure_details *cdb;
593 struct scsi_receive_copy_failure_details_data *data;
594 struct tpc_list *list;
595 struct tpc_list list_copy;
597 int alloc_len, total_len;
600 CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
602 cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
603 retval = CTL_RETVAL_COMPLETE;
605 list_id = cdb->list_identifier;
606 mtx_lock(&lun->lun_lock);
607 list = tpc_find_list(lun, list_id,
608 ctl_get_initindex(&ctsio->io_hdr.nexus));
609 if (list == NULL || !list->completed) {
610 mtx_unlock(&lun->lun_lock);
611 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
612 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
614 ctl_done((union ctl_io *)ctsio);
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
620 mtx_unlock(&lun->lun_lock);
622 total_len = sizeof(*data) + list_copy.sense_len;
623 alloc_len = scsi_4btoul(cdb->length);
625 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
626 ctsio->kern_sg_entries = 0;
627 ctsio->kern_rel_offset = 0;
628 ctsio->kern_data_len = min(total_len, alloc_len);
629 ctsio->kern_total_len = ctsio->kern_data_len;
631 data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
632 if (list_copy.completed && (list_copy.error || list_copy.abort)) {
633 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
634 data->available_data);
635 data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
638 scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
639 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
641 ctl_set_success(ctsio);
642 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
643 ctsio->be_move_done = ctl_config_move_done;
644 ctl_datamove((union ctl_io *)ctsio);
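/*
 * RECEIVE COPY STATUS (LID4): as LID1, but also returns the originating
 * service action, an operation counter and any forwarded sense data.
 */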
649 ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
651 struct ctl_lun *lun = CTL_LUN(ctsio);
652 struct scsi_receive_copy_status_lid4 *cdb;
653 struct scsi_receive_copy_status_lid4_data *data;
654 struct tpc_list *list;
655 struct tpc_list list_copy;
657 int alloc_len, total_len;
660 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
662 cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
663 retval = CTL_RETVAL_COMPLETE;
665 list_id = scsi_4btoul(cdb->list_identifier);
666 mtx_lock(&lun->lun_lock);
667 list = tpc_find_list(lun, list_id,
668 ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
671 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
672 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
674 ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
679 TAILQ_REMOVE(&lun->tpc_lists, list, links);
682 mtx_unlock(&lun->lun_lock);
684 total_len = sizeof(*data) + list_copy.sense_len;
685 alloc_len = scsi_4btoul(cdb->length);
687 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
688 ctsio->kern_sg_entries = 0;
689 ctsio->kern_rel_offset = 0;
690 ctsio->kern_data_len = min(total_len, alloc_len);
691 ctsio->kern_total_len = ctsio->kern_data_len;
693 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
694 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
695 data->available_data);
696 data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
706 scsi_ulto2b(list_copy.curops, data->operation_counter);
707 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
708 data->transfer_count_units = RCS_TC_BYTES;
709 scsi_u64to8b(list_copy.curbytes, data->transfer_count);
710 scsi_ulto2b(list_copy.curseg, data->segments_processed);
711 data->length_of_the_sense_data_field = list_copy.sense_len;
712 data->sense_data_length = list_copy.sense_len;
713 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
715 ctl_set_success(ctsio);
716 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
717 ctsio->be_move_done = ctl_config_move_done;
718 ctl_datamove((union ctl_io *)ctsio);
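/*
 * COPY OPERATION ABORT: flag the matching in-progress copy list so that the
 * processing code terminates it at the next stage boundary.
 */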
723 ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
725 struct ctl_lun *lun = CTL_LUN(ctsio);
726 struct scsi_copy_operation_abort *cdb;
727 struct tpc_list *list;
731 CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
733 cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
734 retval = CTL_RETVAL_COMPLETE;
736 list_id = scsi_4btoul(cdb->list_identifier);
737 mtx_lock(&lun->lun_lock);
738 list = tpc_find_list(lun, list_id,
739 ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);
751 ctl_set_success(ctsio);
752 ctl_done((union ctl_io *)ctsio);
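/*
 * Resolve a CSCD descriptor index to a CTL LUN number, also returning the
 * logical block size and physical block geometry where requested; the
 * reserved descriptor ID 0xffff refers to the LUN addressed by the command.
 */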
757 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
758 uint32_t *pb, uint32_t *pbo)
762 if (ss && list->lun->be_lun)
763 *ss = list->lun->be_lun->blocksize;
764 if (pb && list->lun->be_lun)
765 *pb = list->lun->be_lun->blocksize <<
766 list->lun->be_lun->pblockexp;
767 if (pbo && list->lun->be_lun)
768 *pbo = list->lun->be_lun->blocksize *
769 list->lun->be_lun->pblockoff;
770 return (list->lun->lun);
772 if (idx >= list->ncscd)
774 return (tpcl_resolve(list->lun->ctl_softc,
775 list->init_port, &list->cscd[idx], ss, pb, pbo));
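/*
 * Build COPY ABORTED sense for a failed segment: the failing segment number
 * goes into the command information field, the offset of the failing CSCD
 * descriptor into the sense-key specific field, and any SCSI status and
 * sense returned by the copy target is forwarded as a sense descriptor.
 */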
779 tpc_set_io_error_sense(struct tpc_list *list)
784 uint8_t fbuf[4 + 64];
786 scsi_ulto4b(list->curseg, csi);
787 if (list->fwd_cscd <= 0x07ff) {
788 sks[0] = SSD_SKS_SEGMENT_VALID;
789 scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
790 list->params, &sks[1]);
793 if (list->fwd_scsi_status) {
795 fbuf[2] = list->fwd_target;
796 flen = list->fwd_sense_len;
799 fbuf[2] |= SSD_FORWARDED_FSDT;
802 fbuf[3] = list->fwd_scsi_status;
803 bcopy(&list->fwd_sense_data, &fbuf[4], flen);
807 ctl_set_sense(list->ctsio, /*current_error*/ 1,
808 /*sense_key*/ SSD_KEY_COPY_ABORTED,
809 /*asc*/ 0x0d, /*ascq*/ 0x01,
810 SSD_ELEM_COMMAND, sizeof(csi), csi,
811 sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
812 flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
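/*
 * Process a block-to-block segment descriptor: resolve source and
 * destination CSCDs, then issue reads of up to TPC_MAX_IO_SIZE, each
 * chained to the write that stores its data, and collect the results in
 * stage 1.
 */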
817 tpc_process_b2b(struct tpc_list *list)
819 struct scsi_ec_segment_b2b *seg;
820 struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
821 struct tpc_io *tior, *tiow;
824 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
826 uint32_t srcblock, dstblock, pb, pbo, adj;
827 uint16_t scscd, dcscd;
830 scsi_ulto4b(list->curseg, csi);
831 if (list->stage == 1) {
832 while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
833 TAILQ_REMOVE(&list->allio, tior, links);
834 ctl_free_io(tior->io);
835 free(tior->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
840 return (CTL_RETVAL_ERROR);
841 } else if (list->error) {
842 tpc_set_io_error_sense(list);
843 return (CTL_RETVAL_ERROR);
845 list->cursectors += list->segsectors;
846 list->curbytes += list->segbytes;
847 return (CTL_RETVAL_COMPLETE);
850 TAILQ_INIT(&list->allio);
851 seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
852 scscd = scsi_2btoul(seg->src_cscd);
853 dcscd = scsi_2btoul(seg->dst_cscd);
854 sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
855 dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
856 if (sl == UINT64_MAX || dl == UINT64_MAX) {
857 ctl_set_sense(list->ctsio, /*current_error*/ 1,
858 /*sense_key*/ SSD_KEY_COPY_ABORTED,
859 /*asc*/ 0x08, /*ascq*/ 0x04,
860 SSD_ELEM_COMMAND, sizeof(csi), csi,
862 return (CTL_RETVAL_ERROR);
866 sdstp = &list->cscd[scscd].dtsp;
867 if (scsi_3btoul(sdstp->block_length) != 0)
868 srcblock = scsi_3btoul(sdstp->block_length);
869 ddstp = &list->cscd[dcscd].dtsp;
870 if (scsi_3btoul(ddstp->block_length) != 0)
871 dstblock = scsi_3btoul(ddstp->block_length);
872 numlba = scsi_2btoul(seg->number_of_blocks);
873 if (seg->flags & EC_SEG_DC)
874 numbytes = (off_t)numlba * dstblock;
876 numbytes = (off_t)numlba * srcblock;
877 srclba = scsi_8btou64(seg->src_lba);
878 dstlba = scsi_8btou64(seg->dst_lba);
880 // printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
881 // (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
882 // dl, scsi_8btou64(seg->dst_lba));
	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);
887 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
888 ctl_set_sense(list->ctsio, /*current_error*/ 1,
889 /*sense_key*/ SSD_KEY_COPY_ABORTED,
890 /*asc*/ 0x26, /*ascq*/ 0x0A,
891 SSD_ELEM_COMMAND, sizeof(csi), csi,
893 return (CTL_RETVAL_ERROR);
896 list->segbytes = numbytes;
897 list->segsectors = numbytes / dstblock;
901 while (donebytes < numbytes) {
902 roundbytes = numbytes - donebytes;
903 if (roundbytes > TPC_MAX_IO_SIZE) {
904 roundbytes = TPC_MAX_IO_SIZE;
905 roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
913 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
914 TAILQ_INIT(&tior->run);
915 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
917 TAILQ_INSERT_TAIL(&list->allio, tior, links);
918 tior->io = tpcl_alloc_io();
919 ctl_scsi_read_write(tior->io,
920 /*data_ptr*/ tior->buf,
921 /*data_len*/ roundbytes,
924 /*minimum_cdb_size*/ 0,
926 /*num_blocks*/ roundbytes / srcblock,
927 /*tag_type*/ CTL_TAG_SIMPLE,
929 tior->io->io_hdr.retries = 3;
930 tior->target = SSD_FORWARDED_SDS_EXSRC;
		tior->cscd = scscd;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
935 tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
936 TAILQ_INIT(&tiow->run);
938 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
939 tiow->io = tpcl_alloc_io();
940 ctl_scsi_read_write(tiow->io,
941 /*data_ptr*/ tior->buf,
942 /*data_len*/ roundbytes,
945 /*minimum_cdb_size*/ 0,
947 /*num_blocks*/ roundbytes / dstblock,
948 /*tag_type*/ CTL_TAG_SIMPLE,
950 tiow->io->io_hdr.retries = 3;
951 tiow->target = SSD_FORWARDED_SDS_EXDST;
		tiow->cscd = dcscd;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
956 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
957 TAILQ_INSERT_TAIL(&run, tior, rlinks);
959 donebytes += roundbytes;
960 srclba += roundbytes / srcblock;
961 dstlba += roundbytes / dstblock;
964 while ((tior = TAILQ_FIRST(&run)) != NULL) {
965 TAILQ_REMOVE(&run, tior, rlinks);
966 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
967 panic("tpcl_queue() error");
971 return (CTL_RETVAL_QUEUED);
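/*
 * Process a "verify CSCD" segment descriptor: optionally send TEST UNIT
 * READY to the resolved CSCD and forward any failure.
 */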
975 tpc_process_verify(struct tpc_list *list)
977 struct scsi_ec_segment_verify *seg;
983 scsi_ulto4b(list->curseg, csi);
984 if (list->stage == 1) {
985 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
986 TAILQ_REMOVE(&list->allio, tio, links);
987 ctl_free_io(tio->io);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
992 return (CTL_RETVAL_ERROR);
993 } else if (list->error) {
994 tpc_set_io_error_sense(list);
995 return (CTL_RETVAL_ERROR);
997 return (CTL_RETVAL_COMPLETE);
1000 TAILQ_INIT(&list->allio);
1001 seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
1002 cscd = scsi_2btoul(seg->src_cscd);
1003 sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
1004 if (sl == UINT64_MAX) {
1005 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1006 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1007 /*asc*/ 0x08, /*ascq*/ 0x04,
1008 SSD_ELEM_COMMAND, sizeof(csi), csi,
1010 return (CTL_RETVAL_ERROR);
1013 // printf("Verify %ju\n", sl);
1015 if ((seg->tur & 0x01) == 0)
1016 return (CTL_RETVAL_COMPLETE);
1019 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1020 TAILQ_INIT(&tio->run);
1022 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1023 tio->io = tpcl_alloc_io();
1024 ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1025 tio->io->io_hdr.retries = 3;
1026 tio->target = SSD_FORWARDED_SDS_EXSRC;
1029 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1031 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1032 panic("tpcl_queue() error");
1033 return (CTL_RETVAL_QUEUED);
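/*
 * Process a "register key" segment descriptor: send PERSISTENT RESERVE OUT
 * (REGISTER) with the supplied reservation keys to the resolved CSCD.
 */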
1037 tpc_process_register_key(struct tpc_list *list)
1039 struct scsi_ec_segment_register_key *seg;
1046 scsi_ulto4b(list->curseg, csi);
1047 if (list->stage == 1) {
1048 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1049 TAILQ_REMOVE(&list->allio, tio, links);
1050 ctl_free_io(tio->io);
1051 free(tio->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
1056 return (CTL_RETVAL_ERROR);
1057 } else if (list->error) {
1058 tpc_set_io_error_sense(list);
1059 return (CTL_RETVAL_ERROR);
1061 return (CTL_RETVAL_COMPLETE);
1064 TAILQ_INIT(&list->allio);
1065 seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
1066 cscd = scsi_2btoul(seg->dst_cscd);
1067 dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
1068 if (dl == UINT64_MAX) {
1069 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1070 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1071 /*asc*/ 0x08, /*ascq*/ 0x04,
1072 SSD_ELEM_COMMAND, sizeof(csi), csi,
1074 return (CTL_RETVAL_ERROR);
1077 // printf("Register Key %ju\n", dl);
1080 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1081 TAILQ_INIT(&tio->run);
1083 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1084 tio->io = tpcl_alloc_io();
1085 datalen = sizeof(struct scsi_per_res_out_parms);
1086 tio->buf = malloc(datalen, M_CTL, M_WAITOK);
1087 ctl_scsi_persistent_res_out(tio->io,
1088 tio->buf, datalen, SPRO_REGISTER, -1,
1089 scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
1090 /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1091 tio->io->io_hdr.retries = 3;
1092 tio->target = SSD_FORWARDED_SDS_EXDST;
1095 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1097 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1098 panic("tpcl_queue() error");
1099 return (CTL_RETVAL_QUEUED);
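/*
 * Helpers for block range descriptor lists: total length in blocks, bounds
 * checking against the LUN capacity, overlap detection, and skipping a
 * given number of blocks into a list of ranges.
 */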
1103 tpc_ranges_length(struct scsi_range_desc *range, int nrange)
1108 for (r = 0; r < nrange; r++)
1109 length += scsi_4btoul(range[r].length);
1114 tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
1121 for (i = 0; i < nrange; i++) {
1122 b1 = scsi_8btou64(range[i].lba);
1123 l1 = scsi_4btoul(range[i].length);
1124 if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
1125 *lba = MAX(b1, maxlba + 1);
1133 tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
1139 for (i = 0; i < nrange - 1; i++) {
1140 b1 = scsi_8btou64(range[i].lba);
1141 l1 = scsi_4btoul(range[i].length);
1142 for (j = i + 1; j < nrange; j++) {
1143 b2 = scsi_8btou64(range[j].lba);
1144 l2 = scsi_4btoul(range[j].length);
1145 if (b1 + l1 > b2 && b2 + l2 > b1)
1153 tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
1154 int *srange, off_t *soffset)
1161 while (r < nrange) {
1162 if (skip - off < scsi_4btoul(range[r].length)) {
1164 *soffset = skip - off;
1167 off += scsi_4btoul(range[r].length);
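/*
 * WRITE USING TOKEN with a populated ROD token: copy one chunk per call
 * from the token's source ranges to the destination ranges as chained
 * read/write pairs; tpc_done() re-enters tpc_process() for the next chunk.
 */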
1174 tpc_process_wut(struct tpc_list *list)
1176 struct tpc_io *tio, *tior, *tiow;
1179 off_t doffset, soffset;
1180 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
1181 uint32_t srcblock, dstblock, pb, pbo, adj;
1183 if (list->stage > 0) {
1184 /* Cleanup after previous rounds. */
1185 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1186 TAILQ_REMOVE(&list->allio, tio, links);
1187 ctl_free_io(tio->io);
1188 free(tio->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
1193 return (CTL_RETVAL_ERROR);
1194 } else if (list->error) {
1195 if (list->fwd_scsi_status) {
1196 list->ctsio->io_hdr.status =
1197 CTL_SCSI_ERROR | CTL_AUTOSENSE;
1198 list->ctsio->scsi_status = list->fwd_scsi_status;
1199 list->ctsio->sense_data = list->fwd_sense_data;
1200 list->ctsio->sense_len = list->fwd_sense_len;
			} else
				ctl_set_invalid_field(list->ctsio,
1203 /*sks_valid*/ 0, /*command*/ 0,
1204 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1206 return (CTL_RETVAL_ERROR);
1208 list->cursectors += list->segsectors;
1209 list->curbytes += list->segbytes;
1212 /* Check where we are on destination ranges list. */
1213 if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
1214 &drange, &doffset) != 0)
1215 return (CTL_RETVAL_COMPLETE);
1216 dstblock = list->lun->be_lun->blocksize;
1217 pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;
1223 /* Check where we are on source ranges list. */
1224 srcblock = list->token->blocksize;
1225 if (tpc_skip_ranges(list->token->range, list->token->nrange,
1226 list->offset_into_rod + list->cursectors * dstblock / srcblock,
1227 &srange, &soffset) != 0) {
1228 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
1229 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1230 return (CTL_RETVAL_ERROR);
1233 srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
1234 dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
1235 numbytes = srcblock *
1236 (scsi_4btoul(list->token->range[srange].length) - soffset);
1237 numbytes = omin(numbytes, dstblock *
1238 (scsi_4btoul(list->range[drange].length) - doffset));
1239 if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
1240 numbytes = TPC_MAX_IOCHUNK_SIZE;
1241 numbytes -= numbytes % dstblock;
1242 if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
1249 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
1250 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
1251 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1252 return (CTL_RETVAL_ERROR);
1255 list->segbytes = numbytes;
1256 list->segsectors = numbytes / dstblock;
1257 //printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
1262 TAILQ_INIT(&list->allio);
1263 while (donebytes < numbytes) {
1264 roundbytes = numbytes - donebytes;
1265 if (roundbytes > TPC_MAX_IO_SIZE) {
1266 roundbytes = TPC_MAX_IO_SIZE;
1267 roundbytes -= roundbytes % dstblock;
1268 if (pb > dstblock) {
1269 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
1275 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
1276 TAILQ_INIT(&tior->run);
1277 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
1279 TAILQ_INSERT_TAIL(&list->allio, tior, links);
1280 tior->io = tpcl_alloc_io();
1281 ctl_scsi_read_write(tior->io,
1282 /*data_ptr*/ tior->buf,
1283 /*data_len*/ roundbytes,
1286 /*minimum_cdb_size*/ 0,
1288 /*num_blocks*/ roundbytes / srcblock,
1289 /*tag_type*/ CTL_TAG_SIMPLE,
1291 tior->io->io_hdr.retries = 3;
1292 tior->lun = list->token->lun;
1293 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
1295 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1296 TAILQ_INIT(&tiow->run);
1298 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1299 tiow->io = tpcl_alloc_io();
1300 ctl_scsi_read_write(tiow->io,
1301 /*data_ptr*/ tior->buf,
1302 /*data_len*/ roundbytes,
1305 /*minimum_cdb_size*/ 0,
1307 /*num_blocks*/ roundbytes / dstblock,
1308 /*tag_type*/ CTL_TAG_SIMPLE,
1310 tiow->io->io_hdr.retries = 3;
1311 tiow->lun = list->lun->lun;
1312 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
1314 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
1315 TAILQ_INSERT_TAIL(&run, tior, rlinks);
1317 donebytes += roundbytes;
1318 srclba += roundbytes / srcblock;
1319 dstlba += roundbytes / dstblock;
1322 while ((tior = TAILQ_FIRST(&run)) != NULL) {
1323 TAILQ_REMOVE(&run, tior, rlinks);
1324 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1325 panic("tpcl_queue() error");
1329 return (CTL_RETVAL_QUEUED);
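/*
 * WRITE USING TOKEN with a block-device zero ROD token: implement the copy
 * as WRITE SAME requests over the destination ranges, chained so that they
 * are issued in order.
 */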
1333 tpc_process_zero_wut(struct tpc_list *list)
1335 struct tpc_io *tio, *tiow;
1336 struct runl run, *prun;
1338 uint32_t dstblock, len;
1340 if (list->stage > 0) {
1342 /* Cleanup after previous rounds. */
1343 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1344 TAILQ_REMOVE(&list->allio, tio, links);
1345 ctl_free_io(tio->io);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
1350 return (CTL_RETVAL_ERROR);
1351 } else if (list->error) {
1352 if (list->fwd_scsi_status) {
1353 list->ctsio->io_hdr.status =
1354 CTL_SCSI_ERROR | CTL_AUTOSENSE;
1355 list->ctsio->scsi_status = list->fwd_scsi_status;
1356 list->ctsio->sense_data = list->fwd_sense_data;
1357 list->ctsio->sense_len = list->fwd_sense_len;
			} else
				ctl_set_invalid_field(list->ctsio,
1360 /*sks_valid*/ 0, /*command*/ 0,
1361 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1363 return (CTL_RETVAL_ERROR);
1365 list->cursectors += list->segsectors;
1366 list->curbytes += list->segbytes;
1367 return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	TAILQ_INIT(&run);
	prun = &run;
1374 TAILQ_INIT(&list->allio);
1375 list->segsectors = 0;
1376 for (r = 0; r < list->nrange; r++) {
1377 len = scsi_4btoul(list->range[r].length);
1381 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1382 TAILQ_INIT(&tiow->run);
1384 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1385 tiow->io = tpcl_alloc_io();
1386 ctl_scsi_write_same(tiow->io,
1390 /*lba*/ scsi_8btou64(list->range[r].lba),
1392 /*tag_type*/ CTL_TAG_SIMPLE,
1394 tiow->io->io_hdr.retries = 3;
1395 tiow->lun = list->lun->lun;
1396 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
1400 list->segsectors += len;
1402 list->segbytes = list->segsectors * dstblock;
1404 if (TAILQ_EMPTY(&run))
1407 while ((tiow = TAILQ_FIRST(&run)) != NULL) {
1408 TAILQ_REMOVE(&run, tiow, rlinks);
1409 if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
1410 panic("tpcl_queue() error");
1414 return (CTL_RETVAL_QUEUED);
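/*
 * Main state machine for a copy list: dispatch WRITE USING TOKEN lists or
 * walk the segment descriptors of an EXTENDED COPY, then report final
 * status and either free the list or keep it for RECEIVE COPY STATUS.
 */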
1418 tpc_process(struct tpc_list *list)
1420 struct ctl_lun *lun = list->lun;
1421 struct ctl_softc *softc = lun->ctl_softc;
1422 struct scsi_ec_segment *seg;
1423 struct ctl_scsiio *ctsio = list->ctsio;
1424 int retval = CTL_RETVAL_COMPLETE;
1427 if (list->service_action == EC_WUT) {
1428 if (list->token != NULL)
1429 retval = tpc_process_wut(list);
1431 retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
1434 if (retval == CTL_RETVAL_ERROR) {
1439 //printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
1440 while (list->curseg < list->nseg) {
1441 seg = list->seg[list->curseg];
1442 switch (seg->type_code) {
		case EC_SEG_B2B:
			retval = tpc_process_b2b(list);
			break;
		case EC_SEG_VERIFY:
			retval = tpc_process_verify(list);
			break;
1449 case EC_SEG_REGISTER_KEY:
1450 retval = tpc_process_register_key(list);
			break;
		default:
			scsi_ulto4b(list->curseg, csi);
1454 ctl_set_sense(ctsio, /*current_error*/ 1,
1455 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1456 /*asc*/ 0x26, /*ascq*/ 0x09,
1457 SSD_ELEM_COMMAND, sizeof(csi), csi,
		if (retval == CTL_RETVAL_QUEUED)
			return;
1463 if (retval == CTL_RETVAL_ERROR) {
1472 ctl_set_success(ctsio);
1475 //printf("ZZZ done\n");
1476 free(list->params, M_CTL);
1477 list->params = NULL;
	if (list->token != NULL) {
		mtx_lock(&softc->tpc_lock);
1480 if (--list->token->active == 0)
1481 list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
1485 mtx_lock(&lun->lun_lock);
1486 if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
1491 list->last_active = time_uptime;
1492 list->sense_data = ctsio->sense_data;
1493 list->sense_len = ctsio->sense_len;
1494 list->scsi_status = ctsio->scsi_status;
1496 mtx_unlock(&lun->lun_lock);
1498 ctl_done((union ctl_io *)ctsio);
/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
1512 static tpc_error_action
1513 tpc_checkcond_parse(union ctl_io *io)
1515 tpc_error_action error_action;
1516 int error_code, sense_key, asc, ascq;
	/*
	 * Default to retrying the command.
	 */
1521 error_action = TPC_ERR_RETRY;
1523 scsi_extract_sense_len(&io->scsiio.sense_data,
1524 io->scsiio.sense_len,
1531 switch (error_code) {
1532 case SSD_DEFERRED_ERROR:
1533 case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
1536 case SSD_CURRENT_ERROR:
1537 case SSD_DESC_CURRENT_ERROR:
1539 switch (sense_key) {
1540 case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
1543 case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
1548 if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
1551 case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
1556 if ((asc == 0x04) && (ascq == 0x02))
1557 error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
1562 if ((asc == 0x04) && (ascq == 0x03))
1563 error_action = TPC_ERR_FAIL;
1567 return (error_action);
1570 static tpc_error_action
1571 tpc_error_parse(union ctl_io *io)
1573 tpc_error_action error_action = TPC_ERR_RETRY;
1575 switch (io->io_hdr.io_type) {
1577 switch (io->io_hdr.status & CTL_STATUS_MASK) {
1578 case CTL_SCSI_ERROR:
1579 switch (io->scsiio.scsi_status) {
1580 case SCSI_STATUS_CHECK_COND:
1581 error_action = tpc_checkcond_parse(io);
1594 panic("%s: invalid ctl_io type %d\n", __func__,
1595 io->io_hdr.io_type);
1598 return (error_action);
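/*
 * Completion callback for backend I/O issued by the copy engine: retry
 * recoverable errors, record forwarded sense data on failure, start any
 * chained I/O, and call tpc_process() when the last outstanding I/O of the
 * current round finishes.
 */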
1602 tpc_done(union ctl_io *io)
1604 struct tpc_io *tio, *tior;
	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
1612 tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1613 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
1614 && (io->io_hdr.retries > 0)) {
1615 ctl_io_status old_status;
1616 tpc_error_action error_action;
1618 error_action = tpc_error_parse(io);
1619 switch (error_action & TPC_ERR_MASK) {
1624 if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
1625 io->io_hdr.retries--;
1626 old_status = io->io_hdr.status;
1627 io->io_hdr.status = CTL_STATUS_NONE;
1628 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
1629 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1630 if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
			printf("%s: error returned from tpcl_queue()!\n",
			    __func__);
1633 io->io_hdr.status = old_status;
1639 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
1640 tio->list->error = 1;
1641 if (io->io_hdr.io_type == CTL_IO_SCSI &&
1642 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
1643 tio->list->fwd_scsi_status = io->scsiio.scsi_status;
1644 tio->list->fwd_sense_data = io->scsiio.sense_data;
1645 tio->list->fwd_sense_len = io->scsiio.sense_len;
1646 tio->list->fwd_target = tio->target;
1647 tio->list->fwd_cscd = tio->cscd;
1650 atomic_add_int(&tio->list->curops, 1);
1651 if (!tio->list->error && !tio->list->abort) {
1652 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
1653 TAILQ_REMOVE(&tio->run, tior, rlinks);
1654 atomic_add_int(&tio->list->tbdio, 1);
1655 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1656 panic("tpcl_queue() error");
1659 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
1660 tpc_process(tio->list);
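/*
 * EXTENDED COPY (LID1): validate the parameter list, parse the CSCD and
 * segment descriptors into a tpc_list and start processing it.
 */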
1664 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
1666 struct ctl_lun *lun = CTL_LUN(ctsio);
1667 struct scsi_extended_copy *cdb;
1668 struct scsi_extended_copy_lid1_data *data;
1669 struct scsi_ec_cscd *cscd;
1670 struct scsi_ec_segment *seg;
1671 struct tpc_list *list, *tlist;
1674 int len, off, lencscd, lenseg, leninl, nseg;
1676 CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
1678 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1679 len = scsi_4btoul(cdb->length);
	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
1685 if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
1686 len > sizeof(struct scsi_extended_copy_lid1_data) +
1687 TPC_MAX_LIST + TPC_MAX_INLINE) {
1688 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1689 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
1697 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1698 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1699 ctsio->kern_data_len = len;
1700 ctsio->kern_total_len = len;
1701 ctsio->kern_rel_offset = 0;
1702 ctsio->kern_sg_entries = 0;
1703 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1704 ctsio->be_move_done = ctl_config_move_done;
1705 ctl_datamove((union ctl_io *)ctsio);
1707 return (CTL_RETVAL_COMPLETE);
1710 data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
1711 lencscd = scsi_2btoul(data->cscd_list_length);
1712 lenseg = scsi_4btoul(data->segment_list_length);
1713 leninl = scsi_4btoul(data->inline_data_length);
1714 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1715 ctl_set_sense(ctsio, /*current_error*/ 1,
1716 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1717 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1720 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
1721 ctl_set_sense(ctsio, /*current_error*/ 1,
1722 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1723 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1726 if (lencscd + lenseg > TPC_MAX_LIST ||
1727 leninl > TPC_MAX_INLINE ||
1728 len < sizeof(struct scsi_extended_copy_lid1_data) +
1729 lencscd + lenseg + leninl) {
1730 ctl_set_param_len_error(ctsio);
1734 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1735 list->service_action = cdb->service_action;
1736 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
1737 if (value != NULL && strcmp(value, "on") == 0)
1738 list->init_port = -1;
1740 list->init_port = ctsio->io_hdr.nexus.targ_port;
1741 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
1742 list->list_id = data->list_identifier;
1743 list->flags = data->flags;
1744 list->params = ctsio->kern_data_ptr;
1745 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1746 ptr = &data->data[0];
1747 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
1748 cscd = (struct scsi_ec_cscd *)(ptr + off);
1749 if (cscd->type_code != EC_CSCD_ID) {
1751 ctl_set_sense(ctsio, /*current_error*/ 1,
1752 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1753 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
1757 ptr = &data->data[lencscd];
1758 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1759 if (nseg >= TPC_MAX_SEGS) {
1761 ctl_set_sense(ctsio, /*current_error*/ 1,
1762 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1763 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1766 seg = (struct scsi_ec_segment *)(ptr + off);
1767 if (seg->type_code != EC_SEG_B2B &&
1768 seg->type_code != EC_SEG_VERIFY &&
1769 seg->type_code != EC_SEG_REGISTER_KEY) {
1771 ctl_set_sense(ctsio, /*current_error*/ 1,
1772 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1773 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1776 list->seg[nseg] = seg;
1777 off += sizeof(struct scsi_ec_segment) +
1778 scsi_2btoul(seg->descr_length);
1780 list->inl = &data->data[lencscd + lenseg];
1781 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1783 list->leninl = leninl;
1784 list->ctsio = ctsio;
1786 mtx_lock(&lun->lun_lock);
1787 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1788 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1789 if (tlist != NULL && !tlist->completed) {
1790 mtx_unlock(&lun->lun_lock);
1792 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1793 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1797 if (tlist != NULL) {
1798 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1802 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1803 mtx_unlock(&lun->lun_lock);
	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);
1809 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1810 free(ctsio->kern_data_ptr, M_CTL);
1811 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1813 ctl_done((union ctl_io *)ctsio);
1814 return (CTL_RETVAL_COMPLETE);
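/*
 * EXTENDED COPY (LID4): as LID1, but with 32-bit list identifiers and the
 * LID4 parameter list layout.
 */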
1818 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
1820 struct ctl_lun *lun = CTL_LUN(ctsio);
1821 struct scsi_extended_copy *cdb;
1822 struct scsi_extended_copy_lid4_data *data;
1823 struct scsi_ec_cscd *cscd;
1824 struct scsi_ec_segment *seg;
1825 struct tpc_list *list, *tlist;
1828 int len, off, lencscd, lenseg, leninl, nseg;
1830 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
1832 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1833 len = scsi_4btoul(cdb->length);
	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
1839 if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
1840 len > sizeof(struct scsi_extended_copy_lid4_data) +
1841 TPC_MAX_LIST + TPC_MAX_INLINE) {
1842 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1843 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
1851 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1852 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1853 ctsio->kern_data_len = len;
1854 ctsio->kern_total_len = len;
1855 ctsio->kern_rel_offset = 0;
1856 ctsio->kern_sg_entries = 0;
1857 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1858 ctsio->be_move_done = ctl_config_move_done;
1859 ctl_datamove((union ctl_io *)ctsio);
1861 return (CTL_RETVAL_COMPLETE);
1864 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
1865 lencscd = scsi_2btoul(data->cscd_list_length);
1866 lenseg = scsi_2btoul(data->segment_list_length);
1867 leninl = scsi_2btoul(data->inline_data_length);
1868 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1869 ctl_set_sense(ctsio, /*current_error*/ 1,
1870 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1871 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1874 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
1875 ctl_set_sense(ctsio, /*current_error*/ 1,
1876 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1877 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1880 if (lencscd + lenseg > TPC_MAX_LIST ||
1881 leninl > TPC_MAX_INLINE ||
1882 len < sizeof(struct scsi_extended_copy_lid1_data) +
1883 lencscd + lenseg + leninl) {
1884 ctl_set_param_len_error(ctsio);
1888 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1889 list->service_action = cdb->service_action;
1890 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
1891 if (value != NULL && strcmp(value, "on") == 0)
1892 list->init_port = -1;
1894 list->init_port = ctsio->io_hdr.nexus.targ_port;
1895 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
1896 list->list_id = scsi_4btoul(data->list_identifier);
1897 list->flags = data->flags;
1898 list->params = ctsio->kern_data_ptr;
1899 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1900 ptr = &data->data[0];
1901 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
1902 cscd = (struct scsi_ec_cscd *)(ptr + off);
1903 if (cscd->type_code != EC_CSCD_ID) {
1905 ctl_set_sense(ctsio, /*current_error*/ 1,
1906 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1907 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
1911 ptr = &data->data[lencscd];
1912 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1913 if (nseg >= TPC_MAX_SEGS) {
1915 ctl_set_sense(ctsio, /*current_error*/ 1,
1916 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1917 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1920 seg = (struct scsi_ec_segment *)(ptr + off);
1921 if (seg->type_code != EC_SEG_B2B &&
1922 seg->type_code != EC_SEG_VERIFY &&
1923 seg->type_code != EC_SEG_REGISTER_KEY) {
1925 ctl_set_sense(ctsio, /*current_error*/ 1,
1926 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1927 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1930 list->seg[nseg] = seg;
1931 off += sizeof(struct scsi_ec_segment) +
1932 scsi_2btoul(seg->descr_length);
1934 list->inl = &data->data[lencscd + lenseg];
1935 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1937 list->leninl = leninl;
1938 list->ctsio = ctsio;
1940 mtx_lock(&lun->lun_lock);
1941 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1942 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1943 if (tlist != NULL && !tlist->completed) {
1944 mtx_unlock(&lun->lun_lock);
1946 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1947 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1951 if (tlist != NULL) {
1952 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1956 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1957 mtx_unlock(&lun->lun_lock);
	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);
1963 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1964 free(ctsio->kern_data_ptr, M_CTL);
1965 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1967 ctl_done((union ctl_io *)ctsio);
1968 return (CTL_RETVAL_COMPLETE);
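/*
 * Build a 512-byte AUR ROD token: type, length and identifier, the source
 * LUN's CSCD ID descriptor, READ CAPACITY(16)-style device data, the target
 * port identifier, and random bytes for the remainder.
 */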
1972 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
1973 struct scsi_token *token)
1976 struct scsi_vpd_id_descriptor *idd = NULL;
1977 struct scsi_ec_cscd_id *cscd;
1978 struct scsi_read_capacity_data_long *dtsd;
1981 scsi_ulto4b(ROD_TYPE_AUR, token->type);
1982 scsi_ulto2b(0x01f8, token->length);
1983 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
1985 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1986 lun->lun_devid->data, lun->lun_devid->len,
1987 scsi_devid_is_lun_naa);
1988 if (idd == NULL && lun->lun_devid)
1989 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1990 lun->lun_devid->data, lun->lun_devid->len,
1991 scsi_devid_is_lun_eui64);
1993 cscd = (struct scsi_ec_cscd_id *)&token->body[8];
1994 cscd->type_code = EC_CSCD_ID;
1995 cscd->luidt_pdt = T_DIRECT;
1996 memcpy(&cscd->codeset, idd, 4 + idd->length);
1997 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
1999 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
2000 scsi_u64to8b(len, &token->body[48]);
2002 /* ROD token device type specific data (RC16 without first field) */
2003 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
2004 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
2005 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
2006 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
2007 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
2008 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
2010 if (port->target_devid) {
2011 targid_len = port->target_devid->len;
2012 memcpy(&token->body[120], port->target_devid->data, targid_len);
2015 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
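/*
 * POPULATE TOKEN: validate the parameter data and source ranges, then
 * create a ROD token describing them for later WRITE USING TOKEN commands.
 */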
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_populate_token *cdb;
	struct scsi_populate_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_populate_token\n"));

	cdb = (struct scsi_populate_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_populate_token_data) ||
	    len > sizeof(struct scsi_populate_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}
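	/*
	 * The parameter data has arrived; cross-check the length fields it
	 * carries against each other and against the transfer length from
	 * the CDB before looking at the descriptors.
	 */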
	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_populate_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_4btoul(data->inactivity_timeout),
	    scsi_4btoul(data->rod_type),
	    scsi_2btoul(data->range_descriptor_length));
*/
	/* Validate INACTIVITY TIMEOUT field */
	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	/* Validate ROD TYPE field */
	if ((data->flags & EC_PT_RTV) &&
	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
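	/*
	 * All checks passed; register a tpc_list for this POPULATE TOKEN
	 * command so that RECEIVE ROD TOKEN INFORMATION can later report
	 * its status and return the created token.
	 */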
	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);
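	/*
	 * Create the ROD token itself: remember the requested ranges, apply
	 * the default or minimum inactivity timeout as needed, and hang the
	 * token off the softc-wide token list so later commands can find it.
	 */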
	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->curseg = 0;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
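/*
 * WRITE USING TOKEN: look up the ROD token named by the initiator and
 * queue a copy list that writes the token's data into the given ranges
 * of this LUN.
 */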
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}
	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_write_using_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
*/
	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
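	/*
	 * Register a tpc_list for this WRITE USING TOKEN command; the range
	 * descriptors and the offset into the ROD are referenced directly
	 * from the parameter data, which therefore stays allocated.
	 */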
	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);
	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}
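	/*
	 * Otherwise the token must be found on the softc-wide token list by
	 * its full 512-byte value; if it is not there, it was never created
	 * here or has already timed out.
	 */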
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);
done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
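/*
 * RECEIVE ROD TOKEN INFORMATION: report the status of a previously issued
 * POPULATE TOKEN or WRITE USING TOKEN list and, if one was created, return
 * the ROD token itself.
 */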
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);
	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;
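	/*
	 * Build a RECEIVE COPY STATUS (LID4) style response: overall copy
	 * command status, progress counters, any saved sense data and, if
	 * one was created, the resulting ROD token.
	 */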
	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
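/*
 * REPORT ALL ROD TOKENS: return the first 96 bytes (the management data)
 * of every ROD token currently known to this target.
 */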
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;
	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;
	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}