/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014-2021 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <machine/atomic.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>
#define	TPC_MAX_CSCDS	64	/* Maximum number of CSCDs. */
#define	TPC_MAX_SEGS	64	/* Maximum number of segment descriptors. */
#define	TPC_MAX_SEG	0	/* Maximum segment length (0 -- unlimited). */
#define	TPC_MAX_LIST	8192	/* Maximum size of a parameter list. */
#define	TPC_MAX_INLINE	0	/* Maximum size of inline data. */
#define	TPC_MAX_LISTS	255	/* Maximum number of concurrent lists. */
#define	TPC_MAX_IO_SIZE	(8 * MIN(1024 * 1024, MAX(128 * 1024, maxphys)))
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 4)
#define	TPC_MIN_TOKEN_TIMEOUT	1	/* Seconds. */
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600
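
/*
 * Worked example for the I/O sizing above (a sketch, not normative): with
 * the common maxphys default of 1 MiB, MIN(1M, MAX(128K, maxphys)) comes
 * out to 1 MiB, so TPC_MAX_IO_SIZE is 8 MiB per copy-engine I/O and
 * TPC_MAX_IOCHUNK_SIZE is 32 MiB per WUT chunk.  With maxphys clamped to
 * 128 KiB the same expressions give 1 MiB and 4 MiB.
 */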
MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY = 0x000,
	TPC_ERR_FAIL = 0x001,
	TPC_ERR_MASK = 0x0ff,
	TPC_ERR_NO_DECREMENT = 0x100
} tpc_error_action;
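
/*
 * The low byte selects the action (retry/fail); TPC_ERR_NO_DECREMENT is a
 * flag OR-ed on top, e.g. a deferred error yields
 * (TPC_ERR_RETRY | TPC_ERR_NO_DECREMENT): retry, but leave the retry budget
 * intact.  Consumers mask with TPC_ERR_MASK to recover the action.
 */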
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint8_t			 target;
	uint32_t		 cscd;
	uint64_t		 lun;
	uint8_t			*buf;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 fwd_sense_data;
	uint8_t			 fwd_sense_len;
	uint8_t			 fwd_scsi_status;
	uint8_t			 fwd_target;
	uint16_t		 fwd_cscd;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};
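
/*
 * Ownership sketch: a tpc_list represents one EXTENDED COPY/WUT command and
 * owns every tpc_io it spawns via 'allio'.  Each tpc_io may chain dependent
 * I/Os on its 'run' list (e.g. the write that must follow a read);
 * tpc_done() launches that chain when the predecessor completes.
 */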
static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S, 0);
}
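
/*
 * The callout rearms itself, so expiry scanning runs once per second until
 * ctl_tpc_shutdown() drains it; both sweeps above are O(n) over completed
 * lists and inactive tokens and rely on *_FOREACH_SAFE for in-loop removal.
 */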
void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S,
	    tpc_timeout, softc, 0);
}

void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}
int
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
	return (0);
}

void
ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
{
	struct tpc_list *list, *tlist;

	TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
		if (initidx != -1 && list->init_idx != initidx)
			continue;
		if (!list->completed)
			continue;
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	int data_len;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	    2 * sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(data_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Block Device ROD Limits */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    bdrl_ptr->maximum_inactivity_timeout);
	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
	    bdrl_ptr->default_inactivity_timeout);
	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
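
	/*
	 * Each VPD descriptor below is located by stepping over the previous
	 * one: desc_length counts the bytes after the 4-byte header, so
	 * &d_ptr->parameters[0] + desc_length is the start of the next
	 * descriptor.  The roundup2(..., 4) terms in data_len above keep the
	 * variable-length descriptors 4-byte aligned, as this walk assumes.
	 */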
	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 5;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_PT;
	scd_ptr->supported_service_actions[3] = EC_WUT;
	scd_ptr->supported_service_actions[4] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 6;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;
	scd_ptr->supported_service_actions[4] = RCS_RRTI;
	scd_ptr->supported_service_actions[5] = RCS_RART;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2 * sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2 * sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}
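
/*
 * Lists created with LIST ID USAGE set to "none" are invisible here by
 * design: tpc_process() frees them as soon as they complete, so RECEIVE
 * COPY STATUS can only match lists whose originator asked for them to be
 * held, and then only from the same initiator index.
 */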
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		if (pb && list->lun->be_lun)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo && list->lun->be_lun)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}
static void
tpc_set_io_error_sense(struct tpc_list *list)
{
	int flen;
	uint8_t csi[4];
	uint8_t sks[3];
	uint8_t fbuf[4 + 64];

	scsi_ulto4b(list->curseg, csi);
	if (list->fwd_cscd <= 0x07ff) {
		sks[0] = SSD_SKS_SEGMENT_VALID;
		scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
		    list->params, &sks[1]);
	} else
		sks[0] = 0;
	if (list->fwd_scsi_status) {
		fbuf[0] = 0x0c;
		fbuf[2] = list->fwd_target;
		flen = list->fwd_sense_len;
		if (flen) {
			fbuf[1] = 2 + flen;
			fbuf[2] |= SSD_FORWARDED_FSDT;
		} else
			fbuf[1] = 2;
		fbuf[3] = list->fwd_scsi_status;
		bcopy(&list->fwd_sense_data, &fbuf[4], flen);
		flen += 4;
	} else
		flen = 0;
	ctl_set_sense(list->ctsio, /*current_error*/ 1,
	    /*sense_key*/ SSD_KEY_COPY_ABORTED,
	    /*asc*/ 0x0d, /*ascq*/ 0x01,
	    SSD_ELEM_COMMAND, sizeof(csi), csi,
	    sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
	    flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
	    SSD_ELEM_NONE);
}
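
/*
 * Illustrative example of the sense data built above: a failed write on
 * segment 2 to CSCD 1 yields COPY ABORTED (asc/ascq 0x0d/0x01) with the
 * segment number in the COMMAND information bytes, a sense-key-specific
 * field pointing at the byte offset of CSCD 1 within the parameter list,
 * and, if the remote device returned sense, that sense forwarded in a
 * descriptor starting at fbuf[4].
 */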
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;
	uint16_t scscd, dcscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior->buf, M_CTL);
			free(tior, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	scscd = scsi_2btoul(seg->src_cscd);
	dcscd = scsi_2btoul(seg->dst_cscd);
	sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
	dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
	if (sl == UINT64_MAX || dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	if (pbo > 0)
		pbo = pb - pbo;
	sdstp = &list->cscd[scscd].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[dcscd].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ tior->buf,
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->target = SSD_FORWARDED_SDS_EXSRC;
		tior->cscd = scscd;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ tior->buf,
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->target = SSD_FORWARDED_SDS_EXDST;
		tiow->cscd = dcscd;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
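
/*
 * tpc_process_b2b() is a two-stage state machine: stage 0 (above) carves
 * the segment into TPC_MAX_IO_SIZE chunks, pairing each read with the
 * write that reuses its buffer, and queues only the reads; tpc_done()
 * fires each write when its read finishes and re-enters here at stage 1
 * to free the I/Os and account the bytes.  E.g. a 24 MiB segment with an
 * 8 MiB TPC_MAX_IO_SIZE becomes three read+write pairs in one round.
 */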
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->src_cscd);
	sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (sl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXSRC;
	tio->cscd = cscd;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	list->tbdio = 1;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;
	uint16_t cscd;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			tpc_set_io_error_sense(list);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	cscd = scsi_2btoul(seg->dst_cscd);
	dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
	if (dl == UINT64_MAX) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, sizeof(csi), csi,
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	tio->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    tio->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->target = SSD_FORWARDED_SDS_EXDST;
	tio->cscd = cscd;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	list->tbdio = 1;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

static int
tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
    uint64_t *lba)
{
	uint64_t b1;
	uint32_t l1;
	int i;

	for (i = 0; i < nrange; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
			*lba = MAX(b1, maxlba + 1);
			return (-1);
		}
	}
	return (0);
}

static int
tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
{
	uint64_t b1, b2;
	uint32_t l1, l2;
	int i, j;

	for (i = 0; i < nrange - 1; i++) {
		b1 = scsi_8btou64(range[i].lba);
		l1 = scsi_4btoul(range[i].length);
		for (j = i + 1; j < nrange; j++) {
			b2 = scsi_8btou64(range[j].lba);
			l2 = scsi_4btoul(range[j].length);
			if (b1 + l1 > b2 && b2 + l2 > b1)
				return (-1);
		}
	}
	return (0);
}

static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}
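
/*
 * Worked example for tpc_skip_ranges() (illustrative): for ranges
 * {lba 100, len 8}, {lba 500, len 16} and skip = 10, the first range
 * absorbs 8 blocks, so the result is *srange = 1, *soffset = 2, i.e.
 * block 502.  A skip past the total length returns -1.
 */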
static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio->buf, M_CTL);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		return (CTL_RETVAL_ERROR);
	}

	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ tior->buf,
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ tior->buf,
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
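
/*
 * Sketch of the ROD offset math above: the copy has consumed
 * list->cursectors destination blocks, which corresponds to
 * cursectors * dstblock / srcblock source blocks past offset_into_rod.
 * This lets source and destination use different logical block sizes, as
 * long as every chunk stays a multiple of both block sizes.
 */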
static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			if (list->fwd_scsi_status) {
				list->ctsio->io_hdr.status =
				    CTL_SCSI_ERROR | CTL_AUTOSENSE;
				list->ctsio->scsi_status = list->fwd_scsi_status;
				list->ctsio->sense_data = list->fwd_sense_data;
				list->ctsio->sense_len = list->fwd_sense_len;
			} else {
				ctl_set_invalid_field(list->ctsio,
				    /*sks_valid*/ 0, /*command*/ 0,
				    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
			}
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
				    /*data_ptr*/ NULL,
				    /*data_len*/ 0,
				    /*byte2*/ SWS_NDOB,
				    /*lba*/ scsi_8btou64(list->range[r].lba),
				    /*num_blocks*/ len,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		return (CTL_RETVAL_COMPLETE);

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
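
/*
 * Unlike the b2b/WUT paths, the zero-token path above chains every
 * WRITE SAME through 'prun', so the ranges execute strictly one after
 * another and tbdio stays 1; there is no read stage because a block-zero
 * ROD carries no data to move.
 */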
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;
	uint8_t csi[4];

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				scsi_ulto4b(list->curseg, csi);
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09,
				    SSD_ELEM_COMMAND, sizeof(csi), csi,
				    SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}
/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		default:
			break;
		}
		break;
	}
	return (error_action);
}
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		    io->io_hdr.io_type);
		break;
	}
	return (error_action);
}
static void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from tpcl_queue()!\n",
				    __func__);
				io->io_hdr.status = old_status;
			} else
				return;
			break;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
		tio->list->error = 1;
		if (io->io_hdr.io_type == CTL_IO_SCSI &&
		    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
			tio->list->fwd_scsi_status = io->scsiio.scsi_status;
			tio->list->fwd_sense_data = io->scsiio.sense_data;
			tio->list->fwd_sense_len = io->scsiio.sense_len;
			tio->list->fwd_target = tio->target;
			tio->list->fwd_cscd = tio->cscd;
		}
	} else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}
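
/*
 * tbdio is the list's outstanding-I/O counter: the submit paths bump it
 * for every queued I/O, it is bumped again for each chained follow-up
 * launched above, and it is decremented here on completion.  The
 * atomic_fetchadd_int() returning 1 means this was the last in-flight
 * I/O, so the next round of tpc_process() can run safely.
 */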
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid1_data) +
	     lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[0];
	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
		cscd = (struct scsi_ec_cscd *)(ptr + off);
		if (cscd->type_code != EC_CSCD_ID) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
			goto done;
		}
	}
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		seg = (struct scsi_ec_segment *)(ptr + off);
		if (seg->type_code != EC_SEG_B2B &&
		    seg->type_code != EC_SEG_VERIFY &&
		    seg->type_code != EC_SEG_REGISTER_KEY) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = seg;
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(seg->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
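
/*
 * Parameter list layout parsed above (LID1; a sketch): a 16-byte header,
 * then lencscd bytes of 32-byte CSCD descriptors, then lenseg bytes of
 * variable-length segment descriptors (each sized by descr_length plus
 * the 4-byte segment header), then leninl bytes of inline data.  E.g.
 * two CSCDs and one B2B segment put the segment at offset 16 + 64 and
 * any inline data right after it.
 */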
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct scsi_ec_cscd *cscd;
	struct scsi_ec_segment *seg;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	const char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid4_data) +
	     lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[0];
	for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
		cscd = (struct scsi_ec_cscd *)(ptr + off);
		if (cscd->type_code != EC_CSCD_ID) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
			goto done;
		}
	}
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		seg = (struct scsi_ec_segment *)(ptr + off);
		if (seg->type_code != EC_SEG_B2B &&
		    seg->type_code != EC_SEG_VERIFY &&
		    seg->type_code != EC_SEG_REGISTER_KEY) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = seg;
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(seg->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
	static int id = 0;
	struct scsi_vpd_id_descriptor *idd = NULL;
	struct scsi_ec_cscd_id *cscd;
	struct scsi_read_capacity_data_long *dtsd;
	int targid_len;

	scsi_ulto4b(ROD_TYPE_AUR, token->type);
	scsi_ulto2b(0x01f8, token->length);
	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
	if (lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_naa);
	if (idd == NULL && lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_eui64);
	if (idd != NULL) {
		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
		cscd->type_code = EC_CSCD_ID;
		cscd->luidt_pdt = T_DIRECT;
		memcpy(&cscd->codeset, idd, 4 + idd->length);
		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
	}
	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
	scsi_u64to8b(len, &token->body[48]);

	/* ROD token device type specific data (RC16 without first field) */
	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	if (port->target_devid) {
		targid_len = port->target_devid->len;
		memcpy(&token->body[120], port->target_devid->data, targid_len);
	} else
		targid_len = 32;
	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}
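
/*
 * Informal map of the ROD token produced above (offsets into body[], a
 * sketch derived from the code): 0..7 monotonically increasing id, 8..
 * copy-manager CSCD built from the LUN's NAA or EUI64 designator, 40..47
 * the reserved 64 bits of what should be a 128-bit value, 48..55 ROD
 * length in bytes, 80.. device type specific data shaped like a READ
 * CAPACITY(16) payload, 120.. target port devid, and random bytes filling
 * the remainder so tokens are not guessable.
 */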
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_populate_token *cdb;
	struct scsi_populate_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_populate_token\n"));

	cdb = (struct scsi_populate_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_populate_token_data) ||
	    len > sizeof(struct scsi_populate_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}
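	/* Second pass: the parameter data has been fetched; validate it. */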
	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_populate_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_4btoul(data->inactivity_timeout),
	    scsi_4btoul(data->rod_type),
	    scsi_2btoul(data->range_descriptor_length));
*/

	/* Validate INACTIVITY TIMEOUT field */
	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	/* Validate ROD TYPE field */
	if ((data->flags & EC_PT_RTV) &&
	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);
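
	/*
	 * Allocate the token, record the requested ranges, and compute the
	 * total transfer size so the token body can carry the byte count.
	 */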
	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->curseg = 0;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
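
/*
 * WRITE USING TOKEN: validate the parameter list, look up the ROD token it
 * names, and hand the resulting list to tpc_process() to perform the copy.
 */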
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}
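	/* The parameter data is now in the kernel buffer; check its layout. */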
	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_write_using_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
*/

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}
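
	/*
	 * Find the token among those created by earlier POPULATE TOKEN
	 * commands and mark it active while the copy is in flight; an
	 * unknown token is reported as invalid.
	 */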
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
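
/*
 * RECEIVE ROD TOKEN INFORMATION: report the status of an earlier POPULATE
 * TOKEN or WRITE USING TOKEN command and, if one was created, return the
 * ROD token itself.
 */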
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
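
/*
 * REPORT ALL ROD TOKENS: return the list of ROD tokens currently known to
 * this target, 96 bytes of management data per token.
 */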
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}