2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2014-2021 Alexander Motin <mav@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/types.h>
35 #include <sys/module.h>
36 #include <sys/mutex.h>
37 #include <sys/condvar.h>
38 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/sysctl.h>
44 #include <machine/atomic.h>
47 #include <cam/scsi/scsi_all.h>
48 #include <cam/scsi/scsi_da.h>
49 #include <cam/ctl/ctl_io.h>
50 #include <cam/ctl/ctl.h>
51 #include <cam/ctl/ctl_frontend.h>
52 #include <cam/ctl/ctl_util.h>
53 #include <cam/ctl/ctl_backend.h>
54 #include <cam/ctl/ctl_ioctl.h>
55 #include <cam/ctl/ctl_ha.h>
56 #include <cam/ctl/ctl_private.h>
57 #include <cam/ctl/ctl_debug.h>
58 #include <cam/ctl/ctl_scsi_all.h>
59 #include <cam/ctl/ctl_tpc.h>
60 #include <cam/ctl/ctl_error.h>
62 #define TPC_MAX_CSCDS 64
63 #define TPC_MAX_SEGS 64
65 #define TPC_MAX_LIST 8192
66 #define TPC_MAX_INLINE 0
67 #define TPC_MAX_LISTS 255
68 #define TPC_MAX_IO_SIZE (8 * MIN(1024 * 1024, MAX(128 * 1024, maxphys)))
69 #define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 4)
70 #define TPC_MIN_TOKEN_TIMEOUT 1
71 #define TPC_DFL_TOKEN_TIMEOUT 60
72 #define TPC_MAX_TOKEN_TIMEOUT 600
74 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
77 TPC_ERR_RETRY = 0x000,
80 TPC_ERR_NO_DECREMENT = 0x100
84 TAILQ_HEAD(runl, tpc_io);
91 struct tpc_list *list;
93 TAILQ_ENTRY(tpc_io) rlinks;
94 TAILQ_ENTRY(tpc_io) links;
102 struct scsi_range_desc *range;
107 TAILQ_ENTRY(tpc_token) links;
111 uint8_t service_action;
117 struct scsi_ec_cscd *cscd;
118 struct scsi_ec_segment *seg[TPC_MAX_SEGS];
123 struct tpc_token *token;
124 struct scsi_range_desc *range;
126 off_t offset_into_rod;
140 TAILQ_HEAD(, tpc_io) allio;
141 struct scsi_sense_data fwd_sense_data;
142 uint8_t fwd_sense_len;
143 uint8_t fwd_scsi_status;
146 struct scsi_sense_data sense_data;
149 struct ctl_scsiio *ctsio;
152 uint8_t res_token[512];
153 TAILQ_ENTRY(tpc_list) links;
157 tpc_timeout(void *arg)
159 struct ctl_softc *softc = arg;
161 struct tpc_token *token, *ttoken;
162 struct tpc_list *list, *tlist;
164 /* Free completed lists with expired timeout. */
165 STAILQ_FOREACH(lun, &softc->lun_list, links) {
166 mtx_lock(&lun->lun_lock);
167 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
168 if (!list->completed || time_uptime < list->last_active +
169 TPC_DFL_TOKEN_TIMEOUT)
171 TAILQ_REMOVE(&lun->tpc_lists, list, links);
174 mtx_unlock(&lun->lun_lock);
177 /* Free inactive ROD tokens with expired timeout. */
178 mtx_lock(&softc->tpc_lock);
179 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
181 time_uptime < token->last_active + token->timeout + 1)
183 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
184 free(token->params, M_CTL);
187 mtx_unlock(&softc->tpc_lock);
188 callout_schedule_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S, 0);
192 ctl_tpc_init(struct ctl_softc *softc)
195 mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
196 TAILQ_INIT(&softc->tpc_tokens);
197 callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
198 callout_reset_sbt(&softc->tpc_timeout, SBT_1S, SBT_1S,
199 tpc_timeout, softc, 0);
203 ctl_tpc_shutdown(struct ctl_softc *softc)
205 struct tpc_token *token;
207 callout_drain(&softc->tpc_timeout);
209 /* Free ROD tokens. */
210 mtx_lock(&softc->tpc_lock);
211 while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
212 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
213 free(token->params, M_CTL);
216 mtx_unlock(&softc->tpc_lock);
217 mtx_destroy(&softc->tpc_lock);
221 ctl_tpc_lun_init(struct ctl_lun *lun)
224 TAILQ_INIT(&lun->tpc_lists);
228 ctl_tpc_lun_clear(struct ctl_lun *lun, uint32_t initidx)
230 struct tpc_list *list, *tlist;
232 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
233 if (initidx != -1 && list->init_idx != initidx)
235 if (!list->completed)
237 TAILQ_REMOVE(&lun->tpc_lists, list, links);
243 ctl_tpc_lun_shutdown(struct ctl_lun *lun)
245 struct ctl_softc *softc = lun->ctl_softc;
246 struct tpc_list *list;
247 struct tpc_token *token, *ttoken;
249 /* Free lists for this LUN. */
250 while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
251 TAILQ_REMOVE(&lun->tpc_lists, list, links);
252 KASSERT(list->completed,
253 ("Not completed TPC (%p) on shutdown", list));
257 /* Free ROD tokens for this LUN. */
258 mtx_lock(&softc->tpc_lock);
259 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
260 if (token->lun != lun->lun || token->active)
262 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
263 free(token->params, M_CTL);
266 mtx_unlock(&softc->tpc_lock);
270 ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
272 struct ctl_lun *lun = CTL_LUN(ctsio);
273 struct scsi_vpd_tpc *tpc_ptr;
274 struct scsi_vpd_tpc_descriptor *d_ptr;
275 struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
276 struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
277 struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
278 struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
279 struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
280 struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
281 struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
282 struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
283 struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
284 struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
285 struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
288 data_len = sizeof(struct scsi_vpd_tpc) +
289 sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
290 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
291 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
292 sizeof(struct scsi_vpd_tpc_descriptor_pd) +
293 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
294 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
295 sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
296 sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
297 sizeof(struct scsi_vpd_tpc_descriptor_srt) +
298 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
299 sizeof(struct scsi_vpd_tpc_descriptor_gco);
301 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
302 tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
303 ctsio->kern_rel_offset = 0;
304 ctsio->kern_sg_entries = 0;
305 ctsio->kern_data_len = min(data_len, alloc_len);
306 ctsio->kern_total_len = ctsio->kern_data_len;
309 * The control device is always connected. The disk device, on the
310 * other hand, may not be online all the time.
313 tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
314 lun->be_lun->lun_type;
316 tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
317 tpc_ptr->page_code = SVPD_SCSI_TPC;
318 scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
320 /* Block Device ROD Limits */
321 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
322 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
323 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
324 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
325 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
326 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
327 bdrl_ptr->maximum_inactivity_timeout);
328 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
329 bdrl_ptr->default_inactivity_timeout);
330 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
331 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
333 /* Supported commands */
334 d_ptr = (struct scsi_vpd_tpc_descriptor *)
335 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
336 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
337 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
338 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
339 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
340 scd_ptr = &sc_ptr->descr[0];
341 scd_ptr->opcode = EXTENDED_COPY;
342 scd_ptr->sa_length = 5;
343 scd_ptr->supported_service_actions[0] = EC_EC_LID1;
344 scd_ptr->supported_service_actions[1] = EC_EC_LID4;
345 scd_ptr->supported_service_actions[2] = EC_PT;
346 scd_ptr->supported_service_actions[3] = EC_WUT;
347 scd_ptr->supported_service_actions[4] = EC_COA;
348 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
349 &scd_ptr->supported_service_actions[scd_ptr->sa_length];
350 scd_ptr->opcode = RECEIVE_COPY_STATUS;
351 scd_ptr->sa_length = 6;
352 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
353 scd_ptr->supported_service_actions[1] = RCS_RCFD;
354 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
355 scd_ptr->supported_service_actions[3] = RCS_RCOP;
356 scd_ptr->supported_service_actions[4] = RCS_RRTI;
357 scd_ptr->supported_service_actions[5] = RCS_RART;
359 /* Parameter data. */
360 d_ptr = (struct scsi_vpd_tpc_descriptor *)
361 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
362 pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
363 scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
364 scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
365 scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
366 scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
367 scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
368 scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);
370 /* Supported Descriptors */
371 d_ptr = (struct scsi_vpd_tpc_descriptor *)
372 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
373 sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
374 scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
375 scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
376 sd_ptr->list_length = 4;
377 sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
378 sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
379 sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
380 sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;
382 /* Supported CSCD Descriptor IDs */
383 d_ptr = (struct scsi_vpd_tpc_descriptor *)
384 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
385 sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
386 scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
387 scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
388 scsi_ulto2b(2, sdid_ptr->list_length);
389 scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);
391 /* ROD Token Features */
392 d_ptr = (struct scsi_vpd_tpc_descriptor *)
393 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
394 rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
395 scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
396 scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
397 rtf_ptr->remote_tokens = 0;
398 scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
399 scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
400 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
401 rtf_ptr->maximum_token_inactivity_timeout);
402 scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
403 rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
404 &rtf_ptr->type_specific_features;
405 rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
406 scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
407 scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
408 scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
409 scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
410 scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
411 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
412 rtfb_ptr->optimal_bytes_from_token_per_segment);
414 /* Supported ROD Tokens */
415 d_ptr = (struct scsi_vpd_tpc_descriptor *)
416 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
417 srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
418 scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
419 scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
420 scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
421 srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
422 &srt_ptr->rod_type_descriptors;
423 scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
424 srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
425 scsi_ulto2b(0, srtd_ptr->preference_indicator);
427 scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
428 srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
429 scsi_ulto2b(0, srtd_ptr->preference_indicator);
431 /* General Copy Operations */
432 d_ptr = (struct scsi_vpd_tpc_descriptor *)
433 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
434 gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
435 scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
436 scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
437 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
438 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
439 scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
440 gco_ptr->data_segment_granularity = 0;
441 gco_ptr->inline_data_granularity = 0;
443 ctl_set_success(ctsio);
444 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
445 ctsio->be_move_done = ctl_config_move_done;
446 ctl_datamove((union ctl_io *)ctsio);
448 return (CTL_RETVAL_COMPLETE);
452 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
454 struct scsi_receive_copy_operating_parameters *cdb;
455 struct scsi_receive_copy_operating_parameters_data *data;
457 int alloc_len, total_len;
459 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
461 cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
463 retval = CTL_RETVAL_COMPLETE;
465 total_len = sizeof(*data) + 4;
466 alloc_len = scsi_4btoul(cdb->length);
468 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
469 ctsio->kern_sg_entries = 0;
470 ctsio->kern_rel_offset = 0;
471 ctsio->kern_data_len = min(total_len, alloc_len);
472 ctsio->kern_total_len = ctsio->kern_data_len;
474 data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
475 scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
476 data->snlid = RCOP_SNLID;
477 scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
478 scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
479 scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
480 scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
481 scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
482 scsi_ulto4b(0, data->held_data_limit);
483 scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
484 scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
485 data->maximum_concurrent_copies = TPC_MAX_LISTS;
486 data->data_segment_granularity = 0;
487 data->inline_data_granularity = 0;
488 data->held_data_granularity = 0;
489 data->implemented_descriptor_list_length = 4;
490 data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
491 data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
492 data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
493 data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
495 ctl_set_success(ctsio);
496 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
497 ctsio->be_move_done = ctl_config_move_done;
498 ctl_datamove((union ctl_io *)ctsio);
502 static struct tpc_list *
503 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
505 struct tpc_list *list;
507 mtx_assert(&lun->lun_lock, MA_OWNED);
508 TAILQ_FOREACH(list, &lun->tpc_lists, links) {
509 if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
510 EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
511 list->init_idx == init_idx)
518 ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
520 struct ctl_lun *lun = CTL_LUN(ctsio);
521 struct scsi_receive_copy_status_lid1 *cdb;
522 struct scsi_receive_copy_status_lid1_data *data;
523 struct tpc_list *list;
524 struct tpc_list list_copy;
526 int alloc_len, total_len;
529 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
531 cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
532 retval = CTL_RETVAL_COMPLETE;
534 list_id = cdb->list_identifier;
535 mtx_lock(&lun->lun_lock);
536 list = tpc_find_list(lun, list_id,
537 ctl_get_initindex(&ctsio->io_hdr.nexus));
539 mtx_unlock(&lun->lun_lock);
540 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
541 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
543 ctl_done((union ctl_io *)ctsio);
547 if (list->completed) {
548 TAILQ_REMOVE(&lun->tpc_lists, list, links);
551 mtx_unlock(&lun->lun_lock);
553 total_len = sizeof(*data);
554 alloc_len = scsi_4btoul(cdb->length);
556 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
557 ctsio->kern_sg_entries = 0;
558 ctsio->kern_rel_offset = 0;
559 ctsio->kern_data_len = min(total_len, alloc_len);
560 ctsio->kern_total_len = ctsio->kern_data_len;
562 data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
563 scsi_ulto4b(sizeof(*data) - 4, data->available_data);
564 if (list_copy.completed) {
565 if (list_copy.error || list_copy.abort)
566 data->copy_command_status = RCS_CCS_ERROR;
568 data->copy_command_status = RCS_CCS_COMPLETED;
570 data->copy_command_status = RCS_CCS_INPROG;
571 scsi_ulto2b(list_copy.curseg, data->segments_processed);
572 if (list_copy.curbytes <= UINT32_MAX) {
573 data->transfer_count_units = RCS_TC_BYTES;
574 scsi_ulto4b(list_copy.curbytes, data->transfer_count);
576 data->transfer_count_units = RCS_TC_MBYTES;
577 scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
580 ctl_set_success(ctsio);
581 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
582 ctsio->be_move_done = ctl_config_move_done;
583 ctl_datamove((union ctl_io *)ctsio);
588 ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
590 struct ctl_lun *lun = CTL_LUN(ctsio);
591 struct scsi_receive_copy_failure_details *cdb;
592 struct scsi_receive_copy_failure_details_data *data;
593 struct tpc_list *list;
594 struct tpc_list list_copy;
596 int alloc_len, total_len;
599 CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
601 cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
602 retval = CTL_RETVAL_COMPLETE;
604 list_id = cdb->list_identifier;
605 mtx_lock(&lun->lun_lock);
606 list = tpc_find_list(lun, list_id,
607 ctl_get_initindex(&ctsio->io_hdr.nexus));
608 if (list == NULL || !list->completed) {
609 mtx_unlock(&lun->lun_lock);
610 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
611 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
613 ctl_done((union ctl_io *)ctsio);
617 TAILQ_REMOVE(&lun->tpc_lists, list, links);
619 mtx_unlock(&lun->lun_lock);
621 total_len = sizeof(*data) + list_copy.sense_len;
622 alloc_len = scsi_4btoul(cdb->length);
624 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
625 ctsio->kern_sg_entries = 0;
626 ctsio->kern_rel_offset = 0;
627 ctsio->kern_data_len = min(total_len, alloc_len);
628 ctsio->kern_total_len = ctsio->kern_data_len;
630 data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
631 if (list_copy.completed && (list_copy.error || list_copy.abort)) {
632 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
633 data->available_data);
634 data->copy_command_status = RCS_CCS_ERROR;
636 scsi_ulto4b(0, data->available_data);
637 scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
638 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
640 ctl_set_success(ctsio);
641 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
642 ctsio->be_move_done = ctl_config_move_done;
643 ctl_datamove((union ctl_io *)ctsio);
648 ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
650 struct ctl_lun *lun = CTL_LUN(ctsio);
651 struct scsi_receive_copy_status_lid4 *cdb;
652 struct scsi_receive_copy_status_lid4_data *data;
653 struct tpc_list *list;
654 struct tpc_list list_copy;
656 int alloc_len, total_len;
659 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
661 cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
662 retval = CTL_RETVAL_COMPLETE;
664 list_id = scsi_4btoul(cdb->list_identifier);
665 mtx_lock(&lun->lun_lock);
666 list = tpc_find_list(lun, list_id,
667 ctl_get_initindex(&ctsio->io_hdr.nexus));
669 mtx_unlock(&lun->lun_lock);
670 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
671 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
673 ctl_done((union ctl_io *)ctsio);
677 if (list->completed) {
678 TAILQ_REMOVE(&lun->tpc_lists, list, links);
681 mtx_unlock(&lun->lun_lock);
683 total_len = sizeof(*data) + list_copy.sense_len;
684 alloc_len = scsi_4btoul(cdb->length);
686 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
687 ctsio->kern_sg_entries = 0;
688 ctsio->kern_rel_offset = 0;
689 ctsio->kern_data_len = min(total_len, alloc_len);
690 ctsio->kern_total_len = ctsio->kern_data_len;
692 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
693 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
694 data->available_data);
695 data->response_to_service_action = list_copy.service_action;
696 if (list_copy.completed) {
698 data->copy_command_status = RCS_CCS_ERROR;
699 else if (list_copy.abort)
700 data->copy_command_status = RCS_CCS_ABORTED;
702 data->copy_command_status = RCS_CCS_COMPLETED;
704 data->copy_command_status = RCS_CCS_INPROG_FG;
705 scsi_ulto2b(list_copy.curops, data->operation_counter);
706 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
707 data->transfer_count_units = RCS_TC_BYTES;
708 scsi_u64to8b(list_copy.curbytes, data->transfer_count);
709 scsi_ulto2b(list_copy.curseg, data->segments_processed);
710 data->length_of_the_sense_data_field = list_copy.sense_len;
711 data->sense_data_length = list_copy.sense_len;
712 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
714 ctl_set_success(ctsio);
715 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
716 ctsio->be_move_done = ctl_config_move_done;
717 ctl_datamove((union ctl_io *)ctsio);
722 ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
724 struct ctl_lun *lun = CTL_LUN(ctsio);
725 struct scsi_copy_operation_abort *cdb;
726 struct tpc_list *list;
730 CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
732 cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
733 retval = CTL_RETVAL_COMPLETE;
735 list_id = scsi_4btoul(cdb->list_identifier);
736 mtx_lock(&lun->lun_lock);
737 list = tpc_find_list(lun, list_id,
738 ctl_get_initindex(&ctsio->io_hdr.nexus));
740 mtx_unlock(&lun->lun_lock);
741 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
742 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
744 ctl_done((union ctl_io *)ctsio);
748 mtx_unlock(&lun->lun_lock);
750 ctl_set_success(ctsio);
751 ctl_done((union ctl_io *)ctsio);
756 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
757 uint32_t *pb, uint32_t *pbo)
762 *ss = list->lun->be_lun->blocksize;
764 *pb = list->lun->be_lun->blocksize <<
765 list->lun->be_lun->pblockexp;
767 *pbo = list->lun->be_lun->blocksize *
768 list->lun->be_lun->pblockoff;
769 return (list->lun->lun);
771 if (idx >= list->ncscd)
773 return (tpcl_resolve(list->lun->ctl_softc,
774 list->init_port, &list->cscd[idx], ss, pb, pbo));
/*
 * Build COPY ABORTED sense (ASC/ASCQ 0x0d/0x01) for a failed copy list:
 * a COMMAND element carrying the failed segment number, an optional
 * sense-key-specific element pointing at the offending CSCD descriptor,
 * and an optional forwarded-sense descriptor holding the downstream
 * device's SCSI status and sense data.
 *
 * NOTE(review): lines of this function were lost in extraction (the
 * declarations of flen/csi/sks, the else arms, and the forwarded-sense
 * length bookkeeping); the remaining statements are preserved verbatim.
 */
778 tpc_set_io_error_sense(struct tpc_list *list)
/* fbuf holds a forwarded-sense descriptor: 4-byte header + sense data. */
783 uint8_t fbuf[4 + 64];
/* CSI: the segment number being processed when the error occurred. */
785 scsi_ulto4b(list->curseg, csi);
/* Valid CSCD indexes are 0..0x07ff; point SKS at the failing descriptor. */
786 if (list->fwd_cscd <= 0x07ff) {
787 sks[0] = SSD_SKS_SEGMENT_VALID;
/* Byte offset of the CSCD descriptor within the parameter list. */
788 scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
789 list->params, &sks[1]);
/* A forwarded SCSI status from the downstream device, if any. */
792 if (list->fwd_scsi_status) {
792 fbuf[2] = list->fwd_target;
795 flen = list->fwd_sense_len;
/* No sense data forwarded: mark descriptor as status-only. */
798 fbuf[2] |= SSD_FORWARDED_FSDT;
801 fbuf[3] = list->fwd_scsi_status;
802 bcopy(&list->fwd_sense_data, &fbuf[4], flen);
/* SKS and forwarded-sense elements are included only when populated. */
806 ctl_set_sense(list->ctsio, /*current_error*/ 1,
807 /*sense_key*/ SSD_KEY_COPY_ABORTED,
808 /*asc*/ 0x0d, /*ascq*/ 0x01,
809 SSD_ELEM_COMMAND, sizeof(csi), csi,
810 sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
811 flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
816 tpc_process_b2b(struct tpc_list *list)
818 struct scsi_ec_segment_b2b *seg;
819 struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
820 struct tpc_io *tior, *tiow;
823 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
825 uint32_t srcblock, dstblock, pb, pbo, adj;
826 uint16_t scscd, dcscd;
829 scsi_ulto4b(list->curseg, csi);
830 if (list->stage == 1) {
831 while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
832 TAILQ_REMOVE(&list->allio, tior, links);
833 ctl_free_io(tior->io);
834 free(tior->buf, M_CTL);
838 ctl_set_task_aborted(list->ctsio);
839 return (CTL_RETVAL_ERROR);
840 } else if (list->error) {
841 tpc_set_io_error_sense(list);
842 return (CTL_RETVAL_ERROR);
844 list->cursectors += list->segsectors;
845 list->curbytes += list->segbytes;
846 return (CTL_RETVAL_COMPLETE);
849 TAILQ_INIT(&list->allio);
850 seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
851 scscd = scsi_2btoul(seg->src_cscd);
852 dcscd = scsi_2btoul(seg->dst_cscd);
853 sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
854 dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
855 if (sl == UINT64_MAX || dl == UINT64_MAX) {
856 ctl_set_sense(list->ctsio, /*current_error*/ 1,
857 /*sense_key*/ SSD_KEY_COPY_ABORTED,
858 /*asc*/ 0x08, /*ascq*/ 0x04,
859 SSD_ELEM_COMMAND, sizeof(csi), csi,
861 return (CTL_RETVAL_ERROR);
865 sdstp = &list->cscd[scscd].dtsp;
866 if (scsi_3btoul(sdstp->block_length) != 0)
867 srcblock = scsi_3btoul(sdstp->block_length);
868 ddstp = &list->cscd[dcscd].dtsp;
869 if (scsi_3btoul(ddstp->block_length) != 0)
870 dstblock = scsi_3btoul(ddstp->block_length);
871 numlba = scsi_2btoul(seg->number_of_blocks);
872 if (seg->flags & EC_SEG_DC)
873 numbytes = (off_t)numlba * dstblock;
875 numbytes = (off_t)numlba * srcblock;
876 srclba = scsi_8btou64(seg->src_lba);
877 dstlba = scsi_8btou64(seg->dst_lba);
879 // printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
880 // (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
881 // dl, scsi_8btou64(seg->dst_lba));
884 return (CTL_RETVAL_COMPLETE);
886 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
887 ctl_set_sense(list->ctsio, /*current_error*/ 1,
888 /*sense_key*/ SSD_KEY_COPY_ABORTED,
889 /*asc*/ 0x26, /*ascq*/ 0x0A,
890 SSD_ELEM_COMMAND, sizeof(csi), csi,
892 return (CTL_RETVAL_ERROR);
895 list->segbytes = numbytes;
896 list->segsectors = numbytes / dstblock;
900 while (donebytes < numbytes) {
901 roundbytes = numbytes - donebytes;
902 if (roundbytes > TPC_MAX_IO_SIZE) {
903 roundbytes = TPC_MAX_IO_SIZE;
904 roundbytes -= roundbytes % dstblock;
906 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
907 if (roundbytes > adj)
912 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
913 TAILQ_INIT(&tior->run);
914 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
916 TAILQ_INSERT_TAIL(&list->allio, tior, links);
917 tior->io = tpcl_alloc_io();
918 ctl_scsi_read_write(tior->io,
919 /*data_ptr*/ tior->buf,
920 /*data_len*/ roundbytes,
923 /*minimum_cdb_size*/ 0,
925 /*num_blocks*/ roundbytes / srcblock,
926 /*tag_type*/ CTL_TAG_SIMPLE,
928 tior->io->io_hdr.retries = 3;
929 tior->target = SSD_FORWARDED_SDS_EXSRC;
932 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
934 tiow = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
935 TAILQ_INIT(&tiow->run);
937 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
938 tiow->io = tpcl_alloc_io();
939 ctl_scsi_read_write(tiow->io,
940 /*data_ptr*/ tior->buf,
941 /*data_len*/ roundbytes,
944 /*minimum_cdb_size*/ 0,
946 /*num_blocks*/ roundbytes / dstblock,
947 /*tag_type*/ CTL_TAG_SIMPLE,
949 tiow->io->io_hdr.retries = 3;
950 tiow->target = SSD_FORWARDED_SDS_EXDST;
953 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
955 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
956 TAILQ_INSERT_TAIL(&run, tior, rlinks);
958 donebytes += roundbytes;
959 srclba += roundbytes / srcblock;
960 dstlba += roundbytes / dstblock;
963 while ((tior = TAILQ_FIRST(&run)) != NULL) {
964 TAILQ_REMOVE(&run, tior, rlinks);
965 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
966 panic("tpcl_queue() error");
970 return (CTL_RETVAL_QUEUED);
974 tpc_process_verify(struct tpc_list *list)
976 struct scsi_ec_segment_verify *seg;
982 scsi_ulto4b(list->curseg, csi);
983 if (list->stage == 1) {
984 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
985 TAILQ_REMOVE(&list->allio, tio, links);
986 ctl_free_io(tio->io);
990 ctl_set_task_aborted(list->ctsio);
991 return (CTL_RETVAL_ERROR);
992 } else if (list->error) {
993 tpc_set_io_error_sense(list);
994 return (CTL_RETVAL_ERROR);
996 return (CTL_RETVAL_COMPLETE);
999 TAILQ_INIT(&list->allio);
1000 seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
1001 cscd = scsi_2btoul(seg->src_cscd);
1002 sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
1003 if (sl == UINT64_MAX) {
1004 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1005 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1006 /*asc*/ 0x08, /*ascq*/ 0x04,
1007 SSD_ELEM_COMMAND, sizeof(csi), csi,
1009 return (CTL_RETVAL_ERROR);
1012 // printf("Verify %ju\n", sl);
1014 if ((seg->tur & 0x01) == 0)
1015 return (CTL_RETVAL_COMPLETE);
1018 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1019 TAILQ_INIT(&tio->run);
1021 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1022 tio->io = tpcl_alloc_io();
1023 ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1024 tio->io->io_hdr.retries = 3;
1025 tio->target = SSD_FORWARDED_SDS_EXSRC;
1028 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1030 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1031 panic("tpcl_queue() error");
1032 return (CTL_RETVAL_QUEUED);
/*
 * Process an EC_SEG_REGISTER_KEY segment of an EXTENDED COPY list:
 * forward a PERSISTENT RESERVE OUT (REGISTER) command to the destination
 * CSCD.  First pass (stage 0) builds and queues the request and returns
 * CTL_RETVAL_QUEUED; on re-entry (stage 1) it reaps the completed I/O
 * and reports the final status.
 * NOTE(review): this view of the file is missing lines; comments only
 * describe what is visible here.
 */
1036 tpc_process_register_key(struct tpc_list *list)
1038 struct scsi_ec_segment_register_key *seg;
/* Record the failing segment index for COPY ABORTED sense data. */
1045 scsi_ulto4b(list->curseg, csi);
1046 if (list->stage == 1) {
/* Second pass: free the I/O and buffer issued on the first pass. */
1047 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1048 TAILQ_REMOVE(&list->allio, tio, links);
1049 ctl_free_io(tio->io);
1050 free(tio->buf, M_CTL);
1054 ctl_set_task_aborted(list->ctsio);
1055 return (CTL_RETVAL_ERROR);
1056 } else if (list->error) {
1057 tpc_set_io_error_sense(list);
1058 return (CTL_RETVAL_ERROR);
1060 return (CTL_RETVAL_COMPLETE);
1063 TAILQ_INIT(&list->allio);
1064 seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
1065 cscd = scsi_2btoul(seg->dst_cscd);
/* Resolve the destination CSCD descriptor into a local LUN id. */
1066 dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
1067 if (dl == UINT64_MAX) {
/* COPY ABORTED: copy target not reachable (ASC/ASCQ 0x08/0x04). */
1068 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1069 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1070 /*asc*/ 0x08, /*ascq*/ 0x04,
1071 SSD_ELEM_COMMAND, sizeof(csi), csi,
1073 return (CTL_RETVAL_ERROR);
1076 // printf("Register Key %ju\n", dl);
/* Build the PERSISTENT RESERVE OUT (REGISTER) request. */
1079 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1080 TAILQ_INIT(&tio->run);
1082 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1083 tio->io = tpcl_alloc_io();
1084 datalen = sizeof(struct scsi_per_res_out_parms);
1085 tio->buf = malloc(datalen, M_CTL, M_WAITOK);
1086 ctl_scsi_persistent_res_out(tio->io,
1087 tio->buf, datalen, SPRO_REGISTER, -1,
1088 scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
1089 /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
/* Allow a few retries before surfacing an error to the initiator. */
1090 tio->io->io_hdr.retries = 3;
1091 tio->target = SSD_FORWARDED_SDS_EXDST;
/* Stash the tpc_io so tpc_done() can find it on completion. */
1094 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1096 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1097 panic("tpcl_queue() error");
1098 return (CTL_RETVAL_QUEUED);
/*
 * Sum the lengths (in logical blocks) of an array of SCSI range
 * descriptors.
 */
1102 tpc_ranges_length(struct scsi_range_desc *range, int nrange)
1107 for (r = 0; r < nrange; r++)
1108 length += scsi_4btoul(range[r].length);
/*
 * Validate that every range descriptor fits within the LUN capacity
 * (maxlba).  On failure, stores the first offending LBA in *lba and
 * returns non-zero.
 */
1113 tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
1120 for (i = 0; i < nrange; i++) {
1121 b1 = scsi_8btou64(range[i].lba);
1122 l1 = scsi_4btoul(range[i].length);
/* Catch both 64-bit wraparound (b1+l1 < b1) and ranges past the end. */
1123 if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
1124 *lba = MAX(b1, maxlba + 1);
/*
 * Check a list of range descriptors for pairwise overlap; non-zero is
 * returned (in elided code) when two ranges intersect.  O(n^2) over at
 * most TPC_MAX_SEGS descriptors.
 */
1132 tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
1138 for (i = 0; i < nrange - 1; i++) {
1139 b1 = scsi_8btou64(range[i].lba);
1140 l1 = scsi_4btoul(range[i].length);
1141 for (j = i + 1; j < nrange; j++) {
1142 b2 = scsi_8btou64(range[j].lba);
1143 l2 = scsi_4btoul(range[j].length);
/* Standard interval-intersection test: [b1,b1+l1) overlaps [b2,b2+l2). */
1144 if (b1 + l1 > b2 && b2 + l2 > b1)
/*
 * Advance `skip` logical blocks into the concatenated range list.
 * On success stores the range index in *srange and the block offset
 * within that range in *soffset.
 */
1152 tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
1153 int *srange, off_t *soffset)
1160 while (r < nrange) {
/* Does the remaining skip land inside range r? */
1161 if (skip - off < scsi_4btoul(range[r].length)) {
1163 *soffset = skip - off;
1166 off += scsi_4btoul(range[r].length);
/*
 * Process one chunk of a WRITE USING TOKEN copy: read from the token's
 * source ranges into temporary buffers and write to this LUN's
 * destination ranges.  Re-entered once per chunk (list->stage counts
 * rounds); returns CTL_RETVAL_QUEUED while I/O is outstanding,
 * CTL_RETVAL_COMPLETE when all destination ranges are done, or
 * CTL_RETVAL_ERROR on abort/error.
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
1173 tpc_process_wut(struct tpc_list *list)
1175 struct tpc_io *tio, *tior, *tiow;
1178 off_t doffset, soffset;
1179 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
1180 uint32_t srcblock, dstblock, pb, pbo, adj;
1182 if (list->stage > 0) {
1183 /* Cleanup after previous rounds. */
1184 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1185 TAILQ_REMOVE(&list->allio, tio, links);
1186 ctl_free_io(tio->io);
1187 free(tio->buf, M_CTL);
1191 ctl_set_task_aborted(list->ctsio);
1192 return (CTL_RETVAL_ERROR);
1193 } else if (list->error) {
/* Forward the underlying SCSI status/sense if one was captured. */
1194 if (list->fwd_scsi_status) {
1195 list->ctsio->io_hdr.status =
1196 CTL_SCSI_ERROR | CTL_AUTOSENSE;
1197 list->ctsio->scsi_status = list->fwd_scsi_status;
1198 list->ctsio->sense_data = list->fwd_sense_data;
1199 list->ctsio->sense_len = list->fwd_sense_len;
1201 ctl_set_invalid_field(list->ctsio,
1202 /*sks_valid*/ 0, /*command*/ 0,
1203 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1205 return (CTL_RETVAL_ERROR);
/* Previous chunk succeeded: account for its progress. */
1207 list->cursectors += list->segsectors;
1208 list->curbytes += list->segbytes;
1211 /* Check where we are on destination ranges list. */
1212 if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
1213 &drange, &doffset) != 0)
1214 return (CTL_RETVAL_COMPLETE);
1215 dstblock = list->lun->be_lun->blocksize;
/* Physical-block size/offset, used to align chunk boundaries below. */
1216 pb = dstblock << list->lun->be_lun->pblockexp;
1217 if (list->lun->be_lun->pblockoff > 0)
1218 pbo = pb - dstblock * list->lun->be_lun->pblockoff;
1222 /* Check where we are on source ranges list. */
1223 srcblock = list->token->blocksize;
1224 if (tpc_skip_ranges(list->token->range, list->token->nrange,
1225 list->offset_into_rod + list->cursectors * dstblock / srcblock,
1226 &srange, &soffset) != 0) {
1227 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
1228 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1229 return (CTL_RETVAL_ERROR);
1232 srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
1233 dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
/* Chunk size: limited by remaining source range, destination range,
 * and TPC_MAX_IOCHUNK_SIZE; kept a multiple of the block sizes. */
1234 numbytes = srcblock *
1235 (scsi_4btoul(list->token->range[srange].length) - soffset);
1236 numbytes = omin(numbytes, dstblock *
1237 (scsi_4btoul(list->range[drange].length) - doffset));
1238 if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
1239 numbytes = TPC_MAX_IOCHUNK_SIZE;
1240 numbytes -= numbytes % dstblock;
1241 if (pb > dstblock) {
/* Trim to end on a physical-block boundary when possible. */
1242 adj = (dstlba * dstblock + numbytes - pbo) % pb;
1248 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
1249 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
1250 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1251 return (CTL_RETVAL_ERROR);
1254 list->segbytes = numbytes;
1255 list->segsectors = numbytes / dstblock;
1256 //printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
1261 TAILQ_INIT(&list->allio);
/* Split the chunk into read/write pairs of at most TPC_MAX_IO_SIZE. */
1262 while (donebytes < numbytes) {
1263 roundbytes = numbytes - donebytes;
1264 if (roundbytes > TPC_MAX_IO_SIZE) {
1265 roundbytes = TPC_MAX_IO_SIZE;
1266 roundbytes -= roundbytes % dstblock;
1267 if (pb > dstblock) {
1268 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
1269 if (roundbytes > adj)
/* Read request from the token's source LUN. */
1274 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
1275 TAILQ_INIT(&tior->run);
1276 tior->buf = malloc(roundbytes, M_CTL, M_WAITOK);
1278 TAILQ_INSERT_TAIL(&list->allio, tior, links);
1279 tior->io = tpcl_alloc_io();
1280 ctl_scsi_read_write(tior->io,
1281 /*data_ptr*/ tior->buf,
1282 /*data_len*/ roundbytes,
1285 /*minimum_cdb_size*/ 0,
1287 /*num_blocks*/ roundbytes / srcblock,
1288 /*tag_type*/ CTL_TAG_SIMPLE,
1290 tior->io->io_hdr.retries = 3;
1291 tior->lun = list->token->lun;
1292 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
/* Matching write request to this LUN; shares the read's buffer. */
1294 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1295 TAILQ_INIT(&tiow->run);
1297 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1298 tiow->io = tpcl_alloc_io();
1299 ctl_scsi_read_write(tiow->io,
1300 /*data_ptr*/ tior->buf,
1301 /*data_len*/ roundbytes,
1304 /*minimum_cdb_size*/ 0,
1306 /*num_blocks*/ roundbytes / dstblock,
1307 /*tag_type*/ CTL_TAG_SIMPLE,
1309 tiow->io->io_hdr.retries = 3;
1310 tiow->lun = list->lun->lun;
1311 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
/* Chain: the write runs after its read completes (tpc_done). */
1313 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
1314 TAILQ_INSERT_TAIL(&run, tior, rlinks);
1316 donebytes += roundbytes;
1317 srclba += roundbytes / srcblock;
1318 dstlba += roundbytes / dstblock;
/* Kick off all the reads; writes follow from their run lists. */
1321 while ((tior = TAILQ_FIRST(&run)) != NULL) {
1322 TAILQ_REMOVE(&run, tior, rlinks);
1323 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1324 panic("tpcl_queue() error");
1328 return (CTL_RETVAL_QUEUED);
/*
 * Process a WRITE USING TOKEN with the zero-ROD token: implement it as
 * WRITE SAME requests over the destination ranges.  Two-stage like
 * tpc_process_wut(): queue on the first pass, reap and report on the
 * second.
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
1332 tpc_process_zero_wut(struct tpc_list *list)
1334 struct tpc_io *tio, *tiow;
1335 struct runl run, *prun;
1337 uint32_t dstblock, len;
1339 if (list->stage > 0) {
1341 /* Cleanup after previous rounds. */
1342 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1343 TAILQ_REMOVE(&list->allio, tio, links);
1344 ctl_free_io(tio->io);
1348 ctl_set_task_aborted(list->ctsio);
1349 return (CTL_RETVAL_ERROR);
1350 } else if (list->error) {
/* Forward the underlying SCSI status/sense if one was captured. */
1351 if (list->fwd_scsi_status) {
1352 list->ctsio->io_hdr.status =
1353 CTL_SCSI_ERROR | CTL_AUTOSENSE;
1354 list->ctsio->scsi_status = list->fwd_scsi_status;
1355 list->ctsio->sense_data = list->fwd_sense_data;
1356 list->ctsio->sense_len = list->fwd_sense_len;
1358 ctl_set_invalid_field(list->ctsio,
1359 /*sks_valid*/ 0, /*command*/ 0,
1360 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1362 return (CTL_RETVAL_ERROR);
1364 list->cursectors += list->segsectors;
1365 list->curbytes += list->segbytes;
1366 return (CTL_RETVAL_COMPLETE);
1369 dstblock = list->lun->be_lun->blocksize;
1373 TAILQ_INIT(&list->allio);
1374 list->segsectors = 0;
/* One WRITE SAME per destination range descriptor. */
1375 for (r = 0; r < list->nrange; r++) {
1376 len = scsi_4btoul(list->range[r].length);
1380 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1381 TAILQ_INIT(&tiow->run);
1383 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1384 tiow->io = tpcl_alloc_io();
1385 ctl_scsi_write_same(tiow->io,
1389 /*lba*/ scsi_8btou64(list->range[r].lba),
1391 /*tag_type*/ CTL_TAG_SIMPLE,
1393 tiow->io->io_hdr.retries = 3;
1394 tiow->lun = list->lun->lun;
1395 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
1397 TAILQ_INSERT_TAIL(prun, tiow, rlinks);
1399 list->segsectors += len;
1401 list->segbytes = list->segsectors * dstblock;
/* Nothing to do (all ranges empty): complete via elided path. */
1403 if (TAILQ_EMPTY(&run))
1406 while ((tiow = TAILQ_FIRST(&run)) != NULL) {
1407 TAILQ_REMOVE(&run, tiow, rlinks);
1408 if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
1409 panic("tpcl_queue() error");
1413 return (CTL_RETVAL_QUEUED);
/*
 * Main copy-list state machine.  Dispatches WRITE USING TOKEN lists to
 * tpc_process_wut()/tpc_process_zero_wut(), and walks EXTENDED COPY
 * segment lists, dispatching each segment by type.  Re-entered from
 * tpc_done() when queued I/O completes; finishes by completing the
 * original ctsio via ctl_done().
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
1417 tpc_process(struct tpc_list *list)
1419 struct ctl_lun *lun = list->lun;
1420 struct ctl_softc *softc = lun->ctl_softc;
1421 struct scsi_ec_segment *seg;
1422 struct ctl_scsiio *ctsio = list->ctsio;
1423 int retval = CTL_RETVAL_COMPLETE;
1426 if (list->service_action == EC_WUT) {
/* Token present: real copy; absent: zero-ROD (write zeroes). */
1427 if (list->token != NULL)
1428 retval = tpc_process_wut(list);
1430 retval = tpc_process_zero_wut(list);
1431 if (retval == CTL_RETVAL_QUEUED)
1433 if (retval == CTL_RETVAL_ERROR) {
1438 //printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
/* EXTENDED COPY: process segments in order until done or queued. */
1439 while (list->curseg < list->nseg) {
1440 seg = list->seg[list->curseg];
1441 switch (seg->type_code) {
1443 retval = tpc_process_b2b(list);
1446 retval = tpc_process_verify(list);
1448 case EC_SEG_REGISTER_KEY:
1449 retval = tpc_process_register_key(list);
/* Unsupported segment type: COPY ABORTED (ASC/ASCQ 0x26/0x09). */
1452 scsi_ulto4b(list->curseg, csi);
1453 ctl_set_sense(ctsio, /*current_error*/ 1,
1454 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1455 /*asc*/ 0x26, /*ascq*/ 0x09,
1456 SSD_ELEM_COMMAND, sizeof(csi), csi,
1460 if (retval == CTL_RETVAL_QUEUED)
1462 if (retval == CTL_RETVAL_ERROR) {
1471 ctl_set_success(ctsio);
1474 //printf("ZZZ done\n");
1475 free(list->params, M_CTL);
1476 list->params = NULL;
/* Drop our reference on the ROD token, stamping last activity. */
1478 mtx_lock(&softc->tpc_lock);
1479 if (--list->token->active == 0)
1480 list->token->last_active = time_uptime;
1481 mtx_unlock(&softc->tpc_lock);
1484 mtx_lock(&lun->lun_lock);
/* Without a list id the list is not queryable: remove it now;
 * otherwise keep it around (completed) for RECEIVE COPY STATUS. */
1485 if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
1486 TAILQ_REMOVE(&lun->tpc_lists, list, links);
1489 list->completed = 1;
1490 list->last_active = time_uptime;
1491 list->sense_data = ctsio->sense_data;
1492 list->sense_len = ctsio->sense_len;
1493 list->scsi_status = ctsio->scsi_status;
1495 mtx_unlock(&lun->lun_lock);
1497 ctl_done((union ctl_io *)ctsio);
1501 * For any sort of check condition, busy, etc., we just retry. We do not
1502 * decrement the retry count for unit attention type errors. These are
1503 * normal, and we want to save the retry count for "real" errors. Otherwise,
1504 * we could end up with situations where a command will succeed in some
1505 * situations and fail in others, depending on whether a unit attention is
1506 * pending. Also, some of our error recovery actions, most notably the
1507 * LUN reset action, will cause a unit attention.
1509 * We can add more detail here later if necessary.
static tpc_error_action
/*
 * Decide how to react to a CHECK CONDITION from a forwarded command:
 * returns a tpc_error_action, defaulting to retry, optionally OR-ed
 * with TPC_ERR_NO_DECREMENT (retry without consuming a retry count),
 * or TPC_ERR_FAIL for conditions retrying cannot cure.
 */
1512 tpc_checkcond_parse(union ctl_io *io)
1514 tpc_error_action error_action;
1515 int error_code, sense_key, asc, ascq;
1518 * Default to retrying the command.
1520 error_action = TPC_ERR_RETRY;
1522 scsi_extract_sense_len(&io->scsiio.sense_data,
1523 io->scsiio.sense_len,
1530 switch (error_code) {
1531 case SSD_DEFERRED_ERROR:
1532 case SSD_DESC_DEFERRED_ERROR:
/* Deferred errors: retry without charging the retry budget. */
1533 error_action |= TPC_ERR_NO_DECREMENT;
1535 case SSD_CURRENT_ERROR:
1536 case SSD_DESC_CURRENT_ERROR:
1538 switch (sense_key) {
1539 case SSD_KEY_UNIT_ATTENTION:
/* Unit attentions are expected noise; free retry. */
1540 error_action |= TPC_ERR_NO_DECREMENT;
1542 case SSD_KEY_HARDWARE_ERROR:
1544 * This is our generic "something bad happened"
1545 * error code. It often isn't recoverable.
1547 if ((asc == 0x44) && (ascq == 0x00))
1548 error_action = TPC_ERR_FAIL;
1550 case SSD_KEY_NOT_READY:
1552 * If the LUN is powered down, there likely isn't
1553 * much point in retrying right now.
1555 if ((asc == 0x04) && (ascq == 0x02))
1556 error_action = TPC_ERR_FAIL;
1558 * If the LUN is offline, there probably isn't much
1559 * point in retrying, either.
1561 if ((asc == 0x04) && (ascq == 0x03))
1562 error_action = TPC_ERR_FAIL;
1566 return (error_action);
static tpc_error_action
/*
 * Map a completed I/O's status to a tpc_error_action.  CHECK CONDITION
 * is delegated to tpc_checkcond_parse(); everything else defaults to
 * retry.  Panics on an unexpected ctl_io type.
 */
1570 tpc_error_parse(union ctl_io *io)
1572 tpc_error_action error_action = TPC_ERR_RETRY;
1574 switch (io->io_hdr.io_type) {
1576 switch (io->io_hdr.status & CTL_STATUS_MASK) {
1577 case CTL_SCSI_ERROR:
1578 switch (io->scsiio.scsi_status) {
1579 case SCSI_STATUS_CHECK_COND:
1580 error_action = tpc_checkcond_parse(io);
1593 panic("%s: invalid ctl_io type %d\n", __func__,
1594 io->io_hdr.io_type);
1597 return (error_action);
/*
 * Completion callback for every I/O forwarded on behalf of a copy
 * list.  Implements minimal retry logic, captures error status into
 * the owning tpc_list, chains any dependent I/Os (tio->run), and
 * re-enters tpc_process() when the last outstanding I/O of the round
 * finishes.
 */
1601 tpc_done(union ctl_io *io)
1603 struct tpc_io *tio, *tior;
1606 * Very minimal retry logic. We basically retry if we got an error
1607 * back, and the retry count is greater than 0. If we ever want
1608 * more sophisticated initiator type behavior, the CAM error
1609 * recovery code in ../common might be helpful.
1611 tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1612 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
1613 && (io->io_hdr.retries > 0)) {
1614 ctl_io_status old_status;
1615 tpc_error_action error_action;
1617 error_action = tpc_error_parse(io);
1618 switch (error_action & TPC_ERR_MASK) {
1623 if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
1624 io->io_hdr.retries--;
/* Reset the I/O's state so it can be requeued as new. */
1625 old_status = io->io_hdr.status;
1626 io->io_hdr.status = CTL_STATUS_NONE;
1627 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
1628 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1629 if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
1630 printf("%s: error returned from tpcl_queue()!\n",
/* Requeue failed: restore status and fall through to error path. */
1632 io->io_hdr.status = old_status;
/* Out of retries (or non-retryable): record the failure on the list. */
1638 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
1639 tio->list->error = 1;
1640 if (io->io_hdr.io_type == CTL_IO_SCSI &&
1641 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
/* Save status/sense so it can be forwarded to the initiator. */
1642 tio->list->fwd_scsi_status = io->scsiio.scsi_status;
1643 tio->list->fwd_sense_data = io->scsiio.sense_data;
1644 tio->list->fwd_sense_len = io->scsiio.sense_len;
1645 tio->list->fwd_target = tio->target;
1646 tio->list->fwd_cscd = tio->cscd;
1649 atomic_add_int(&tio->list->curops, 1);
1650 if (!tio->list->error && !tio->list->abort) {
/* Launch I/Os that were waiting on this one (e.g. write after read). */
1651 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
1652 TAILQ_REMOVE(&tio->run, tior, rlinks);
1653 atomic_add_int(&tio->list->tbdio, 1);
1654 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1655 panic("tpcl_queue() error");
/* Last outstanding I/O of the round: advance the state machine. */
1658 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
1659 tpc_process(tio->list);
/*
 * Handle the EXTENDED COPY (LID1) command: fetch and validate the
 * parameter list (CSCD descriptors, segment descriptors, inline data),
 * build a tpc_list, register it on the LUN, and start processing.
 * First invocation allocates the data buffer and starts the datamove;
 * the command is re-entered once the data has arrived.
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
1663 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
1665 struct ctl_lun *lun = CTL_LUN(ctsio);
1666 struct scsi_extended_copy *cdb;
1667 struct scsi_extended_copy_lid1_data *data;
1668 struct scsi_ec_cscd *cscd;
1669 struct scsi_ec_segment *seg;
1670 struct tpc_list *list, *tlist;
1673 int len, off, lencscd, lenseg, leninl, nseg;
1675 CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
1677 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1678 len = scsi_4btoul(cdb->length);
1681 ctl_set_success(ctsio);
/* CDB-level sanity check on the parameter list length. */
1684 if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
1685 len > sizeof(struct scsi_extended_copy_lid1_data) +
1686 TPC_MAX_LIST + TPC_MAX_INLINE) {
1687 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1688 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1693 * If we've got a kernel request that hasn't been malloced yet,
1694 * malloc it and tell the caller the data buffer is here.
1696 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1697 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1698 ctsio->kern_data_len = len;
1699 ctsio->kern_total_len = len;
1700 ctsio->kern_rel_offset = 0;
1701 ctsio->kern_sg_entries = 0;
1702 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1703 ctsio->be_move_done = ctl_config_move_done;
1704 ctl_datamove((union ctl_io *)ctsio);
1706 return (CTL_RETVAL_COMPLETE);
/* Data has arrived: validate the parameter list header fields. */
1709 data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
1710 lencscd = scsi_2btoul(data->cscd_list_length);
1711 lenseg = scsi_4btoul(data->segment_list_length);
1712 leninl = scsi_4btoul(data->inline_data_length);
1713 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
/* ILLEGAL REQUEST: too many CSCD descriptors (0x26/0x06). */
1714 ctl_set_sense(ctsio, /*current_error*/ 1,
1715 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1716 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1719 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
/* ILLEGAL REQUEST: too many segment descriptors (0x26/0x08). */
1720 ctl_set_sense(ctsio, /*current_error*/ 1,
1721 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1722 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
/* Combined lengths must fit in what the initiator actually sent. */
1725 if (lencscd + lenseg > TPC_MAX_LIST ||
1726 leninl > TPC_MAX_INLINE ||
1727 len < sizeof(struct scsi_extended_copy_lid1_data) +
1728 lencscd + lenseg + leninl) {
1729 ctl_set_param_len_error(ctsio);
1733 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1734 list->service_action = cdb->service_action;
/* "insecure_tpc" option disables the initiator-port access check. */
1735 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
1736 if (value != NULL && strcmp(value, "on") == 0)
1737 list->init_port = -1;
1739 list->init_port = ctsio->io_hdr.nexus.targ_port;
1740 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
1741 list->list_id = data->list_identifier;
1742 list->flags = data->flags;
1743 list->params = ctsio->kern_data_ptr;
1744 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
/* Walk and validate the CSCD descriptors (only EC_CSCD_ID allowed). */
1745 ptr = &data->data[0];
1746 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
1747 cscd = (struct scsi_ec_cscd *)(ptr + off);
1748 if (cscd->type_code != EC_CSCD_ID) {
1750 ctl_set_sense(ctsio, /*current_error*/ 1,
1751 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1752 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
/* Walk the variable-length segment descriptors. */
1756 ptr = &data->data[lencscd];
1757 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1758 if (nseg >= TPC_MAX_SEGS) {
1760 ctl_set_sense(ctsio, /*current_error*/ 1,
1761 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1762 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1765 seg = (struct scsi_ec_segment *)(ptr + off);
1766 if (seg->type_code != EC_SEG_B2B &&
1767 seg->type_code != EC_SEG_VERIFY &&
1768 seg->type_code != EC_SEG_REGISTER_KEY) {
1770 ctl_set_sense(ctsio, /*current_error*/ 1,
1771 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1772 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1775 list->seg[nseg] = seg;
1776 off += sizeof(struct scsi_ec_segment) +
1777 scsi_2btoul(seg->descr_length);
1779 list->inl = &data->data[lencscd + lenseg];
1780 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1782 list->leninl = leninl;
1783 list->ctsio = ctsio;
/* Register the list on the LUN, replacing a completed one with the
 * same id; reject if an identical id is still in progress. */
1785 mtx_lock(&lun->lun_lock);
1786 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1787 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1788 if (tlist != NULL && !tlist->completed) {
1789 mtx_unlock(&lun->lun_lock);
1791 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1792 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1796 if (tlist != NULL) {
1797 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1801 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1802 mtx_unlock(&lun->lun_lock);
1805 return (CTL_RETVAL_COMPLETE);
/* Error path: release the data buffer and complete the command. */
1808 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1809 free(ctsio->kern_data_ptr, M_CTL);
1810 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1812 ctl_done((union ctl_io *)ctsio);
1813 return (CTL_RETVAL_COMPLETE);
/*
 * Handle the EXTENDED COPY (LID4) command.  Same flow as the LID1
 * handler: fetch and validate the parameter list, build a tpc_list,
 * register it on the LUN, and start processing.  LID4 differs in the
 * parameter-list header layout (4-byte list identifier, 2-byte
 * segment/inline lengths).
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
1817 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
1819 struct ctl_lun *lun = CTL_LUN(ctsio);
1820 struct scsi_extended_copy *cdb;
1821 struct scsi_extended_copy_lid4_data *data;
1822 struct scsi_ec_cscd *cscd;
1823 struct scsi_ec_segment *seg;
1824 struct tpc_list *list, *tlist;
1827 int len, off, lencscd, lenseg, leninl, nseg;
1829 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
1831 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1832 len = scsi_4btoul(cdb->length);
1835 ctl_set_success(ctsio);
/* CDB-level sanity check on the parameter list length. */
1838 if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
1839 len > sizeof(struct scsi_extended_copy_lid4_data) +
1840 TPC_MAX_LIST + TPC_MAX_INLINE) {
1841 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1842 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1847 * If we've got a kernel request that hasn't been malloced yet,
1848 * malloc it and tell the caller the data buffer is here.
1850 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1851 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1852 ctsio->kern_data_len = len;
1853 ctsio->kern_total_len = len;
1854 ctsio->kern_rel_offset = 0;
1855 ctsio->kern_sg_entries = 0;
1856 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1857 ctsio->be_move_done = ctl_config_move_done;
1858 ctl_datamove((union ctl_io *)ctsio);
1860 return (CTL_RETVAL_COMPLETE);
/* Data has arrived: validate the parameter list header fields.
 * LID4 uses 2-byte segment/inline length fields (unlike LID1's 4). */
1863 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
1864 lencscd = scsi_2btoul(data->cscd_list_length);
1865 lenseg = scsi_2btoul(data->segment_list_length);
1866 leninl = scsi_2btoul(data->inline_data_length);
1867 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
/* ILLEGAL REQUEST: too many CSCD descriptors (0x26/0x06). */
1868 ctl_set_sense(ctsio, /*current_error*/ 1,
1869 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1870 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1873 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
/* ILLEGAL REQUEST: too many segment descriptors (0x26/0x08). */
1874 ctl_set_sense(ctsio, /*current_error*/ 1,
1875 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1876 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
/* Combined lengths must fit in what the initiator actually sent.
 * Fixed: this is the LID4 handler, so the header size must be the
 * LID4 structure's, not LID1's (which under-validated `len`). */
1879 if (lencscd + lenseg > TPC_MAX_LIST ||
1880 leninl > TPC_MAX_INLINE ||
1881 len < sizeof(struct scsi_extended_copy_lid4_data) +
1882 lencscd + lenseg + leninl) {
1883 ctl_set_param_len_error(ctsio);
1887 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1888 list->service_action = cdb->service_action;
/* "insecure_tpc" option disables the initiator-port access check. */
1889 value = dnvlist_get_string(lun->be_lun->options, "insecure_tpc", NULL);
1890 if (value != NULL && strcmp(value, "on") == 0)
1891 list->init_port = -1;
1893 list->init_port = ctsio->io_hdr.nexus.targ_port;
1894 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
1895 list->list_id = scsi_4btoul(data->list_identifier);
1896 list->flags = data->flags;
1897 list->params = ctsio->kern_data_ptr;
1898 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
/* Walk and validate the CSCD descriptors (only EC_CSCD_ID allowed). */
1899 ptr = &data->data[0];
1900 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
1901 cscd = (struct scsi_ec_cscd *)(ptr + off);
1902 if (cscd->type_code != EC_CSCD_ID) {
1904 ctl_set_sense(ctsio, /*current_error*/ 1,
1905 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1906 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
/* Walk the variable-length segment descriptors. */
1910 ptr = &data->data[lencscd];
1911 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1912 if (nseg >= TPC_MAX_SEGS) {
1914 ctl_set_sense(ctsio, /*current_error*/ 1,
1915 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1916 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1919 seg = (struct scsi_ec_segment *)(ptr + off);
1920 if (seg->type_code != EC_SEG_B2B &&
1921 seg->type_code != EC_SEG_VERIFY &&
1922 seg->type_code != EC_SEG_REGISTER_KEY) {
1924 ctl_set_sense(ctsio, /*current_error*/ 1,
1925 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1926 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1929 list->seg[nseg] = seg;
1930 off += sizeof(struct scsi_ec_segment) +
1931 scsi_2btoul(seg->descr_length);
1933 list->inl = &data->data[lencscd + lenseg];
1934 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1936 list->leninl = leninl;
1937 list->ctsio = ctsio;
/* Register the list on the LUN, replacing a completed one with the
 * same id; reject if an identical id is still in progress. */
1939 mtx_lock(&lun->lun_lock);
1940 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1941 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1942 if (tlist != NULL && !tlist->completed) {
1943 mtx_unlock(&lun->lun_lock);
1945 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1946 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1950 if (tlist != NULL) {
1951 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1955 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1956 mtx_unlock(&lun->lun_lock);
1959 return (CTL_RETVAL_COMPLETE);
/* Error path: release the data buffer and complete the command. */
1962 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1963 free(ctsio->kern_data_ptr, M_CTL);
1964 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1966 ctl_done((union ctl_io *)ctsio);
1967 return (CTL_RETVAL_COMPLETE);
/*
 * Fill in a ROD token (ROD_TYPE_AUR) for POPULATE TOKEN: a unique id,
 * the source LUN's CSCD identification descriptor, the represented
 * byte count, READ CAPACITY(16)-style device data, the target port
 * device id, and random padding for the remainder of the body.
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
1971 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
1972 struct scsi_token *token)
1975 struct scsi_vpd_id_descriptor *idd = NULL;
1976 struct scsi_ec_cscd_id *cscd;
1977 struct scsi_read_capacity_data_long *dtsd;
1980 scsi_ulto4b(ROD_TYPE_AUR, token->type);
1981 scsi_ulto2b(0x01f8, token->length);
/* Monotonic id makes each token unique. */
1982 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
/* Prefer the NAA designator, fall back to EUI-64. */
1984 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1985 lun->lun_devid->data, lun->lun_devid->len,
1986 scsi_devid_is_lun_naa);
1987 if (idd == NULL && lun->lun_devid)
1988 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1989 lun->lun_devid->data, lun->lun_devid->len,
1990 scsi_devid_is_lun_eui64);
/* NOTE(review): idd is dereferenced below; a NULL guard may exist in
 * the elided line(s) — confirm against the full source. */
1992 cscd = (struct scsi_ec_cscd_id *)&token->body[8];
1993 cscd->type_code = EC_CSCD_ID;
1994 cscd->luidt_pdt = T_DIRECT;
1995 memcpy(&cscd->codeset, idd, 4 + idd->length);
1996 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
1998 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
1999 scsi_u64to8b(len, &token->body[48]);
2001 /* ROD token device type specific data (RC16 without first field) */
2002 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
2003 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
2004 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
2005 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
2006 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
2007 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
2009 if (port->target_devid) {
2010 targid_len = port->target_devid->len;
2011 memcpy(&token->body[120], port->target_devid->data, targid_len);
/* Fill the remainder of the token body with random bytes. */
2014 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
/*
 * Handle the POPULATE TOKEN command: fetch and validate the parameter
 * list (timeout, ROD type, range descriptors), create a ROD token
 * covering the requested ranges, and register both the completed
 * tpc_list (for RECEIVE ROD TOKEN INFORMATION) and the token itself.
 * First invocation allocates the data buffer and starts the datamove.
 * NOTE(review): lines are elided in this view; comments describe only
 * what is visible.
 */
2018 ctl_populate_token(struct ctl_scsiio *ctsio)
2020 struct ctl_softc *softc = CTL_SOFTC(ctsio);
2021 struct ctl_port *port = CTL_PORT(ctsio);
2022 struct ctl_lun *lun = CTL_LUN(ctsio);
2023 struct scsi_populate_token *cdb;
2024 struct scsi_populate_token_data *data;
2025 struct tpc_list *list, *tlist;
2026 struct tpc_token *token;
2028 int len, lendata, lendesc;
2030 CTL_DEBUG_PRINT(("ctl_populate_token\n"));
2032 cdb = (struct scsi_populate_token *)ctsio->cdb;
2033 len = scsi_4btoul(cdb->length);
/* CDB-level sanity check on the parameter list length. */
2035 if (len < sizeof(struct scsi_populate_token_data) ||
2036 len > sizeof(struct scsi_populate_token_data) +
2037 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
2038 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
2039 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
2044 * If we've got a kernel request that hasn't been malloced yet,
2045 * malloc it and tell the caller the data buffer is here.
2047 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
2048 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
2049 ctsio->kern_data_len = len;
2050 ctsio->kern_total_len = len;
2051 ctsio->kern_rel_offset = 0;
2052 ctsio->kern_sg_entries = 0;
2053 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2054 ctsio->be_move_done = ctl_config_move_done;
2055 ctl_datamove((union ctl_io *)ctsio);
2057 return (CTL_RETVAL_COMPLETE);
/* Data has arrived: cross-check the embedded length fields.
 * data->length excludes its own 2 bytes, hence the "- 2". */
2060 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
2061 lendata = scsi_2btoul(data->length);
2062 if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
2063 sizeof(struct scsi_range_desc)) {
2064 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2065 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
2068 lendesc = scsi_2btoul(data->range_descriptor_length);
2069 if (lendesc < sizeof(struct scsi_range_desc) ||
2070 len < sizeof(struct scsi_populate_token_data) + lendesc ||
2071 lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
2072 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2073 /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
2077 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
2078 scsi_4btoul(cdb->list_identifier),
2079 data->flags, scsi_4btoul(data->inactivity_timeout),
2080 scsi_4btoul(data->rod_type),
2081 scsi_2btoul(data->range_descriptor_length));
2084 /* Validate INACTIVITY TIMEOUT field */
2085 if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
2086 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2087 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
2092 /* Validate ROD TYPE field */
2093 if ((data->flags & EC_PT_RTV) &&
2094 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
2095 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2096 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
2100 /* Validate list of ranges */
2101 if (tpc_check_ranges_l(&data->desc[0],
2102 scsi_2btoul(data->range_descriptor_length) /
2103 sizeof(struct scsi_range_desc),
2104 lun->be_lun->maxlba, &lba) != 0) {
2105 ctl_set_lba_out_of_range(ctsio, lba);
2108 if (tpc_check_ranges_x(&data->desc[0],
2109 scsi_2btoul(data->range_descriptor_length) /
2110 sizeof(struct scsi_range_desc)) != 0) {
2111 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
2112 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
/* Build the tpc_list tracking this (immediately-completed) command. */
2117 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
2118 list->service_action = cdb->service_action;
2119 list->init_port = ctsio->io_hdr.nexus.targ_port;
2120 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
2121 list->list_id = scsi_4btoul(cdb->list_identifier);
2122 list->flags = data->flags;
2123 list->ctsio = ctsio;
/* Register the list, replacing a completed one with the same id;
 * reject if an identical id is still in progress. */
2125 mtx_lock(&lun->lun_lock);
2126 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
2127 if (tlist != NULL && !tlist->completed) {
2128 mtx_unlock(&lun->lun_lock);
2130 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2131 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
2135 if (tlist != NULL) {
2136 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
2139 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
2140 mtx_unlock(&lun->lun_lock);
/* Create the ROD token describing the requested ranges. */
2142 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
2143 token->lun = lun->lun;
2144 token->blocksize = lun->be_lun->blocksize;
2145 token->params = ctsio->kern_data_ptr;
2146 token->range = &data->desc[0];
2147 token->nrange = scsi_2btoul(data->range_descriptor_length) /
2148 sizeof(struct scsi_range_desc);
2149 list->cursectors = tpc_ranges_length(token->range, token->nrange);
2150 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
2151 tpc_create_token(lun, port, list->curbytes,
2152 (struct scsi_token *)token->token);
/* Clamp the inactivity timeout into [TPC_MIN, default-if-zero]. */
2154 token->last_active = time_uptime;
2155 token->timeout = scsi_4btoul(data->inactivity_timeout);
2156 if (token->timeout == 0)
2157 token->timeout = TPC_DFL_TOKEN_TIMEOUT;
2158 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
2159 token->timeout = TPC_MIN_TOKEN_TIMEOUT;
2160 memcpy(list->res_token, token->token, sizeof(list->res_token));
2161 list->res_token_valid = 1;
2163 list->completed = 1;
2164 list->last_active = time_uptime;
2165 mtx_lock(&softc->tpc_lock);
2166 TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
2167 mtx_unlock(&softc->tpc_lock);
2168 ctl_set_success(ctsio);
2169 ctl_done((union ctl_io *)ctsio);
2170 return (CTL_RETVAL_COMPLETE);
/* Error path: release the data buffer and complete the command. */
2173 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2174 free(ctsio->kern_data_ptr, M_CTL);
2175 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2177 ctl_done((union ctl_io *)ctsio);
2178 return (CTL_RETVAL_COMPLETE);
/*
 * WRITE USING TOKEN: write data previously represented by a ROD token
 * into one or more LBA ranges of this LUN (SPC-4 token-based copy).
 * Returns CTL_RETVAL_COMPLETE on every visible path.
 * NOTE(review): this chunk is missing structural lines (the function's
 * return-type line, braces, "goto done;" statements, the "done:" label,
 * and the declaration of "lba" used below); comments annotate only the
 * statements that are visible.
 */
2182 ctl_write_using_token(struct ctl_scsiio *ctsio)
2184 struct ctl_softc *softc = CTL_SOFTC(ctsio);
2185 struct ctl_lun *lun = CTL_LUN(ctsio);
2186 struct scsi_write_using_token *cdb;
2187 struct scsi_write_using_token_data *data;
2188 struct tpc_list *list, *tlist;
2189 struct tpc_token *token;
2191 int len, lendata, lendesc;
2193 CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
2195 cdb = (struct scsi_write_using_token *)ctsio->cdb;
2196 len = scsi_4btoul(cdb->length);
/*
 * The CDB's parameter list length must cover at least the fixed WUT
 * parameter header and at most TPC_MAX_SEGS range descriptors;
 * otherwise flag CDB byte 9 (the length field) as invalid.
 */
2198 if (len < sizeof(struct scsi_write_using_token_data) ||
2199 len > sizeof(struct scsi_write_using_token_data) +
2200 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
2201 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
2202 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
/*
2207 * If we've got a kernel request that hasn't been malloced yet,
2208 * malloc it and tell the caller the data buffer is here.
 */
2210 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
2211 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
2212 ctsio->kern_data_len = len;
2213 ctsio->kern_total_len = len;
2214 ctsio->kern_rel_offset = 0;
2215 ctsio->kern_sg_entries = 0;
2216 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2217 ctsio->be_move_done = ctl_config_move_done;
/* Fetch the parameter list from the initiator; we are re-entered
 * once the data-move completes. */
2218 ctl_datamove((union ctl_io *)ctsio);
2220 return (CTL_RETVAL_COMPLETE);
/* Parameter data has arrived: cross-check the embedded lengths. */
2223 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
2224 lendata = scsi_2btoul(data->length);
/* The in-data "length" field excludes its own 2 bytes, hence "- 2". */
2225 if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
2226 sizeof(struct scsi_range_desc)) {
2227 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2228 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
/*
 * The range-descriptor list must hold at least one descriptor and fit
 * inside both the CDB length and the in-data length (field 534 is the
 * range descriptor length field's offset in the parameter data).
 */
2231 lendesc = scsi_2btoul(data->range_descriptor_length);
2232 if (lendesc < sizeof(struct scsi_range_desc) ||
2233 len < sizeof(struct scsi_write_using_token_data) + lendesc ||
2234 lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
2235 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2236 /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
/* NOTE(review): upstream keeps this debug printf inside a comment
 * block; the enclosing comment delimiters are not visible here. */
2240 printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
2241 scsi_4btoul(cdb->list_identifier),
2242 data->flags, scsi_8btou64(data->offset_into_rod),
2243 scsi_2btoul(data->range_descriptor_length));
2246 /* Validate list of ranges */
/* tpc_check_ranges_l: bounds-check every range against the LUN's
 * maxlba; on failure "lba" reports the offending LBA. */
2247 if (tpc_check_ranges_l(&data->desc[0],
2248 scsi_2btoul(data->range_descriptor_length) /
2249 sizeof(struct scsi_range_desc),
2250 lun->be_lun->maxlba, &lba) != 0) {
2251 ctl_set_lba_out_of_range(ctsio, lba);
/* tpc_check_ranges_x: second validation pass over the descriptors —
 * presumably cross/overlap checking; confirm in its definition. */
2254 if (tpc_check_ranges_x(&data->desc[0],
2255 scsi_2btoul(data->range_descriptor_length) /
2256 sizeof(struct scsi_range_desc)) != 0) {
2257 ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
2258 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
/* Build the tpc_list describing this WUT operation; "params" takes
 * ownership of the parameter buffer, "range" aliases into it. */
2263 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
2264 list->service_action = cdb->service_action;
2265 list->init_port = ctsio->io_hdr.nexus.targ_port;
2266 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
2267 list->list_id = scsi_4btoul(cdb->list_identifier);
2268 list->flags = data->flags;
2269 list->params = ctsio->kern_data_ptr;
2270 list->range = &data->desc[0];
2271 list->nrange = scsi_2btoul(data->range_descriptor_length) /
2272 sizeof(struct scsi_range_desc);
2273 list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
2274 list->ctsio = ctsio;
/*
 * A duplicate list id from the same initiator is rejected unless the
 * earlier list already completed, in which case it is superseded.
 */
2276 mtx_lock(&lun->lun_lock);
2277 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
2278 if (tlist != NULL && !tlist->completed) {
2279 mtx_unlock(&lun->lun_lock);
2281 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2282 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
2286 if (tlist != NULL) {
2287 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
2290 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
2291 mtx_unlock(&lun->lun_lock);
2293 /* Block device zero ROD token -> no token. */
2294 if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
2296 return (CTL_RETVAL_COMPLETE);
/* Look up the presented ROD token by value in the softc-wide list. */
2299 mtx_lock(&softc->tpc_lock);
2300 TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
2301 if (memcmp(token->token, data->rod_token,
2302 sizeof(data->rod_token)) == 0)
2305 if (token != NULL) {
2307 list->token = token;
/* EC_WUT_DEL_TKN asks that the token be deleted after use; the
 * statement it guards is not visible in this chunk. */
2308 if (data->flags & EC_WUT_DEL_TKN)
2311 mtx_unlock(&softc->tpc_lock);
/*
 * Unknown token: undo the list insertion and report ILLEGAL REQUEST
 * with ASC/ASCQ 0x23/0x04 (INVALID TOKEN OPERATION, TOKEN UNKNOWN
 * per SPC-4).
 */
2312 if (token == NULL) {
2313 mtx_lock(&lun->lun_lock);
2314 TAILQ_REMOVE(&lun->tpc_lists, list, links);
2315 mtx_unlock(&lun->lun_lock);
2317 ctl_set_sense(ctsio, /*current_error*/ 1,
2318 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
2319 /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
2324 return (CTL_RETVAL_COMPLETE);
/* Error exit (the "done:" label is not visible here): release the
 * parameter buffer if we allocated it, then complete the command
 * with whatever error status was set above. */
2327 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2328 free(ctsio->kern_data_ptr, M_CTL);
2329 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2331 ctl_done((union ctl_io *)ctsio);
2332 return (CTL_RETVAL_COMPLETE);
/*
 * RECEIVE ROD TOKEN INFORMATION: report the status of an earlier
 * POPULATE TOKEN / WRITE USING TOKEN list from this initiator and,
 * when a token was produced, append it to the response.
 * NOTE(review): this chunk is missing structural lines (return type,
 * braces, the list == NULL check between lines 2356 and 2358, the
 * declarations of "ptr", "retval" and "list_id", the completed-list
 * cleanup/else branch, and the final return); comments annotate only
 * the visible statements.
 */
2336 ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
2338 struct ctl_lun *lun = CTL_LUN(ctsio);
2339 struct scsi_receive_rod_token_information *cdb;
2340 struct scsi_receive_copy_status_lid4_data *data;
2341 struct tpc_list *list;
2342 struct tpc_list list_copy;
2345 int alloc_len, total_len, token_len;
2348 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
2350 cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
2351 retval = CTL_RETVAL_COMPLETE;
/* Locate the list by id for this initiator under the LUN lock. */
2353 list_id = scsi_4btoul(cdb->list_identifier);
2354 mtx_lock(&lun->lun_lock);
2355 list = tpc_find_list(lun, list_id,
2356 ctl_get_initindex(&ctsio->io_hdr.nexus));
/* Not found: CDB byte 2 (list identifier) is invalid. */
2358 mtx_unlock(&lun->lun_lock);
2359 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2360 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
2362 ctl_done((union ctl_io *)ctsio);
/* A completed list is reported once and then removed; a snapshot
 * (list_copy) is used after the lock is dropped. */
2366 if (list->completed) {
2367 TAILQ_REMOVE(&lun->tpc_lists, list, links);
2370 mtx_unlock(&lun->lun_lock);
/* Size the response: fixed status data + sense bytes + 4-byte token
 * length header + (2 pad bytes + token) when a token is present. */
2372 token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
2373 total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
2374 alloc_len = scsi_4btoul(cdb->length);
/* Transfer is truncated to the initiator's allocation length. */
2376 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
2377 ctsio->kern_sg_entries = 0;
2378 ctsio->kern_rel_offset = 0;
2379 ctsio->kern_data_len = min(total_len, alloc_len);
2380 ctsio->kern_total_len = ctsio->kern_data_len;
/* Fill the RCS LID4-format status payload from the snapshot. */
2382 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
2383 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
2384 4 + token_len, data->available_data);
2385 data->response_to_service_action = list_copy.service_action;
2386 if (list_copy.completed) {
2387 if (list_copy.error)
2388 data->copy_command_status = RCS_CCS_ERROR;
2389 else if (list_copy.abort)
2390 data->copy_command_status = RCS_CCS_ABORTED;
2392 data->copy_command_status = RCS_CCS_COMPLETED;
/* Still running: report "in progress, foreground". */
2394 data->copy_command_status = RCS_CCS_INPROG_FG;
2395 scsi_ulto2b(list_copy.curops, data->operation_counter);
2396 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
2397 data->transfer_count_units = RCS_TC_LBAS;
2398 scsi_u64to8b(list_copy.cursectors, data->transfer_count);
2399 scsi_ulto2b(list_copy.curseg, data->segments_processed);
2400 data->length_of_the_sense_data_field = list_copy.sense_len;
2401 data->sense_data_length = list_copy.sense_len;
2402 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
/* The ROD token (when valid) follows the sense data: 4-byte length,
 * 2 reserved bytes, then the token itself. */
2404 ptr = &data->sense_data[data->length_of_the_sense_data_field];
2405 scsi_ulto4b(token_len, &ptr[0]);
2406 if (list_copy.res_token_valid) {
2407 scsi_ulto2b(0, &ptr[4]);
2408 memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
/* NOTE(review): upstream keeps this debug printf inside a comment
 * block; the enclosing comment delimiters are not visible here. */
2411 printf("RRTI(list=%u) valid=%d\n",
2412 scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
/* Ship the payload back to the initiator. */
2414 ctl_set_success(ctsio);
2415 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2416 ctsio->be_move_done = ctl_config_move_done;
2417 ctl_datamove((union ctl_io *)ctsio);
/*
 * REPORT ALL ROD TOKENS: return the 96-byte management entry of every
 * ROD token currently known to this softc, truncated to the
 * initiator's allocation length.
 * NOTE(review): this chunk is missing structural lines (return type,
 * braces, the token/i counter statements inside the two loops, the
 * "retval" declaration, and the final return); only the visible
 * statements are reproduced and annotated here.
 */
2422 ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
2424 struct ctl_softc *softc = CTL_SOFTC(ctsio);
2425 struct scsi_report_all_rod_tokens *cdb;
2426 struct scsi_report_all_rod_tokens_data *data;
2427 struct tpc_token *token;
2429 int alloc_len, total_len, tokens, i;
/* Fix: the debug banner previously printed
 * "ctl_receive_rod_token_information" — a copy/paste from the
 * preceding function.  It now names this function. */
2431 CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));
2433 cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
2434 retval = CTL_RETVAL_COMPLETE;
/* First pass: count tokens under the lock to size the response. */
2437 mtx_lock(&softc->tpc_lock);
2438 TAILQ_FOREACH(token, &softc->tpc_tokens, links)
2440 mtx_unlock(&softc->tpc_lock);
/* Each token contributes one 96-byte management entry. */
2444 total_len = sizeof(*data) + tokens * 96;
2445 alloc_len = scsi_4btoul(cdb->length);
/* Transfer is truncated to the initiator's allocation length. */
2447 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
2448 ctsio->kern_sg_entries = 0;
2449 ctsio->kern_rel_offset = 0;
2450 ctsio->kern_data_len = min(total_len, alloc_len);
2451 ctsio->kern_total_len = ctsio->kern_data_len;
2453 data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
/* Second pass: copy each token's entry; "i" counts entries actually
 * written (its increment is among the lines not visible here). */
2455 mtx_lock(&softc->tpc_lock);
2456 TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
2459 memcpy(&data->rod_management_token_list[i * 96],
2463 mtx_unlock(&softc->tpc_lock);
2464 scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/* NOTE(review): upstream keeps this debug printf inside a comment
 * block; the enclosing comment delimiters are not visible here. */
2466 printf("RART tokens=%d\n", i);
/* Ship the payload back to the initiator. */
2468 ctl_set_success(ctsio);
2469 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2470 ctsio->be_move_done = ctl_config_move_done;
2471 ctl_datamove((union ctl_io *)ctsio);