2 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/types.h>
35 #include <sys/module.h>
36 #include <sys/mutex.h>
37 #include <sys/condvar.h>
38 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/sysctl.h>
42 #include <machine/atomic.h>
45 #include <cam/scsi/scsi_all.h>
46 #include <cam/scsi/scsi_da.h>
47 #include <cam/ctl/ctl_io.h>
48 #include <cam/ctl/ctl.h>
49 #include <cam/ctl/ctl_frontend.h>
50 #include <cam/ctl/ctl_util.h>
51 #include <cam/ctl/ctl_backend.h>
52 #include <cam/ctl/ctl_ioctl.h>
53 #include <cam/ctl/ctl_ha.h>
54 #include <cam/ctl/ctl_private.h>
55 #include <cam/ctl/ctl_debug.h>
56 #include <cam/ctl/ctl_scsi_all.h>
57 #include <cam/ctl/ctl_tpc.h>
58 #include <cam/ctl/ctl_error.h>
60 #define TPC_MAX_CSCDS 64
61 #define TPC_MAX_SEGS 64
63 #define TPC_MAX_LIST 8192
64 #define TPC_MAX_INLINE 0
65 #define TPC_MAX_LISTS 255
66 #define TPC_MAX_IO_SIZE (1024 * 1024)
67 #define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16)
68 #define TPC_MIN_TOKEN_TIMEOUT 1
69 #define TPC_DFL_TOKEN_TIMEOUT 60
70 #define TPC_MAX_TOKEN_TIMEOUT 600
72 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
75 TPC_ERR_RETRY = 0x000,
78 TPC_ERR_NO_DECREMENT = 0x100
82 TAILQ_HEAD(runl, tpc_io);
88 struct tpc_list *list;
90 TAILQ_ENTRY(tpc_io) rlinks;
91 TAILQ_ENTRY(tpc_io) links;
99 struct scsi_range_desc *range;
104 TAILQ_ENTRY(tpc_token) links;
108 uint8_t service_action;
114 struct scsi_ec_cscd *cscd;
115 struct scsi_ec_segment *seg[TPC_MAX_SEGS];
120 struct tpc_token *token;
121 struct scsi_range_desc *range;
123 off_t offset_into_rod;
138 TAILQ_HEAD(, tpc_io) allio;
139 struct scsi_sense_data fwd_sense_data;
140 uint8_t fwd_sense_len;
141 uint8_t fwd_scsi_status;
144 struct scsi_sense_data sense_data;
147 struct ctl_scsiio *ctsio;
150 uint8_t res_token[512];
151 TAILQ_ENTRY(tpc_list) links;
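/*
 * Periodic housekeeping callout: free completed copy lists and inactive
 * ROD tokens whose inactivity timeouts have expired, then reschedule.
 */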
155 tpc_timeout(void *arg)
157 struct ctl_softc *softc = arg;
159 struct tpc_token *token, *ttoken;
160 struct tpc_list *list, *tlist;
162 /* Free completed lists with expired timeout. */
163 STAILQ_FOREACH(lun, &softc->lun_list, links) {
164 mtx_lock(&lun->lun_lock);
165 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
166 if (!list->completed || time_uptime < list->last_active +
167 TPC_DFL_TOKEN_TIMEOUT)
169 TAILQ_REMOVE(&lun->tpc_lists, list, links);
172 mtx_unlock(&lun->lun_lock);
175 /* Free inactive ROD tokens with expired timeout. */
176 mtx_lock(&softc->tpc_lock);
177 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
179 time_uptime < token->last_active + token->timeout + 1)
181 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
182 free(token->params, M_CTL);
185 mtx_unlock(&softc->tpc_lock);
186 callout_schedule(&softc->tpc_timeout, hz);
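/* Initialize softc-wide TPC state: token list, lock and timeout callout. */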
190 ctl_tpc_init(struct ctl_softc *softc)
193 mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
194 TAILQ_INIT(&softc->tpc_tokens);
195 callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
196 callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
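/* Tear down softc-wide TPC state: stop the callout and free all ROD tokens. */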
200 ctl_tpc_shutdown(struct ctl_softc *softc)
202 struct tpc_token *token;
204 callout_drain(&softc->tpc_timeout);
206 /* Free ROD tokens. */
207 mtx_lock(&softc->tpc_lock);
208 while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
209 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
210 free(token->params, M_CTL);
213 mtx_unlock(&softc->tpc_lock);
214 mtx_destroy(&softc->tpc_lock);
218 ctl_tpc_lun_init(struct ctl_lun *lun)
221 TAILQ_INIT(&lun->tpc_lists);
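/* Free the copy lists and inactive ROD tokens that belong to this LUN. */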
225 ctl_tpc_lun_shutdown(struct ctl_lun *lun)
227 struct ctl_softc *softc = lun->ctl_softc;
228 struct tpc_list *list;
229 struct tpc_token *token, *ttoken;
231 /* Free lists for this LUN. */
232 while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
233 TAILQ_REMOVE(&lun->tpc_lists, list, links);
234 KASSERT(list->completed,
235 ("Not completed TPC (%p) on shutdown", list));
239 /* Free ROD tokens for this LUN. */
240 mtx_lock(&softc->tpc_lock);
241 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
242 if (token->lun != lun->lun || token->active)
244 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
245 free(token->params, M_CTL);
248 mtx_unlock(&softc->tpc_lock);
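/*
 * Build the Third-party Copy VPD page, advertising this implementation's
 * EXTENDED COPY limits, supported commands and descriptors, and ROD token
 * capabilities.
 */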
252 ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
254 struct scsi_vpd_tpc *tpc_ptr;
255 struct scsi_vpd_tpc_descriptor *d_ptr;
256 struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
257 struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
258 struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
259 struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
260 struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
261 struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
262 struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
263 struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
264 struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
265 struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
266 struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
270 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
272 data_len = sizeof(struct scsi_vpd_tpc) +
273 sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
274 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
275 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
276 sizeof(struct scsi_vpd_tpc_descriptor_pd) +
277 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
278 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
279 sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
280 sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
281 sizeof(struct scsi_vpd_tpc_descriptor_srt) +
282 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
283 sizeof(struct scsi_vpd_tpc_descriptor_gco);
285 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
286 tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
287 ctsio->kern_sg_entries = 0;
289 if (data_len < alloc_len) {
290 ctsio->residual = alloc_len - data_len;
291 ctsio->kern_data_len = data_len;
292 ctsio->kern_total_len = data_len;
295 ctsio->kern_data_len = alloc_len;
296 ctsio->kern_total_len = alloc_len;
298 ctsio->kern_data_resid = 0;
299 ctsio->kern_rel_offset = 0;
300 ctsio->kern_sg_entries = 0;
303 * The control device is always connected. The disk device, on the
304 * other hand, may not be online all the time.
307 tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
308 lun->be_lun->lun_type;
310 tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
311 tpc_ptr->page_code = SVPD_SCSI_TPC;
312 scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
314 /* Block Device ROD Limits */
315 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
316 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
317 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
318 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
319 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
320 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
321 bdrl_ptr->maximum_inactivity_timeout);
322 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
323 bdrl_ptr->default_inactivity_timeout);
324 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
325 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
327 /* Supported commands */
328 d_ptr = (struct scsi_vpd_tpc_descriptor *)
329 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
330 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
331 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
332 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
333 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
334 scd_ptr = &sc_ptr->descr[0];
335 scd_ptr->opcode = EXTENDED_COPY;
336 scd_ptr->sa_length = 5;
337 scd_ptr->supported_service_actions[0] = EC_EC_LID1;
338 scd_ptr->supported_service_actions[1] = EC_EC_LID4;
339 scd_ptr->supported_service_actions[2] = EC_PT;
340 scd_ptr->supported_service_actions[3] = EC_WUT;
341 scd_ptr->supported_service_actions[4] = EC_COA;
342 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
343 &scd_ptr->supported_service_actions[scd_ptr->sa_length];
344 scd_ptr->opcode = RECEIVE_COPY_STATUS;
345 scd_ptr->sa_length = 6;
346 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
347 scd_ptr->supported_service_actions[1] = RCS_RCFD;
348 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
349 scd_ptr->supported_service_actions[3] = RCS_RCOP;
350 scd_ptr->supported_service_actions[4] = RCS_RRTI;
351 scd_ptr->supported_service_actions[5] = RCS_RART;
353 /* Parameter data. */
354 d_ptr = (struct scsi_vpd_tpc_descriptor *)
355 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
356 pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
357 scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
358 scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
359 scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
360 scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
361 scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
362 scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);
364 /* Supported Descriptors */
365 d_ptr = (struct scsi_vpd_tpc_descriptor *)
366 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
367 sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
368 scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
369 scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
370 sd_ptr->list_length = 4;
371 sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
372 sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
373 sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
374 sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;
376 /* Supported CSCD Descriptor IDs */
377 d_ptr = (struct scsi_vpd_tpc_descriptor *)
378 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
379 sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
380 scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
381 scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
382 scsi_ulto2b(2, sdid_ptr->list_length);
383 scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);
385 /* ROD Token Features */
386 d_ptr = (struct scsi_vpd_tpc_descriptor *)
387 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
388 rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
389 scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
390 scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
391 rtf_ptr->remote_tokens = 0;
392 scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
393 scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
394 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
395 rtf_ptr->maximum_token_inactivity_timeout);
396 scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
397 rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
398 &rtf_ptr->type_specific_features;
399 rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
400 scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
401 scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
402 scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
403 scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
404 scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
405 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
406 rtfb_ptr->optimal_bytes_from_token_per_segment);
408 /* Supported ROD Tokens */
409 d_ptr = (struct scsi_vpd_tpc_descriptor *)
410 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
411 srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
412 scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
413 scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
414 scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
415 srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
416 &srt_ptr->rod_type_descriptors;
417 scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
418 srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
419 scsi_ulto2b(0, srtd_ptr->preference_indicator);
421 scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
422 srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
423 scsi_ulto2b(0, srtd_ptr->preference_indicator);
425 /* General Copy Operations */
426 d_ptr = (struct scsi_vpd_tpc_descriptor *)
427 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
428 gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
429 scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
430 scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
431 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
432 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
433 scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
434 gco_ptr->data_segment_granularity = 0;
435 gco_ptr->inline_data_granularity = 0;
437 ctl_set_success(ctsio);
438 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
439 ctsio->be_move_done = ctl_config_move_done;
440 ctl_datamove((union ctl_io *)ctsio);
442 return (CTL_RETVAL_COMPLETE);
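/* RECEIVE COPY OPERATING PARAMETERS: report this device's copy limits. */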
446 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
448 struct scsi_receive_copy_operating_parameters *cdb;
449 struct scsi_receive_copy_operating_parameters_data *data;
451 int alloc_len, total_len;
453 CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));
455 cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
457 retval = CTL_RETVAL_COMPLETE;
459 total_len = sizeof(*data) + 4;
460 alloc_len = scsi_4btoul(cdb->length);
462 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
464 ctsio->kern_sg_entries = 0;
466 if (total_len < alloc_len) {
467 ctsio->residual = alloc_len - total_len;
468 ctsio->kern_data_len = total_len;
469 ctsio->kern_total_len = total_len;
472 ctsio->kern_data_len = alloc_len;
473 ctsio->kern_total_len = alloc_len;
475 ctsio->kern_data_resid = 0;
476 ctsio->kern_rel_offset = 0;
478 data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
479 scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
480 data->snlid = RCOP_SNLID;
481 scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
482 scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
483 scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
484 scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
485 scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
486 scsi_ulto4b(0, data->held_data_limit);
487 scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
488 scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
489 data->maximum_concurrent_copies = TPC_MAX_LISTS;
490 data->data_segment_granularity = 0;
491 data->inline_data_granularity = 0;
492 data->held_data_granularity = 0;
493 data->implemented_descriptor_list_length = 4;
494 data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
495 data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
496 data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
497 data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
499 ctl_set_success(ctsio);
500 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
501 ctsio->be_move_done = ctl_config_move_done;
502 ctl_datamove((union ctl_io *)ctsio);
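/*
 * Find a copy list on the LUN by list identifier and initiator index.
 * Lists created with list ID usage "none" are never matched.
 */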
506 static struct tpc_list *
507 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
509 struct tpc_list *list;
511 mtx_assert(&lun->lun_lock, MA_OWNED);
512 TAILQ_FOREACH(list, &lun->tpc_lists, links) {
513 if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
514 EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
515 list->init_idx == init_idx)
522 ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
525 struct scsi_receive_copy_status_lid1 *cdb;
526 struct scsi_receive_copy_status_lid1_data *data;
527 struct tpc_list *list;
528 struct tpc_list list_copy;
530 int alloc_len, total_len;
533 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
535 cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
536 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
538 retval = CTL_RETVAL_COMPLETE;
540 list_id = cdb->list_identifier;
541 mtx_lock(&lun->lun_lock);
542 list = tpc_find_list(lun, list_id,
543 ctl_get_initindex(&ctsio->io_hdr.nexus));
545 mtx_unlock(&lun->lun_lock);
546 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
547 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
549 ctl_done((union ctl_io *)ctsio);
553 if (list->completed) {
554 TAILQ_REMOVE(&lun->tpc_lists, list, links);
557 mtx_unlock(&lun->lun_lock);
559 total_len = sizeof(*data);
560 alloc_len = scsi_4btoul(cdb->length);
562 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
564 ctsio->kern_sg_entries = 0;
566 if (total_len < alloc_len) {
567 ctsio->residual = alloc_len - total_len;
568 ctsio->kern_data_len = total_len;
569 ctsio->kern_total_len = total_len;
572 ctsio->kern_data_len = alloc_len;
573 ctsio->kern_total_len = alloc_len;
575 ctsio->kern_data_resid = 0;
576 ctsio->kern_rel_offset = 0;
578 data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
579 scsi_ulto4b(sizeof(*data) - 4, data->available_data);
580 if (list_copy.completed) {
581 if (list_copy.error || list_copy.abort)
582 data->copy_command_status = RCS_CCS_ERROR;
584 data->copy_command_status = RCS_CCS_COMPLETED;
586 data->copy_command_status = RCS_CCS_INPROG;
587 scsi_ulto2b(list_copy.curseg, data->segments_processed);
588 if (list_copy.curbytes <= UINT32_MAX) {
589 data->transfer_count_units = RCS_TC_BYTES;
590 scsi_ulto4b(list_copy.curbytes, data->transfer_count);
592 data->transfer_count_units = RCS_TC_MBYTES;
593 scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
596 ctl_set_success(ctsio);
597 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
598 ctsio->be_move_done = ctl_config_move_done;
599 ctl_datamove((union ctl_io *)ctsio);
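/*
 * RECEIVE COPY FAILURE DETAILS: return the saved status and sense data of
 * a completed copy list and release the list.
 */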
604 ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
607 struct scsi_receive_copy_failure_details *cdb;
608 struct scsi_receive_copy_failure_details_data *data;
609 struct tpc_list *list;
610 struct tpc_list list_copy;
612 int alloc_len, total_len;
615 CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
617 cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
618 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
620 retval = CTL_RETVAL_COMPLETE;
622 list_id = cdb->list_identifier;
623 mtx_lock(&lun->lun_lock);
624 list = tpc_find_list(lun, list_id,
625 ctl_get_initindex(&ctsio->io_hdr.nexus));
626 if (list == NULL || !list->completed) {
627 mtx_unlock(&lun->lun_lock);
628 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
629 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
631 ctl_done((union ctl_io *)ctsio);
635 TAILQ_REMOVE(&lun->tpc_lists, list, links);
637 mtx_unlock(&lun->lun_lock);
639 total_len = sizeof(*data) + list_copy.sense_len;
640 alloc_len = scsi_4btoul(cdb->length);
642 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
644 ctsio->kern_sg_entries = 0;
646 if (total_len < alloc_len) {
647 ctsio->residual = alloc_len - total_len;
648 ctsio->kern_data_len = total_len;
649 ctsio->kern_total_len = total_len;
652 ctsio->kern_data_len = alloc_len;
653 ctsio->kern_total_len = alloc_len;
655 ctsio->kern_data_resid = 0;
656 ctsio->kern_rel_offset = 0;
658 data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
659 if (list_copy.completed && (list_copy.error || list_copy.abort)) {
660 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
661 data->available_data);
662 data->copy_command_status = RCS_CCS_ERROR;
664 scsi_ulto4b(0, data->available_data);
665 scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
666 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
668 ctl_set_success(ctsio);
669 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
670 ctsio->be_move_done = ctl_config_move_done;
671 ctl_datamove((union ctl_io *)ctsio);
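/* RECEIVE COPY STATUS (LID4): report progress or final status of a list. */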
676 ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
679 struct scsi_receive_copy_status_lid4 *cdb;
680 struct scsi_receive_copy_status_lid4_data *data;
681 struct tpc_list *list;
682 struct tpc_list list_copy;
684 int alloc_len, total_len;
687 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
689 cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
690 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
692 retval = CTL_RETVAL_COMPLETE;
694 list_id = scsi_4btoul(cdb->list_identifier);
695 mtx_lock(&lun->lun_lock);
696 list = tpc_find_list(lun, list_id,
697 ctl_get_initindex(&ctsio->io_hdr.nexus));
699 mtx_unlock(&lun->lun_lock);
700 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
701 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
703 ctl_done((union ctl_io *)ctsio);
707 if (list->completed) {
708 TAILQ_REMOVE(&lun->tpc_lists, list, links);
711 mtx_unlock(&lun->lun_lock);
713 total_len = sizeof(*data) + list_copy.sense_len;
714 alloc_len = scsi_4btoul(cdb->length);
716 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
718 ctsio->kern_sg_entries = 0;
720 if (total_len < alloc_len) {
721 ctsio->residual = alloc_len - total_len;
722 ctsio->kern_data_len = total_len;
723 ctsio->kern_total_len = total_len;
726 ctsio->kern_data_len = alloc_len;
727 ctsio->kern_total_len = alloc_len;
729 ctsio->kern_data_resid = 0;
730 ctsio->kern_rel_offset = 0;
732 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
733 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
734 data->available_data);
735 data->response_to_service_action = list_copy.service_action;
736 if (list_copy.completed) {
738 data->copy_command_status = RCS_CCS_ERROR;
739 else if (list_copy.abort)
740 data->copy_command_status = RCS_CCS_ABORTED;
742 data->copy_command_status = RCS_CCS_COMPLETED;
744 data->copy_command_status = RCS_CCS_INPROG_FG;
745 scsi_ulto2b(list_copy.curops, data->operation_counter);
746 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
747 data->transfer_count_units = RCS_TC_BYTES;
748 scsi_u64to8b(list_copy.curbytes, data->transfer_count);
749 scsi_ulto2b(list_copy.curseg, data->segments_processed);
750 data->length_of_the_sense_data_field = list_copy.sense_len;
751 data->sense_data_length = list_copy.sense_len;
752 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
754 ctl_set_success(ctsio);
755 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
756 ctsio->be_move_done = ctl_config_move_done;
757 ctl_datamove((union ctl_io *)ctsio);
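/* COPY OPERATION ABORT: look up the addressed copy list and request its abort. */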
762 ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
765 struct scsi_copy_operation_abort *cdb;
766 struct tpc_list *list;
770 CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
772 cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
773 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
775 retval = CTL_RETVAL_COMPLETE;
777 list_id = scsi_4btoul(cdb->list_identifier);
778 mtx_lock(&lun->lun_lock);
779 list = tpc_find_list(lun, list_id,
780 ctl_get_initindex(&ctsio->io_hdr.nexus));
782 mtx_unlock(&lun->lun_lock);
783 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
784 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
786 ctl_done((union ctl_io *)ctsio);
790 mtx_unlock(&lun->lun_lock);
792 ctl_set_success(ctsio);
793 ctl_done((union ctl_io *)ctsio);
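/*
 * Translate a CSCD descriptor index into a LUN number and, optionally,
 * its logical/physical block geometry.
 */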
798 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
799 uint32_t *pb, uint32_t *pbo)
803 if (ss && list->lun->be_lun)
804 *ss = list->lun->be_lun->blocksize;
805 if (pb && list->lun->be_lun)
806 *pb = list->lun->be_lun->blocksize <<
807 list->lun->be_lun->pblockexp;
808 if (pbo && list->lun->be_lun)
809 *pbo = list->lun->be_lun->blocksize *
810 list->lun->be_lun->pblockoff;
811 return (list->lun->lun);
813 if (idx >= list->ncscd)
815 return (tpcl_resolve(list->lun->ctl_softc,
816 list->init_port, &list->cscd[idx], ss, pb, pbo));
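/*
 * Build COPY ABORTED sense data for a failed segment, including the
 * failing segment number and any sense data forwarded from the device
 * that reported the error.
 */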
820 tpc_set_io_error_sense(struct tpc_list *list)
825 uint8_t fbuf[4 + 64];
827 scsi_ulto4b(list->curseg, csi);
828 if (list->fwd_cscd <= 0x07ff) {
829 sks[0] = SSD_SKS_SEGMENT_VALID;
830 scsi_ulto2b((uint8_t *)&list->cscd[list->fwd_cscd] -
831 list->params, &sks[1]);
834 if (list->fwd_scsi_status) {
836 fbuf[2] = list->fwd_target;
837 flen = list->fwd_sense_len;
840 fbuf[2] |= SSD_FORWARDED_FSDT;
843 fbuf[3] = list->fwd_scsi_status;
844 bcopy(&list->fwd_sense_data, &fbuf[4], flen);
848 ctl_set_sense(list->ctsio, /*current_error*/ 1,
849 /*sense_key*/ SSD_KEY_COPY_ABORTED,
850 /*asc*/ 0x0d, /*ascq*/ 0x01,
851 SSD_ELEM_COMMAND, sizeof(csi), csi,
852 sks[0] ? SSD_ELEM_SKS : SSD_ELEM_SKIP, sizeof(sks), sks,
853 flen ? SSD_ELEM_DESC : SSD_ELEM_SKIP, flen, fbuf,
858 tpc_process_b2b(struct tpc_list *list)
860 struct scsi_ec_segment_b2b *seg;
861 struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
862 struct tpc_io *tior, *tiow;
865 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
867 uint32_t srcblock, dstblock, pb, pbo, adj;
868 uint16_t scscd, dcscd;
871 scsi_ulto4b(list->curseg, csi);
872 if (list->stage == 1) {
873 while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
874 TAILQ_REMOVE(&list->allio, tior, links);
875 ctl_free_io(tior->io);
878 free(list->buf, M_CTL);
880 ctl_set_task_aborted(list->ctsio);
881 return (CTL_RETVAL_ERROR);
882 } else if (list->error) {
883 tpc_set_io_error_sense(list);
884 return (CTL_RETVAL_ERROR);
886 list->cursectors += list->segsectors;
887 list->curbytes += list->segbytes;
888 return (CTL_RETVAL_COMPLETE);
891 TAILQ_INIT(&list->allio);
892 seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
893 scscd = scsi_2btoul(seg->src_cscd);
894 dcscd = scsi_2btoul(seg->dst_cscd);
895 sl = tpc_resolve(list, scscd, &srcblock, NULL, NULL);
896 dl = tpc_resolve(list, dcscd, &dstblock, &pb, &pbo);
897 if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
898 ctl_set_sense(list->ctsio, /*current_error*/ 1,
899 /*sense_key*/ SSD_KEY_COPY_ABORTED,
900 /*asc*/ 0x08, /*ascq*/ 0x04,
901 SSD_ELEM_COMMAND, sizeof(csi), csi,
903 return (CTL_RETVAL_ERROR);
907 sdstp = &list->cscd[scscd].dtsp;
908 if (scsi_3btoul(sdstp->block_length) != 0)
909 srcblock = scsi_3btoul(sdstp->block_length);
910 ddstp = &list->cscd[dcscd].dtsp;
911 if (scsi_3btoul(ddstp->block_length) != 0)
912 dstblock = scsi_3btoul(ddstp->block_length);
913 numlba = scsi_2btoul(seg->number_of_blocks);
914 if (seg->flags & EC_SEG_DC)
915 numbytes = (off_t)numlba * dstblock;
917 numbytes = (off_t)numlba * srcblock;
918 srclba = scsi_8btou64(seg->src_lba);
919 dstlba = scsi_8btou64(seg->dst_lba);
921 // printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
922 // (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
923 // dl, scsi_8btou64(seg->dst_lba));
926 return (CTL_RETVAL_COMPLETE);
928 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
929 ctl_set_sense(list->ctsio, /*current_error*/ 1,
930 /*sense_key*/ SSD_KEY_COPY_ABORTED,
931 /*asc*/ 0x26, /*ascq*/ 0x0A,
932 SSD_ELEM_COMMAND, sizeof(csi), csi,
934 return (CTL_RETVAL_ERROR);
937 list->buf = malloc(numbytes, M_CTL, M_WAITOK);
938 list->segbytes = numbytes;
939 list->segsectors = numbytes / dstblock;
943 while (donebytes < numbytes) {
944 roundbytes = numbytes - donebytes;
945 if (roundbytes > TPC_MAX_IO_SIZE) {
946 roundbytes = TPC_MAX_IO_SIZE;
947 roundbytes -= roundbytes % dstblock;
949 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
950 if (roundbytes > adj)
955 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
956 TAILQ_INIT(&tior->run);
958 TAILQ_INSERT_TAIL(&list->allio, tior, links);
959 tior->io = tpcl_alloc_io();
960 ctl_scsi_read_write(tior->io,
961 /*data_ptr*/ &list->buf[donebytes],
962 /*data_len*/ roundbytes,
965 /*minimum_cdb_size*/ 0,
967 /*num_blocks*/ roundbytes / srcblock,
968 /*tag_type*/ CTL_TAG_SIMPLE,
970 tior->io->io_hdr.retries = 3;
971 tior->target = SSD_FORWARDED_SDS_EXSRC;
974 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
976 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
977 TAILQ_INIT(&tiow->run);
979 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
980 tiow->io = tpcl_alloc_io();
981 ctl_scsi_read_write(tiow->io,
982 /*data_ptr*/ &list->buf[donebytes],
983 /*data_len*/ roundbytes,
986 /*minimum_cdb_size*/ 0,
988 /*num_blocks*/ roundbytes / dstblock,
989 /*tag_type*/ CTL_TAG_SIMPLE,
991 tiow->io->io_hdr.retries = 3;
992 tiow->target = SSD_FORWARDED_SDS_EXDST;
995 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
997 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
998 TAILQ_INSERT_TAIL(&run, tior, rlinks);
1000 donebytes += roundbytes;
1001 srclba += roundbytes / srcblock;
1002 dstlba += roundbytes / dstblock;
1005 while ((tior = TAILQ_FIRST(&run)) != NULL) {
1006 TAILQ_REMOVE(&run, tior, rlinks);
1007 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1008 panic("tpcl_queue() error");
1012 return (CTL_RETVAL_QUEUED);
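/*
 * Process a "verify CSCD" segment: if the TUR bit is set, issue
 * TEST UNIT READY to the referenced device.
 */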
1016 tpc_process_verify(struct tpc_list *list)
1018 struct scsi_ec_segment_verify *seg;
1024 scsi_ulto4b(list->curseg, csi);
1025 if (list->stage == 1) {
1026 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1027 TAILQ_REMOVE(&list->allio, tio, links);
1028 ctl_free_io(tio->io);
1032 ctl_set_task_aborted(list->ctsio);
1033 return (CTL_RETVAL_ERROR);
1034 } else if (list->error) {
1035 tpc_set_io_error_sense(list);
1036 return (CTL_RETVAL_ERROR);
1038 return (CTL_RETVAL_COMPLETE);
1041 TAILQ_INIT(&list->allio);
1042 seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
1043 cscd = scsi_2btoul(seg->src_cscd);
1044 sl = tpc_resolve(list, cscd, NULL, NULL, NULL);
1045 if (sl >= CTL_MAX_LUNS) {
1046 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1047 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1048 /*asc*/ 0x08, /*ascq*/ 0x04,
1049 SSD_ELEM_COMMAND, sizeof(csi), csi,
1051 return (CTL_RETVAL_ERROR);
1054 // printf("Verify %ju\n", sl);
1056 if ((seg->tur & 0x01) == 0)
1057 return (CTL_RETVAL_COMPLETE);
1060 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1061 TAILQ_INIT(&tio->run);
1063 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1064 tio->io = tpcl_alloc_io();
1065 ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1066 tio->io->io_hdr.retries = 3;
1067 tio->target = SSD_FORWARDED_SDS_EXSRC;
1070 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1072 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1073 panic("tpcl_queue() error");
1074 return (CTL_RETVAL_QUEUED);
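/*
 * Process a "register key" segment: issue PERSISTENT RESERVE OUT
 * (REGISTER) to the destination device with the supplied keys.
 */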
1078 tpc_process_register_key(struct tpc_list *list)
1080 struct scsi_ec_segment_register_key *seg;
1087 scsi_ulto4b(list->curseg, csi);
1088 if (list->stage == 1) {
1089 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1090 TAILQ_REMOVE(&list->allio, tio, links);
1091 ctl_free_io(tio->io);
1094 free(list->buf, M_CTL);
1096 ctl_set_task_aborted(list->ctsio);
1097 return (CTL_RETVAL_ERROR);
1098 } else if (list->error) {
1099 tpc_set_io_error_sense(list);
1100 return (CTL_RETVAL_ERROR);
1102 return (CTL_RETVAL_COMPLETE);
1105 TAILQ_INIT(&list->allio);
1106 seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
1107 cscd = scsi_2btoul(seg->dst_cscd);
1108 dl = tpc_resolve(list, cscd, NULL, NULL, NULL);
1109 if (dl >= CTL_MAX_LUNS) {
1110 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1111 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1112 /*asc*/ 0x08, /*ascq*/ 0x04,
1113 SSD_ELEM_COMMAND, sizeof(csi), csi,
1115 return (CTL_RETVAL_ERROR);
1118 // printf("Register Key %ju\n", dl);
1121 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1122 TAILQ_INIT(&tio->run);
1124 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1125 tio->io = tpcl_alloc_io();
1126 datalen = sizeof(struct scsi_per_res_out_parms);
1127 list->buf = malloc(datalen, M_CTL, M_WAITOK);
1128 ctl_scsi_persistent_res_out(tio->io,
1129 list->buf, datalen, SPRO_REGISTER, -1,
1130 scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
1131 /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1132 tio->io->io_hdr.retries = 3;
1133 tio->target = SSD_FORWARDED_SDS_EXDST;
1136 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1138 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1139 panic("tpcl_queue() error");
1140 return (CTL_RETVAL_QUEUED);
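/* Sum the number of blocks described by a list of range descriptors. */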
1144 tpc_ranges_length(struct scsi_range_desc *range, int nrange)
1149 for (r = 0; r < nrange; r++)
1150 length += scsi_4btoul(range[r].length);
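/* Check that every range fits within the LUN; report the first bad LBA. */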
1155 tpc_check_ranges_l(struct scsi_range_desc *range, int nrange, uint64_t maxlba,
1162 for (i = 0; i < nrange; i++) {
1163 b1 = scsi_8btou64(range[i].lba);
1164 l1 = scsi_4btoul(range[i].length);
1165 if (b1 + l1 < b1 || b1 + l1 > maxlba + 1) {
1166 *lba = MAX(b1, maxlba + 1);
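/* Check the range descriptor list for overlapping ranges. */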
1174 tpc_check_ranges_x(struct scsi_range_desc *range, int nrange)
1180 for (i = 0; i < nrange - 1; i++) {
1181 b1 = scsi_8btou64(range[i].lba);
1182 l1 = scsi_4btoul(range[i].length);
1183 for (j = i + 1; j < nrange; j++) {
1184 b2 = scsi_8btou64(range[j].lba);
1185 l2 = scsi_4btoul(range[j].length);
1186 if (b1 + l1 > b2 && b2 + l2 > b1)
1194 tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
1195 int *srange, off_t *soffset)
1202 while (r < nrange) {
1203 if (skip - off < scsi_4btoul(range[r].length)) {
1205 *soffset = skip - off;
1208 off += scsi_4btoul(range[r].length);
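/*
 * WRITE USING TOKEN with a real ROD token: copy one chunk per round from
 * the token's source ranges to the destination ranges using paired
 * read/write I/Os.
 */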
1215 tpc_process_wut(struct tpc_list *list)
1217 struct tpc_io *tio, *tior, *tiow;
1220 off_t doffset, soffset;
1221 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
1222 uint32_t srcblock, dstblock, pb, pbo, adj;
1224 if (list->stage > 0) {
1225 /* Cleanup after previous rounds. */
1226 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1227 TAILQ_REMOVE(&list->allio, tio, links);
1228 ctl_free_io(tio->io);
1231 free(list->buf, M_CTL);
1233 ctl_set_task_aborted(list->ctsio);
1234 return (CTL_RETVAL_ERROR);
1235 } else if (list->error) {
1236 if (list->fwd_scsi_status) {
1237 list->ctsio->io_hdr.status =
1238 CTL_SCSI_ERROR | CTL_AUTOSENSE;
1239 list->ctsio->scsi_status = list->fwd_scsi_status;
1240 list->ctsio->sense_data = list->fwd_sense_data;
1241 list->ctsio->sense_len = list->fwd_sense_len;
1243 ctl_set_invalid_field(list->ctsio,
1244 /*sks_valid*/ 0, /*command*/ 0,
1245 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1247 return (CTL_RETVAL_ERROR);
1249 list->cursectors += list->segsectors;
1250 list->curbytes += list->segbytes;
1253 /* Check where we are on destination ranges list. */
1254 if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
1255 &drange, &doffset) != 0)
1256 return (CTL_RETVAL_COMPLETE);
1257 dstblock = list->lun->be_lun->blocksize;
1258 pb = dstblock << list->lun->be_lun->pblockexp;
1259 if (list->lun->be_lun->pblockoff > 0)
1260 pbo = pb - dstblock * list->lun->be_lun->pblockoff;
1264 /* Check where we are on source ranges list. */
1265 srcblock = list->token->blocksize;
1266 if (tpc_skip_ranges(list->token->range, list->token->nrange,
1267 list->offset_into_rod + list->cursectors * dstblock / srcblock,
1268 &srange, &soffset) != 0) {
1269 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
1270 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1271 return (CTL_RETVAL_ERROR);
1274 srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
1275 dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
1276 numbytes = srcblock *
1277 (scsi_4btoul(list->token->range[srange].length) - soffset);
1278 numbytes = omin(numbytes, dstblock *
1279 (scsi_4btoul(list->range[drange].length) - doffset));
1280 if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
1281 numbytes = TPC_MAX_IOCHUNK_SIZE;
1282 numbytes -= numbytes % dstblock;
1283 if (pb > dstblock) {
1284 adj = (dstlba * dstblock + numbytes - pbo) % pb;
1290 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
1291 ctl_set_invalid_field(list->ctsio, /*sks_valid*/ 0,
1292 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1293 return (CTL_RETVAL_ERROR);
1296 list->buf = malloc(numbytes, M_CTL, M_WAITOK |
1297 (list->token == NULL ? M_ZERO : 0));
1298 list->segbytes = numbytes;
1299 list->segsectors = numbytes / dstblock;
1300 //printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
1305 TAILQ_INIT(&list->allio);
1306 while (donebytes < numbytes) {
1307 roundbytes = numbytes - donebytes;
1308 if (roundbytes > TPC_MAX_IO_SIZE) {
1309 roundbytes = TPC_MAX_IO_SIZE;
1310 roundbytes -= roundbytes % dstblock;
1311 if (pb > dstblock) {
1312 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
1313 if (roundbytes > adj)
1318 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
1319 TAILQ_INIT(&tior->run);
1321 TAILQ_INSERT_TAIL(&list->allio, tior, links);
1322 tior->io = tpcl_alloc_io();
1323 ctl_scsi_read_write(tior->io,
1324 /*data_ptr*/ &list->buf[donebytes],
1325 /*data_len*/ roundbytes,
1328 /*minimum_cdb_size*/ 0,
1330 /*num_blocks*/ roundbytes / srcblock,
1331 /*tag_type*/ CTL_TAG_SIMPLE,
1333 tior->io->io_hdr.retries = 3;
1334 tior->lun = list->token->lun;
1335 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
1337 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1338 TAILQ_INIT(&tiow->run);
1340 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1341 tiow->io = tpcl_alloc_io();
1342 ctl_scsi_read_write(tiow->io,
1343 /*data_ptr*/ &list->buf[donebytes],
1344 /*data_len*/ roundbytes,
1347 /*minimum_cdb_size*/ 0,
1349 /*num_blocks*/ roundbytes / dstblock,
1350 /*tag_type*/ CTL_TAG_SIMPLE,
1352 tiow->io->io_hdr.retries = 3;
1353 tiow->lun = list->lun->lun;
1354 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
1356 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
1357 TAILQ_INSERT_TAIL(&run, tior, rlinks);
1359 donebytes += roundbytes;
1360 srclba += roundbytes / srcblock;
1361 dstlba += roundbytes / dstblock;
1364 while ((tior = TAILQ_FIRST(&run)) != NULL) {
1365 TAILQ_REMOVE(&run, tior, rlinks);
1366 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1367 panic("tpcl_queue() error");
1371 return (CTL_RETVAL_QUEUED);
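/*
 * WRITE USING TOKEN with the block-zero ROD token: implemented as a
 * WRITE SAME over each destination range.
 */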
1375 tpc_process_zero_wut(struct tpc_list *list)
1377 struct tpc_io *tio, *tiow;
1378 struct runl run, *prun;
1380 uint32_t dstblock, len;
1382 if (list->stage > 0) {
1384 /* Cleanup after previous rounds. */
1385 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1386 TAILQ_REMOVE(&list->allio, tio, links);
1387 ctl_free_io(tio->io);
1391 ctl_set_task_aborted(list->ctsio);
1392 return (CTL_RETVAL_ERROR);
1393 } else if (list->error) {
1394 if (list->fwd_scsi_status) {
1395 list->ctsio->io_hdr.status =
1396 CTL_SCSI_ERROR | CTL_AUTOSENSE;
1397 list->ctsio->scsi_status = list->fwd_scsi_status;
1398 list->ctsio->sense_data = list->fwd_sense_data;
1399 list->ctsio->sense_len = list->fwd_sense_len;
1401 ctl_set_invalid_field(list->ctsio,
1402 /*sks_valid*/ 0, /*command*/ 0,
1403 /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
1405 return (CTL_RETVAL_ERROR);
1407 list->cursectors += list->segsectors;
1408 list->curbytes += list->segbytes;
1409 return (CTL_RETVAL_COMPLETE);
1412 dstblock = list->lun->be_lun->blocksize;
1416 TAILQ_INIT(&list->allio);
1417 list->segsectors = 0;
1418 for (r = 0; r < list->nrange; r++) {
1419 len = scsi_4btoul(list->range[r].length);
1423 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1424 TAILQ_INIT(&tiow->run);
1426 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1427 tiow->io = tpcl_alloc_io();
1428 ctl_scsi_write_same(tiow->io,
1432 /*lba*/ scsi_8btou64(list->range[r].lba),
1434 /*tag_type*/ CTL_TAG_SIMPLE,
1436 tiow->io->io_hdr.retries = 3;
1437 tiow->lun = list->lun->lun;
1438 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
1440 TAILQ_INSERT_TAIL(prun, tiow, rlinks);
1442 list->segsectors += len;
1444 list->segbytes = list->segsectors * dstblock;
1446 if (TAILQ_EMPTY(&run))
1449 while ((tiow = TAILQ_FIRST(&run)) != NULL) {
1450 TAILQ_REMOVE(&run, tiow, rlinks);
1451 if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
1452 panic("tpcl_queue() error");
1456 return (CTL_RETVAL_QUEUED);
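/*
 * Main copy-list state machine: dispatch WRITE USING TOKEN lists, walk
 * EXTENDED COPY segment descriptors queueing I/O for each, and record the
 * final status on the list when everything has completed.
 */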
1460 tpc_process(struct tpc_list *list)
1462 struct ctl_lun *lun = list->lun;
1463 struct ctl_softc *softc = lun->ctl_softc;
1464 struct scsi_ec_segment *seg;
1465 struct ctl_scsiio *ctsio = list->ctsio;
1466 int retval = CTL_RETVAL_COMPLETE;
1469 if (list->service_action == EC_WUT) {
1470 if (list->token != NULL)
1471 retval = tpc_process_wut(list);
1473 retval = tpc_process_zero_wut(list);
1474 if (retval == CTL_RETVAL_QUEUED)
1476 if (retval == CTL_RETVAL_ERROR) {
1481 //printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
1482 while (list->curseg < list->nseg) {
1483 seg = list->seg[list->curseg];
1484 switch (seg->type_code) {
1486 retval = tpc_process_b2b(list);
1489 retval = tpc_process_verify(list);
1491 case EC_SEG_REGISTER_KEY:
1492 retval = tpc_process_register_key(list);
1495 scsi_ulto4b(list->curseg, csi);
1496 ctl_set_sense(ctsio, /*current_error*/ 1,
1497 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1498 /*asc*/ 0x26, /*ascq*/ 0x09,
1499 SSD_ELEM_COMMAND, sizeof(csi), csi,
1503 if (retval == CTL_RETVAL_QUEUED)
1505 if (retval == CTL_RETVAL_ERROR) {
1514 ctl_set_success(ctsio);
1517 //printf("ZZZ done\n");
1518 free(list->params, M_CTL);
1519 list->params = NULL;
1521 mtx_lock(&softc->tpc_lock);
1522 if (--list->token->active == 0)
1523 list->token->last_active = time_uptime;
1524 mtx_unlock(&softc->tpc_lock);
1527 mtx_lock(&lun->lun_lock);
1528 if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
1529 TAILQ_REMOVE(&lun->tpc_lists, list, links);
1532 list->completed = 1;
1533 list->last_active = time_uptime;
1534 list->sense_data = ctsio->sense_data;
1535 list->sense_len = ctsio->sense_len;
1536 list->scsi_status = ctsio->scsi_status;
1538 mtx_unlock(&lun->lun_lock);
1540 ctl_done((union ctl_io *)ctsio);
1544 * For any sort of check condition, busy, etc., we just retry. We do not
1545 * decrement the retry count for unit attention type errors. These are
1546 * normal, and we want to save the retry count for "real" errors. Otherwise,
1547 * we could end up with situations where a command will succeed in some
1548 * situations and fail in others, depending on whether a unit attention is
1549 * pending. Also, some of our error recovery actions, most notably the
1550 * LUN reset action, will cause a unit attention.
1552 * We can add more detail here later if necessary.
1554 static tpc_error_action
1555 tpc_checkcond_parse(union ctl_io *io)
1557 tpc_error_action error_action;
1558 int error_code, sense_key, asc, ascq;
1561 * Default to retrying the command.
1563 error_action = TPC_ERR_RETRY;
1565 scsi_extract_sense_len(&io->scsiio.sense_data,
1566 io->scsiio.sense_len,
1573 switch (error_code) {
1574 case SSD_DEFERRED_ERROR:
1575 case SSD_DESC_DEFERRED_ERROR:
1576 error_action |= TPC_ERR_NO_DECREMENT;
1578 case SSD_CURRENT_ERROR:
1579 case SSD_DESC_CURRENT_ERROR:
1581 switch (sense_key) {
1582 case SSD_KEY_UNIT_ATTENTION:
1583 error_action |= TPC_ERR_NO_DECREMENT;
1585 case SSD_KEY_HARDWARE_ERROR:
1587 * This is our generic "something bad happened"
1588 * error code. It often isn't recoverable.
1590 if ((asc == 0x44) && (ascq == 0x00))
1591 error_action = TPC_ERR_FAIL;
1593 case SSD_KEY_NOT_READY:
1595 * If the LUN is powered down, there likely isn't
1596 * much point in retrying right now.
1598 if ((asc == 0x04) && (ascq == 0x02))
1599 error_action = TPC_ERR_FAIL;
1601 * If the LUN is offline, there probably isn't much
1602 * point in retrying, either.
1604 if ((asc == 0x04) && (ascq == 0x03))
1605 error_action = TPC_ERR_FAIL;
1609 return (error_action);
1612 static tpc_error_action
1613 tpc_error_parse(union ctl_io *io)
1615 tpc_error_action error_action = TPC_ERR_RETRY;
1617 switch (io->io_hdr.io_type) {
1619 switch (io->io_hdr.status & CTL_STATUS_MASK) {
1620 case CTL_SCSI_ERROR:
1621 switch (io->scsiio.scsi_status) {
1622 case SCSI_STATUS_CHECK_COND:
1623 error_action = tpc_checkcond_parse(io);
1636 panic("%s: invalid ctl_io type %d\n", __func__,
1637 io->io_hdr.io_type);
1640 return (error_action);
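/*
 * Completion callback for copy I/O: retry recoverable errors, record
 * forwarded status on failure, then start dependent I/Os or the next
 * processing round once all outstanding I/O has finished.
 */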
1644 tpc_done(union ctl_io *io)
1646 struct tpc_io *tio, *tior;
1649 * Very minimal retry logic. We basically retry if we got an error
1650 * back, and the retry count is greater than 0. If we ever want
1651 * more sophisticated initiator type behavior, the CAM error
1652 * recovery code in ../common might be helpful.
1654 tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1655 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
1656 && (io->io_hdr.retries > 0)) {
1657 ctl_io_status old_status;
1658 tpc_error_action error_action;
1660 error_action = tpc_error_parse(io);
1661 switch (error_action & TPC_ERR_MASK) {
1666 if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
1667 io->io_hdr.retries--;
1668 old_status = io->io_hdr.status;
1669 io->io_hdr.status = CTL_STATUS_NONE;
1670 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
1671 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1672 if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
1673 printf("%s: error returned from ctl_queue()!\n",
1675 io->io_hdr.status = old_status;
1681 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
1682 tio->list->error = 1;
1683 if (io->io_hdr.io_type == CTL_IO_SCSI &&
1684 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) {
1685 tio->list->fwd_scsi_status = io->scsiio.scsi_status;
1686 tio->list->fwd_sense_data = io->scsiio.sense_data;
1687 tio->list->fwd_sense_len = io->scsiio.sense_len;
1688 tio->list->fwd_target = tio->target;
1689 tio->list->fwd_cscd = tio->cscd;
1692 atomic_add_int(&tio->list->curops, 1);
1693 if (!tio->list->error && !tio->list->abort) {
1694 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
1695 TAILQ_REMOVE(&tio->run, tior, rlinks);
1696 atomic_add_int(&tio->list->tbdio, 1);
1697 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1698 panic("tpcl_queue() error");
1701 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
1702 tpc_process(tio->list);
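/*
 * EXTENDED COPY (LID1): fetch and validate the parameter list, build a
 * tpc_list from its CSCD and segment descriptors, and queue it on the
 * LUN for processing.
 */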
1706 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
1708 struct scsi_extended_copy *cdb;
1709 struct scsi_extended_copy_lid1_data *data;
1710 struct scsi_ec_cscd *cscd;
1711 struct scsi_ec_segment *seg;
1712 struct ctl_lun *lun;
1713 struct tpc_list *list, *tlist;
1716 int len, off, lencscd, lenseg, leninl, nseg;
1718 CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
1720 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1721 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1722 len = scsi_4btoul(cdb->length);
1725 ctl_set_success(ctsio);
1728 if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
1729 len > sizeof(struct scsi_extended_copy_lid1_data) +
1730 TPC_MAX_LIST + TPC_MAX_INLINE) {
1731 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1732 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1737 * If we've got a kernel request that hasn't been malloced yet,
1738 * malloc it and tell the caller the data buffer is here.
1740 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1741 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1742 ctsio->kern_data_len = len;
1743 ctsio->kern_total_len = len;
1744 ctsio->kern_data_resid = 0;
1745 ctsio->kern_rel_offset = 0;
1746 ctsio->kern_sg_entries = 0;
1747 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1748 ctsio->be_move_done = ctl_config_move_done;
1749 ctl_datamove((union ctl_io *)ctsio);
1751 return (CTL_RETVAL_COMPLETE);
1754 data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
1755 lencscd = scsi_2btoul(data->cscd_list_length);
1756 lenseg = scsi_4btoul(data->segment_list_length);
1757 leninl = scsi_4btoul(data->inline_data_length);
1758 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1759 ctl_set_sense(ctsio, /*current_error*/ 1,
1760 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1761 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1764 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
1765 ctl_set_sense(ctsio, /*current_error*/ 1,
1766 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1767 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1770 if (lencscd + lenseg > TPC_MAX_LIST ||
1771 leninl > TPC_MAX_INLINE ||
1772 len < sizeof(struct scsi_extended_copy_lid1_data) +
1773 lencscd + lenseg + leninl) {
1774 ctl_set_param_len_error(ctsio);
1778 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1779 list->service_action = cdb->service_action;
1780 value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1781 if (value != NULL && strcmp(value, "on") == 0)
1782 list->init_port = -1;
1784 list->init_port = ctsio->io_hdr.nexus.targ_port;
1785 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
1786 list->list_id = data->list_identifier;
1787 list->flags = data->flags;
1788 list->params = ctsio->kern_data_ptr;
1789 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1790 ptr = &data->data[0];
1791 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
1792 cscd = (struct scsi_ec_cscd *)(ptr + off);
1793 if (cscd->type_code != EC_CSCD_ID) {
1795 ctl_set_sense(ctsio, /*current_error*/ 1,
1796 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1797 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
1801 ptr = &data->data[lencscd];
1802 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1803 if (nseg >= TPC_MAX_SEGS) {
1805 ctl_set_sense(ctsio, /*current_error*/ 1,
1806 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1807 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1810 seg = (struct scsi_ec_segment *)(ptr + off);
1811 if (seg->type_code != EC_SEG_B2B &&
1812 seg->type_code != EC_SEG_VERIFY &&
1813 seg->type_code != EC_SEG_REGISTER_KEY) {
1815 ctl_set_sense(ctsio, /*current_error*/ 1,
1816 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1817 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1820 list->seg[nseg] = seg;
1821 off += sizeof(struct scsi_ec_segment) +
1822 scsi_2btoul(seg->descr_length);
1824 list->inl = &data->data[lencscd + lenseg];
1825 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1827 list->leninl = leninl;
1828 list->ctsio = ctsio;
1830 mtx_lock(&lun->lun_lock);
1831 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1832 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1833 if (tlist != NULL && !tlist->completed) {
1834 mtx_unlock(&lun->lun_lock);
1836 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1837 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1841 if (tlist != NULL) {
1842 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1846 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1847 mtx_unlock(&lun->lun_lock);
1850 return (CTL_RETVAL_COMPLETE);
1853 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1854 free(ctsio->kern_data_ptr, M_CTL);
1855 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1857 ctl_done((union ctl_io *)ctsio);
1858 return (CTL_RETVAL_COMPLETE);
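/* EXTENDED COPY (LID4): as LID1, but with the long list identifier format. */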
1862 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
1864 struct scsi_extended_copy *cdb;
1865 struct scsi_extended_copy_lid4_data *data;
1866 struct scsi_ec_cscd *cscd;
1867 struct scsi_ec_segment *seg;
1868 struct ctl_lun *lun;
1869 struct tpc_list *list, *tlist;
1872 int len, off, lencscd, lenseg, leninl, nseg;
1874 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
1876 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1877 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1878 len = scsi_4btoul(cdb->length);
1881 ctl_set_success(ctsio);
1884 if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
1885 len > sizeof(struct scsi_extended_copy_lid4_data) +
1886 TPC_MAX_LIST + TPC_MAX_INLINE) {
1887 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1888 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1893 * If we've got a kernel request that hasn't been malloced yet,
1894 * malloc it and tell the caller the data buffer is here.
1896 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1897 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1898 ctsio->kern_data_len = len;
1899 ctsio->kern_total_len = len;
1900 ctsio->kern_data_resid = 0;
1901 ctsio->kern_rel_offset = 0;
1902 ctsio->kern_sg_entries = 0;
1903 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1904 ctsio->be_move_done = ctl_config_move_done;
1905 ctl_datamove((union ctl_io *)ctsio);
1907 return (CTL_RETVAL_COMPLETE);
1910 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
1911 lencscd = scsi_2btoul(data->cscd_list_length);
1912 lenseg = scsi_2btoul(data->segment_list_length);
1913 leninl = scsi_2btoul(data->inline_data_length);
1914 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1915 ctl_set_sense(ctsio, /*current_error*/ 1,
1916 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1917 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1920 if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
1921 ctl_set_sense(ctsio, /*current_error*/ 1,
1922 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1923 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1926 if (lencscd + lenseg > TPC_MAX_LIST ||
1927 leninl > TPC_MAX_INLINE ||
1928 len < sizeof(struct scsi_extended_copy_lid4_data) +
1929 lencscd + lenseg + leninl) {
1930 ctl_set_param_len_error(ctsio);
1934 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1935 list->service_action = cdb->service_action;
1936 value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1937 if (value != NULL && strcmp(value, "on") == 0)
1938 list->init_port = -1;
1940 list->init_port = ctsio->io_hdr.nexus.targ_port;
1941 list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
1942 list->list_id = scsi_4btoul(data->list_identifier);
1943 list->flags = data->flags;
1944 list->params = ctsio->kern_data_ptr;
1945 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1946 ptr = &data->data[0];
1947 for (off = 0; off < lencscd; off += sizeof(struct scsi_ec_cscd)) {
1948 cscd = (struct scsi_ec_cscd *)(ptr + off);
1949 if (cscd->type_code != EC_CSCD_ID) {
1951 ctl_set_sense(ctsio, /*current_error*/ 1,
1952 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1953 /*asc*/ 0x26, /*ascq*/ 0x07, SSD_ELEM_NONE);
1957 ptr = &data->data[lencscd];
1958 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1959 if (nseg >= TPC_MAX_SEGS) {
1961 ctl_set_sense(ctsio, /*current_error*/ 1,
1962 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1963 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1966 seg = (struct scsi_ec_segment *)(ptr + off);
1967 if (seg->type_code != EC_SEG_B2B &&
1968 seg->type_code != EC_SEG_VERIFY &&
1969 seg->type_code != EC_SEG_REGISTER_KEY) {
1971 ctl_set_sense(ctsio, /*current_error*/ 1,
1972 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1973 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1976 list->seg[nseg] = seg;
1977 off += sizeof(struct scsi_ec_segment) +
1978 scsi_2btoul(seg->descr_length);
1980 list->inl = &data->data[lencscd + lenseg];
1981 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1983 list->leninl = leninl;
1984 list->ctsio = ctsio;
1986 mtx_lock(&lun->lun_lock);
1987 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1988 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1989 if (tlist != NULL && !tlist->completed) {
1990 mtx_unlock(&lun->lun_lock);
1992 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1993 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1997 if (tlist != NULL) {
1998 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
2002 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
2003 mtx_unlock(&lun->lun_lock);
2006 return (CTL_RETVAL_COMPLETE);
2009 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2010 free(ctsio->kern_data_ptr, M_CTL);
2011 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2013 ctl_done((union ctl_io *)ctsio);
2014 return (CTL_RETVAL_COMPLETE);
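/*
 * Fill in an AUR ROD token: unique token identifier, a CSCD ID descriptor
 * for the LUN, the represented length, device geometry and target port
 * identification, padded with random bytes.
 */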
static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
	static int id = 0;
	struct scsi_vpd_id_descriptor *idd = NULL;
	struct scsi_ec_cscd_id *cscd;
	struct scsi_read_capacity_data_long *dtsd;
	int targid_len;

	scsi_ulto4b(ROD_TYPE_AUR, token->type);
	scsi_ulto2b(0x01f8, token->length);
	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
	if (lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_naa);
	if (idd == NULL && lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_eui64);
	if (idd != NULL) {
		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
		cscd->type_code = EC_CSCD_ID;
		cscd->luidt_pdt = T_DIRECT;
		memcpy(&cscd->codeset, idd, 4 + idd->length);
		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
	}
	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
	scsi_u64to8b(len, &token->body[48]);

	/* ROD token device type specific data (RC16 without first field) */
	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
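
	/*
	 * The tail of the token body carries the target port identification
	 * (when the port has one) starting at byte 120; the remaining bytes
	 * are filled with random data.
	 */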
	if (port->target_devid) {
		targid_len = port->target_devid->len;
		memcpy(&token->body[120], port->target_devid->data, targid_len);
	} else
		targid_len = 32;
	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}
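
/*
 * POPULATE TOKEN: validate the parameter list, record the requested LBA
 * ranges, build a ROD token describing them and queue the token on the
 * softc-wide list where a later WRITE USING TOKEN (possibly arriving
 * through another port) can find it.  The command completes immediately;
 * no data is moved here.
 */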
int
ctl_populate_token(struct ctl_scsiio *ctsio)
{
	struct scsi_populate_token *cdb;
	struct scsi_populate_token_data *data;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct ctl_port *port;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_populate_token\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port];
	cdb = (struct scsi_populate_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_populate_token_data) ||
	    len > sizeof(struct scsi_populate_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}
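
	/*
	 * From here on we run on the second pass: the block above allocates
	 * the buffer and starts the data-out move on the first pass, and the
	 * command is re-dispatched via ctl_config_move_done() once the
	 * transfer finishes, with CTL_FLAG_ALLOCATED set so execution
	 * continues here with the parameter data in place.
	 */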
	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_populate_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_populate_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_populate_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 14, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_4btoul(data->inactivity_timeout),
	    scsi_4btoul(data->rod_type),
	    scsi_2btoul(data->range_descriptor_length));
*/

	/* Validate INACTIVITY TIMEOUT field */
	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	/* Validate ROD TYPE field */
	if ((data->flags & EC_PT_RTV) &&
	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->curseg = 0;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
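
	/*
	 * Common error exit for the checks above: drop the parameter buffer
	 * if this function allocated it and complete the command with the
	 * sense data that was already set.
	 */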
done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
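
/*
 * WRITE USING TOKEN: validate the parameter list, look up the ROD token
 * supplied by the initiator and start copying the token's data into the
 * requested LBA ranges via tpc_process().  A block-device zero token is
 * handled without any token lookup.
 */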
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	uint64_t lba;
	int len, lendata, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendata = scsi_2btoul(data->length);
	if (lendata < sizeof(struct scsi_write_using_token_data) - 2 +
	    sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (lendesc < sizeof(struct scsi_range_desc) ||
	    len < sizeof(struct scsi_write_using_token_data) + lendesc ||
	    lendata < sizeof(struct scsi_write_using_token_data) - 2 + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 534, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
*/

	/* Validate list of ranges */
	if (tpc_check_ranges_l(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc),
	    lun->be_lun->maxlba, &lba) != 0) {
		ctl_set_lba_out_of_range(ctsio, lba);
		goto done;
	}
	if (tpc_check_ranges_x(&data->desc[0],
	    scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}
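
	/*
	 * Look the supplied token up on the softc-wide token list.  On a
	 * match the token gains a reference for this operation, and an
	 * EC_WUT_DEL_TKN request zeroes its timeout so the next timer
	 * sweep can reclaim it.
	 */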
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
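
/*
 * RECEIVE ROD TOKEN INFORMATION: report the status of the PT/WUT list
 * with the given list identifier and, if a ROD token was created for it,
 * return that token to the initiator.  Completed lists are removed once
 * their status has been collected.
 */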
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
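
/*
 * REPORT ALL ROD TOKENS: return the ROD tokens currently known to this
 * target, 96 bytes of management data per token.
 */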
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);