/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>
#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(1024 * 1024)
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 16)
#define	TPC_MIN_TOKEN_TIMEOUT	1
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint64_t		 lun;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};
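
/*
 * Timeout callout: runs once per second to garbage-collect completed
 * copy lists and inactive ROD tokens whose inactivity timeout expired.
 */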
static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule(&softc->tpc_timeout, hz);
}

void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
}

void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}

void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}
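
/*
 * Build the Third Party Copy VPD page (SVPD_SCSI_TPC), reporting the
 * supported EXTENDED COPY commands, descriptor types and ROD token limits.
 */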
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	    2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Block Device ROD Limits */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    bdrl_ptr->maximum_inactivity_timeout);
	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
	    bdrl_ptr->default_inactivity_timeout);
	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 5;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_PT;
	scd_ptr->supported_service_actions[3] = EC_WUT;
	scd_ptr->supported_service_actions[4] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 6;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;
	scd_ptr->supported_service_actions[4] = RCS_RRTI;
	scd_ptr->supported_service_actions[5] = RCS_RART;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
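
/*
 * RECEIVE COPY OPERATING PARAMETERS: report the same copy manager limits
 * as the TPC VPD page, in parameter-data form.
 */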
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
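
/*
 * Find the copy list matching the given list ID and initiator index.
 * Lists created with LIST ID USAGE set to "none" are never matched.
 */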
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}
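
/*
 * RECEIVE COPY STATUS (LID1): return progress or final status for the
 * given list ID; a completed list is freed once its status is reported.
 */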
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint32_t list_id;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
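
/*
 * RECEIVE COPY FAILURE DETAILS: return the saved sense data for a
 * completed (failed or aborted) list and free it.
 */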
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint32_t list_id;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
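
/*
 * RECEIVE COPY STATUS (LID4): like LID1, but with a 4-byte list
 * identifier, finer status codes and the saved sense data appended.
 */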
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint32_t list_id;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
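
/*
 * COPY OPERATION ABORT: flag the matching in-progress list as aborted;
 * the state machine notices the flag as its constituent I/Os complete.
 */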
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}
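
/*
 * Resolve a CSCD descriptor index to a LUN number, optionally reporting
 * its logical and physical block geometry.  Index 0xffff means the LUN
 * the copy command was addressed to.
 */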
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		if (pb && list->lun->be_lun)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo && list->lun->be_lun)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}
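
/*
 * Process a block-to-block segment descriptor: split the copy into rounds
 * of at most TPC_MAX_IO_SIZE, each a read from the source chained to a
 * write to the destination.  Stage 1 is the completion path for rounds
 * queued on the previous pass.
 */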
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01,
			    SSD_ELEM_COMMAND, csi, sizeof(csi),
			    SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock, NULL, NULL);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock, &pb, &pbo);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, csi, sizeof(csi),
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A,
		    SSD_ELEM_COMMAND, csi, sizeof(csi),
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
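
/*
 * Process a verify segment descriptor: if the TUR bit is set, send a
 * TEST UNIT READY to the resolved source LUN.
 */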
static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01,
			    SSD_ELEM_COMMAND, csi, sizeof(csi),
			    SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL, NULL, NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, csi, sizeof(csi),
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
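
/*
 * Process a register key segment descriptor by issuing PERSISTENT
 * RESERVE OUT (REGISTER) to the resolved destination LUN.
 */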
static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;
	uint8_t csi[4];

	scsi_ulto4b(list->curseg, csi);
	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01,
			    SSD_ELEM_COMMAND, csi, sizeof(csi),
			    SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL, NULL, NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04,
		    SSD_ELEM_COMMAND, csi, sizeof(csi),
		    SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}
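
/* Total length in blocks of a list of block device range descriptors. */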
static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}
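
/*
 * Locate the range descriptor and offset within it that contain the
 * skip'th block; returns 0 on success, or nonzero if the ranges cover
 * fewer than skip blocks.
 */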
static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}
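
/*
 * Process one chunk of a WRITE USING TOKEN operation: map the current
 * position onto the source (token) and destination range lists, then
 * queue a chain of read/write rounds of at most TPC_MAX_IO_SIZE each,
 * up to TPC_MAX_IOCHUNK_SIZE per pass.
 */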
static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run, *prun;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK |
	    (list->token == NULL ? M_ZERO : 0));
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//	printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//	    (uintmax_t)srclba, (uintmax_t)dstlba);

	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 1,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ srclba,
		    /*num_blocks*/ roundbytes / srcblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
		    /*data_ptr*/ &list->buf[donebytes],
		    /*data_len*/ roundbytes,
		    /*read_op*/ 0,
		    /*byte2*/ 0,
		    /*minimum_cdb_size*/ 0,
		    /*lba*/ dstlba,
		    /*num_blocks*/ roundbytes / dstblock,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
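
/*
 * WRITE USING TOKEN with the block device zero ROD token: implemented as
 * a chain of WRITE SAME commands using a single zeroed block.
 */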
static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	list->buf = malloc(dstblock, M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
		    /*data_ptr*/ list->buf,
		    /*data_len*/ dstblock,
		    /*byte2*/ 0,
		    /*lba*/ scsi_8btou64(list->range[r].lba),
		    /*num_blocks*/ len,
		    /*tag_type*/ CTL_TAG_SIMPLE,
		    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;

		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}
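
/*
 * Main state machine for a copy list: dispatch segment descriptors (or
 * token ranges for EC_WUT) until the list completes, fails or queues I/O,
 * then record the final status and complete the originating command.
 */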
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;
	uint8_t csi[4];

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//		printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				scsi_ulto4b(list->curseg, csi);
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09,
				    SSD_ELEM_COMMAND, csi, sizeof(csi),
				    SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//	printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		default:
			break;
		}
		break;
	}
	return (error_action);
}
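
/* Classify a failed I/O into a tpc_error_action for the retry logic below. */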
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}
	return (error_action);
}
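
/*
 * Completion callback for constituent I/Os: retry transient errors,
 * queue the I/Os chained behind this one, and restart the list state
 * machine when the last outstanding I/O of the round completes.
 */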
static void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}
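
/* EXTENDED COPY (LID1): parse the parameter list and start processing. */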
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid1_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
	    len > sizeof(struct scsi_extended_copy_lid1_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_4btoul(data->segment_list_length);
	leninl = scsi_4btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid1_data) +
	    lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = data->list_identifier;
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
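
/* EXTENDED COPY (LID4): as LID1, but with the LID4 parameter list layout. */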
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
	struct scsi_extended_copy *cdb;
	struct scsi_extended_copy_lid4_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	uint8_t *ptr;
	char *value;
	int len, off, lencscd, lenseg, leninl, nseg;

	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_extended_copy *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}
	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
	    len > sizeof(struct scsi_extended_copy_lid4_data) +
	    TPC_MAX_LIST + TPC_MAX_INLINE) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
	lencscd = scsi_2btoul(data->cscd_list_length);
	lenseg = scsi_2btoul(data->segment_list_length);
	leninl = scsi_2btoul(data->inline_data_length);
	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
		goto done;
	}
	if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
		goto done;
	}
	if (lencscd + lenseg > TPC_MAX_LIST ||
	    leninl > TPC_MAX_INLINE ||
	    len < sizeof(struct scsi_extended_copy_lid4_data) +
	    lencscd + lenseg + leninl) {
		ctl_set_param_len_error(ctsio);
		goto done;
	}

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
	if (value != NULL && strcmp(value, "on") == 0)
		list->init_port = -1;
	else
		list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(data->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
	ptr = &data->data[lencscd];
	for (nseg = 0, off = 0; off < lenseg; nseg++) {
		if (nseg >= TPC_MAX_SEGS) {
			free(list, M_CTL);
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
			goto done;
		}
		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
		off += sizeof(struct scsi_ec_segment) +
		    scsi_2btoul(list->seg[nseg]->descr_length);
	}
	list->inl = &data->data[lencscd + lenseg];
	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
	list->nseg = nseg;
	list->leninl = leninl;
	list->ctsio = ctsio;
	list->lun = lun;
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
		if (tlist != NULL && !tlist->completed) {
			mtx_unlock(&lun->lun_lock);
			free(list, M_CTL);
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
			    /*bit*/ 0);
			goto done;
		}
		if (tlist != NULL) {
			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
			free(tlist, M_CTL);
		}
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
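
/*
 * Fill a 512-byte AUR ROD token: a CSCD ID descriptor for the source LUN,
 * the represented byte length, READ CAPACITY(16)-style geometry, the
 * target device ID, and random padding to make the token opaque and unique.
 */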
static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
	static int id = 0;
	struct scsi_vpd_id_descriptor *idd = NULL;
	struct scsi_ec_cscd_id *cscd;
	struct scsi_read_capacity_data_long *dtsd;
	int targid_len;

	scsi_ulto4b(ROD_TYPE_AUR, token->type);
	scsi_ulto2b(0x01f8, token->length);
	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
	if (lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_naa);
	if (idd == NULL && lun->lun_devid)
		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
		    lun->lun_devid->data, lun->lun_devid->len,
		    scsi_devid_is_lun_eui64);
	if (idd != NULL) {
		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
		cscd->type_code = EC_CSCD_ID;
		cscd->luidt_pdt = T_DIRECT;
		memcpy(&cscd->codeset, idd, 4 + idd->length);
		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
	}
	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
	scsi_u64to8b(len, &token->body[48]);

	/* ROD token device type specific data (RC16 without first field) */
	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	targid_len = 0;
	if (port->target_devid) {
		targid_len = port->target_devid->len;
		memcpy(&token->body[120], port->target_devid->data, targid_len);
	}
	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}
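
/*
 * POPULATE TOKEN: create a ROD token describing the given ranges of this
 * LUN and register it for later use by WRITE USING TOKEN.
 */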
1913 ctl_populate_token(struct ctl_scsiio *ctsio)
1915 struct scsi_populate_token *cdb;
1916 struct scsi_populate_token_data *data;
1917 struct ctl_softc *softc;
1918 struct ctl_lun *lun;
1919 struct ctl_port *port;
1920 struct tpc_list *list, *tlist;
1921 struct tpc_token *token;
1924 CTL_DEBUG_PRINT(("ctl_populate_token\n"));
1926 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1927 softc = lun->ctl_softc;
1928 port = softc->ctl_ports[ctsio->io_hdr.nexus.targ_port];
1929 cdb = (struct scsi_populate_token *)ctsio->cdb;
1930 len = scsi_4btoul(cdb->length);
1932 if (len < sizeof(struct scsi_populate_token_data) ||
1933 len > sizeof(struct scsi_populate_token_data) +
1934 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
1935 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1936 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1941 * If we've got a kernel request that hasn't been malloced yet,
1942 * malloc it and tell the caller the data buffer is here.
1944 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1945 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1946 ctsio->kern_data_len = len;
1947 ctsio->kern_total_len = len;
1948 ctsio->kern_data_resid = 0;
1949 ctsio->kern_rel_offset = 0;
1950 ctsio->kern_sg_entries = 0;
1951 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1952 ctsio->be_move_done = ctl_config_move_done;
1953 ctl_datamove((union ctl_io *)ctsio);
1955 return (CTL_RETVAL_COMPLETE);
1958 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
1959 lendesc = scsi_2btoul(data->range_descriptor_length);
1960 if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
1961 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1962 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1966 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
1967 scsi_4btoul(cdb->list_identifier),
1968 data->flags, scsi_4btoul(data->inactivity_timeout),
1969 scsi_4btoul(data->rod_type),
1970 scsi_2btoul(data->range_descriptor_length));
1972 if ((data->flags & EC_PT_RTV) &&
1973 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
1974 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1975 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->ctsio = ctsio;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);
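
	/*
	 * Although POPULATE TOKEN completes synchronously, the list stays
	 * queued on the LUN so that a later RECEIVE ROD TOKEN INFORMATION
	 * with the same list identifier can report the command status and
	 * fetch the token saved in list->res_token below.
	 */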

	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
	token->lun = lun->lun;
	token->blocksize = lun->be_lun->blocksize;
	token->params = ctsio->kern_data_ptr;
	token->range = &data->desc[0];
	token->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->cursectors = tpc_ranges_length(token->range, token->nrange);
	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
	tpc_create_token(lun, port, list->curbytes,
	    (struct scsi_token *)token->token);
	token->active = 0;
	token->last_active = time_uptime;
	token->timeout = scsi_4btoul(data->inactivity_timeout);
	if (token->timeout == 0)
		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
	else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
		    /*bit*/ 0);
		/* Unqueue the list and free it and the unused token. */
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		free(token, M_CTL);
		goto done;
	}
	memcpy(list->res_token, token->token, sizeof(list->res_token));
	list->res_token_valid = 1;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&softc->tpc_lock);
	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
	mtx_unlock(&softc->tpc_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
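
/*
 * WRITE USING TOKEN service action: write data represented by a ROD token
 * into the LBA ranges listed in the parameter data.  In the usual offload
 * sequence (e.g. Microsoft ODX) the initiator issues POPULATE TOKEN on the
 * source, fetches the token with RECEIVE ROD TOKEN INFORMATION, and then
 * passes it to WRITE USING TOKEN on the destination.
 */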
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	int len, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (len < sizeof(struct scsi_write_using_token_data) + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
/*
	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
	    scsi_4btoul(cdb->list_identifier),
	    data->flags, scsi_8btou64(data->offset_into_rod),
	    scsi_2btoul(data->range_descriptor_length));
*/
	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/*
	 * Block device zero ROD token -> no token.  This well-known token
	 * type represents a source of zeroes, so the list can be processed
	 * immediately, without a backing token.
	 */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}
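
	/*
	 * Any other token must match one created earlier by POPULATE TOKEN
	 * on this copy manager; tokens are compared by their full 512-byte
	 * image.  The EC_WUT_DEL_TKN flag requests deletion of the token
	 * after this command, implemented below by zeroing its inactivity
	 * timeout so that the timeout handler reaps it once idle.
	 */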
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&softc->tpc_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		/* INVALID TOKEN OPERATION, TOKEN UNKNOWN */
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
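
/*
 * RECEIVE ROD TOKEN INFORMATION service action: report the status of the
 * POPULATE TOKEN or WRITE USING TOKEN command with the given list
 * identifier and, if a token was created, return its 512 bytes.  Fetching
 * the status of a completed list also releases the list.
 */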
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_initindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	/* Snapshot the list and release completed ones. */
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
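
	/*
	 * Response layout: the LID4 copy status header, then any sense
	 * data, then a 4-byte ROD token descriptors length and, if a token
	 * is present, a 2-byte identifier (zero here) followed by the
	 * 512-byte ROD token itself.
	 */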
	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
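
/*
 * REPORT ALL ROD TOKENS service action: return the 96-byte management
 * records (the leading bytes of each token) for all ROD tokens known to
 * this copy manager, letting an application client enumerate and audit
 * outstanding tokens.
 */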
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&softc->tpc_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&softc->tpc_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}