2 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/types.h>
35 #include <sys/module.h>
36 #include <sys/mutex.h>
37 #include <sys/condvar.h>
38 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/sysctl.h>
42 #include <machine/atomic.h>
45 #include <cam/scsi/scsi_all.h>
46 #include <cam/scsi/scsi_da.h>
47 #include <cam/ctl/ctl_io.h>
48 #include <cam/ctl/ctl.h>
49 #include <cam/ctl/ctl_frontend.h>
50 #include <cam/ctl/ctl_frontend_internal.h>
51 #include <cam/ctl/ctl_util.h>
52 #include <cam/ctl/ctl_backend.h>
53 #include <cam/ctl/ctl_ioctl.h>
54 #include <cam/ctl/ctl_ha.h>
55 #include <cam/ctl/ctl_private.h>
56 #include <cam/ctl/ctl_debug.h>
57 #include <cam/ctl/ctl_scsi_all.h>
58 #include <cam/ctl/ctl_tpc.h>
59 #include <cam/ctl/ctl_error.h>
61 #define TPC_MAX_CSCDS 64
62 #define TPC_MAX_SEGS 64
64 #define TPC_MAX_LIST 8192
65 #define TPC_MAX_INLINE 0
66 #define TPC_MAX_LISTS 255
67 #define TPC_MAX_IO_SIZE (1024 * 1024)
68 #define TPC_MAX_IOCHUNK_SIZE (TPC_MAX_IO_SIZE * 16)
69 #define TPC_MIN_TOKEN_TIMEOUT 1
70 #define TPC_DFL_TOKEN_TIMEOUT 60
71 #define TPC_MAX_TOKEN_TIMEOUT 600
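/*
 * A note on units: the token timeouts above are in seconds.  tpc_timeout()
 * runs once per second (hz ticks) and compares each token's last_active
 * time_uptime snapshot against its inactivity timeout.
 */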
73 MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
76 TPC_ERR_RETRY = 0x000,
79 TPC_ERR_NO_DECREMENT = 0x100
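/*
 * The low byte selects the recovery action (consumed through TPC_ERR_MASK
 * in tpc_done()); OR-ing in TPC_ERR_NO_DECREMENT asks tpc_done() not to
 * consume one of the I/O's remaining retries when it does retry.
 */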
83 TAILQ_HEAD(runl, tpc_io);
87 struct tpc_list *list;
89 TAILQ_ENTRY(tpc_io) rlinks;
90 TAILQ_ENTRY(tpc_io) links;
98 struct scsi_range_desc *range;
103 TAILQ_ENTRY(tpc_token) links;
107 uint8_t service_action;
113 struct scsi_ec_cscd *cscd;
114 struct scsi_ec_segment *seg[TPC_MAX_SEGS];
119 struct tpc_token *token;
120 struct scsi_range_desc *range;
122 off_t offset_into_rod;
137 TAILQ_HEAD(, tpc_io) allio;
138 struct scsi_sense_data sense_data;
141 struct ctl_scsiio *ctsio;
144 uint8_t res_token[512];
145 TAILQ_ENTRY(tpc_list) links;
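/*
 * One tpc_list tracks a single copy-manager command for its whole
 * lifetime; res_token holds the ROD token built by POPULATE TOKEN so it
 * can later be handed back to the initiator (presumably via RECEIVE ROD
 * TOKEN INFORMATION).
 */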
149 tpc_timeout(void *arg)
151 struct ctl_softc *softc = arg;
153 struct tpc_token *token, *ttoken;
154 struct tpc_list *list, *tlist;
156 /* Free completed lists whose timeout has expired. */
157 STAILQ_FOREACH(lun, &softc->lun_list, links) {
158 mtx_lock(&lun->lun_lock);
159 TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
160 if (!list->completed || time_uptime < list->last_active +
161 TPC_DFL_TOKEN_TIMEOUT)
163 TAILQ_REMOVE(&lun->tpc_lists, list, links);
166 mtx_unlock(&lun->lun_lock);
169 /* Free inactive ROD tokens whose inactivity timeout has expired. */
170 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
172 time_uptime < token->last_active + token->timeout + 1)
174 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
175 free(token->params, M_CTL);
178 callout_schedule(&softc->tpc_timeout, hz);
182 ctl_tpc_init(struct ctl_softc *softc)
185 TAILQ_INIT(&softc->tpc_tokens);
186 callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
187 callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
191 ctl_tpc_shutdown(struct ctl_softc *softc)
193 struct tpc_token *token;
195 callout_drain(&softc->tpc_timeout);
197 /* Free ROD tokens. */
198 mtx_lock(&softc->ctl_lock);
199 while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
200 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
201 free(token->params, M_CTL);
204 mtx_unlock(&softc->ctl_lock);
208 ctl_tpc_lun_init(struct ctl_lun *lun)
211 TAILQ_INIT(&lun->tpc_lists);
215 ctl_tpc_lun_shutdown(struct ctl_lun *lun)
217 struct ctl_softc *softc = lun->ctl_softc;
218 struct tpc_list *list;
219 struct tpc_token *token, *ttoken;
221 /* Free lists for this LUN. */
222 while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
223 TAILQ_REMOVE(&lun->tpc_lists, list, links);
224 KASSERT(list->completed,
225 ("Not completed TPC (%p) on shutdown", list));
229 /* Free ROD tokens for this LUN. */
230 mtx_assert(&softc->ctl_lock, MA_OWNED);
231 TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
232 if (token->lun != lun->lun || token->active)
234 TAILQ_REMOVE(&softc->tpc_tokens, token, links);
235 free(token->params, M_CTL);
241 ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
243 struct scsi_vpd_tpc *tpc_ptr;
244 struct scsi_vpd_tpc_descriptor *d_ptr;
245 struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
246 struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
247 struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
248 struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
249 struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
250 struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
251 struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
252 struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
253 struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
254 struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
255 struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
259 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
261 data_len = sizeof(struct scsi_vpd_tpc) +
262 sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
263 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
264 2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
265 sizeof(struct scsi_vpd_tpc_descriptor_pd) +
266 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
267 roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
268 sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
269 sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
270 sizeof(struct scsi_vpd_tpc_descriptor_srt) +
271 2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
272 sizeof(struct scsi_vpd_tpc_descriptor_gco);
274 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
275 tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
276 ctsio->kern_sg_entries = 0;
278 if (data_len < alloc_len) {
279 ctsio->residual = alloc_len - data_len;
280 ctsio->kern_data_len = data_len;
281 ctsio->kern_total_len = data_len;
284 ctsio->kern_data_len = alloc_len;
285 ctsio->kern_total_len = alloc_len;
287 ctsio->kern_data_resid = 0;
288 ctsio->kern_rel_offset = 0;
289 ctsio->kern_sg_entries = 0;
292 * The control device is always connected. The disk device, on the
293 * other hand, may not be online all the time.
296 tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
297 lun->be_lun->lun_type;
299 tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
300 tpc_ptr->page_code = SVPD_SCSI_TPC;
301 scsi_ulto2b(data_len - 4, tpc_ptr->page_length);
303 /* Block Device ROD Limits */
304 d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
305 bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
306 scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
307 scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
308 scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
309 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
310 bdrl_ptr->maximum_inactivity_timeout);
311 scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
312 bdrl_ptr->default_inactivity_timeout);
313 scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
314 scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);
316 /* Supported commands */
317 d_ptr = (struct scsi_vpd_tpc_descriptor *)
318 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
319 sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
320 scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
321 sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
322 scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
323 scd_ptr = &sc_ptr->descr[0];
324 scd_ptr->opcode = EXTENDED_COPY;
325 scd_ptr->sa_length = 5;
326 scd_ptr->supported_service_actions[0] = EC_EC_LID1;
327 scd_ptr->supported_service_actions[1] = EC_EC_LID4;
328 scd_ptr->supported_service_actions[2] = EC_PT;
329 scd_ptr->supported_service_actions[3] = EC_WUT;
330 scd_ptr->supported_service_actions[4] = EC_COA;
331 scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
332 &scd_ptr->supported_service_actions[scd_ptr->sa_length];
333 scd_ptr->opcode = RECEIVE_COPY_STATUS;
334 scd_ptr->sa_length = 6;
335 scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
336 scd_ptr->supported_service_actions[1] = RCS_RCFD;
337 scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
338 scd_ptr->supported_service_actions[3] = RCS_RCOP;
339 scd_ptr->supported_service_actions[4] = RCS_RRTI;
340 scd_ptr->supported_service_actions[5] = RCS_RART;
342 /* Parameter data. */
343 d_ptr = (struct scsi_vpd_tpc_descriptor *)
344 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
345 pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
346 scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
347 scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
348 scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
349 scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
350 scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
351 scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);
353 /* Supported Descriptors */
354 d_ptr = (struct scsi_vpd_tpc_descriptor *)
355 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
356 sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
357 scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
358 scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
359 sd_ptr->list_length = 4;
360 sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
361 sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
362 sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
363 sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;
365 /* Supported CSCD Descriptor IDs */
366 d_ptr = (struct scsi_vpd_tpc_descriptor *)
367 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
368 sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
369 scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
370 scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
371 scsi_ulto2b(2, sdid_ptr->list_length);
372 scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);
374 /* ROD Token Features */
375 d_ptr = (struct scsi_vpd_tpc_descriptor *)
376 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
377 rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
378 scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
379 scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
380 rtf_ptr->remote_tokens = 0;
381 scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
382 scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
383 scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
384 rtf_ptr->maximum_token_inactivity_timeout);
385 scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
386 rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
387 &rtf_ptr->type_specific_features;
388 rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
389 scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
390 scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
391 scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
392 scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
393 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
394 rtfb_ptr->optimal_bytes_to_token_per_segment);
395 scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
396 rtfb_ptr->optimal_bytes_from_token_per_segment);
398 /* Supported ROD Tokens */
399 d_ptr = (struct scsi_vpd_tpc_descriptor *)
400 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
401 srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
402 scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
403 scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
404 scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
405 srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
406 &srt_ptr->rod_type_descriptors;
407 scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
408 srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
409 scsi_ulto2b(0, srtd_ptr->preference_indicator);
411 scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
412 srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
413 scsi_ulto2b(0, srtd_ptr->preference_indicator);
415 /* General Copy Operations */
416 d_ptr = (struct scsi_vpd_tpc_descriptor *)
417 (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
418 gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
419 scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
420 scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
421 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
422 scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
423 scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
424 gco_ptr->data_segment_granularity = 0;
425 gco_ptr->inline_data_granularity = 0;
427 ctl_set_success(ctsio);
428 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
429 ctsio->be_move_done = ctl_config_move_done;
430 ctl_datamove((union ctl_io *)ctsio);
432 return (CTL_RETVAL_COMPLETE);
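/*
 * The RECEIVE COPY OPERATING PARAMETERS handler below reports the same
 * static limits that the TPC VPD page above advertises.
 */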
436 ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
438 struct scsi_receive_copy_operating_parameters *cdb;
439 struct scsi_receive_copy_operating_parameters_data *data;
441 int alloc_len, total_len;
443 CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));
445 cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;
447 retval = CTL_RETVAL_COMPLETE;
449 total_len = sizeof(*data) + 4;
450 alloc_len = scsi_4btoul(cdb->length);
452 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
454 ctsio->kern_sg_entries = 0;
456 if (total_len < alloc_len) {
457 ctsio->residual = alloc_len - total_len;
458 ctsio->kern_data_len = total_len;
459 ctsio->kern_total_len = total_len;
462 ctsio->kern_data_len = alloc_len;
463 ctsio->kern_total_len = alloc_len;
465 ctsio->kern_data_resid = 0;
466 ctsio->kern_rel_offset = 0;
468 data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
469 scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
470 data->snlid = RCOP_SNLID;
471 scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
472 scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
473 scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
474 scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
475 scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
476 scsi_ulto4b(0, data->held_data_limit);
477 scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
478 scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
479 data->maximum_concurrent_copies = TPC_MAX_LISTS;
480 data->data_segment_granularity = 0;
481 data->inline_data_granularity = 0;
482 data->held_data_granularity = 0;
483 data->implemented_descriptor_list_length = 4;
484 data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
485 data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
486 data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
487 data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;
489 ctl_set_success(ctsio);
490 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
491 ctsio->be_move_done = ctl_config_move_done;
492 ctl_datamove((union ctl_io *)ctsio);
496 static struct tpc_list *
497 tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
499 struct tpc_list *list;
501 mtx_assert(&lun->lun_lock, MA_OWNED);
502 TAILQ_FOREACH(list, &lun->tpc_lists, links) {
503 if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
504 EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
505 list->init_idx == init_idx)
512 ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
515 struct scsi_receive_copy_status_lid1 *cdb;
516 struct scsi_receive_copy_status_lid1_data *data;
517 struct tpc_list *list;
518 struct tpc_list list_copy;
520 int alloc_len, total_len;
523 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));
525 cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
526 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
528 retval = CTL_RETVAL_COMPLETE;
530 list_id = cdb->list_identifier;
531 mtx_lock(&lun->lun_lock);
532 list = tpc_find_list(lun, list_id,
533 ctl_get_resindex(&ctsio->io_hdr.nexus));
535 mtx_unlock(&lun->lun_lock);
536 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
537 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
539 ctl_done((union ctl_io *)ctsio);
543 if (list->completed) {
544 TAILQ_REMOVE(&lun->tpc_lists, list, links);
547 mtx_unlock(&lun->lun_lock);
549 total_len = sizeof(*data);
550 alloc_len = scsi_4btoul(cdb->length);
552 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
554 ctsio->kern_sg_entries = 0;
556 if (total_len < alloc_len) {
557 ctsio->residual = alloc_len - total_len;
558 ctsio->kern_data_len = total_len;
559 ctsio->kern_total_len = total_len;
562 ctsio->kern_data_len = alloc_len;
563 ctsio->kern_total_len = alloc_len;
565 ctsio->kern_data_resid = 0;
566 ctsio->kern_rel_offset = 0;
568 data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
569 scsi_ulto4b(sizeof(*data) - 4, data->available_data);
570 if (list_copy.completed) {
571 if (list_copy.error || list_copy.abort)
572 data->copy_command_status = RCS_CCS_ERROR;
574 data->copy_command_status = RCS_CCS_COMPLETED;
576 data->copy_command_status = RCS_CCS_INPROG;
577 scsi_ulto2b(list_copy.curseg, data->segments_processed);
578 if (list_copy.curbytes <= UINT32_MAX) {
579 data->transfer_count_units = RCS_TC_BYTES;
580 scsi_ulto4b(list_copy.curbytes, data->transfer_count);
582 data->transfer_count_units = RCS_TC_MBYTES;
583 scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
586 ctl_set_success(ctsio);
587 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
588 ctsio->be_move_done = ctl_config_move_done;
589 ctl_datamove((union ctl_io *)ctsio);
594 ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
597 struct scsi_receive_copy_failure_details *cdb;
598 struct scsi_receive_copy_failure_details_data *data;
599 struct tpc_list *list;
600 struct tpc_list list_copy;
602 int alloc_len, total_len;
605 CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));
607 cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
608 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
610 retval = CTL_RETVAL_COMPLETE;
612 list_id = cdb->list_identifier;
613 mtx_lock(&lun->lun_lock);
614 list = tpc_find_list(lun, list_id,
615 ctl_get_resindex(&ctsio->io_hdr.nexus));
616 if (list == NULL || !list->completed) {
617 mtx_unlock(&lun->lun_lock);
618 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
619 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
621 ctl_done((union ctl_io *)ctsio);
625 TAILQ_REMOVE(&lun->tpc_lists, list, links);
627 mtx_unlock(&lun->lun_lock);
629 total_len = sizeof(*data) + list_copy.sense_len;
630 alloc_len = scsi_4btoul(cdb->length);
632 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
634 ctsio->kern_sg_entries = 0;
636 if (total_len < alloc_len) {
637 ctsio->residual = alloc_len - total_len;
638 ctsio->kern_data_len = total_len;
639 ctsio->kern_total_len = total_len;
642 ctsio->kern_data_len = alloc_len;
643 ctsio->kern_total_len = alloc_len;
645 ctsio->kern_data_resid = 0;
646 ctsio->kern_rel_offset = 0;
648 data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
649 if (list_copy.completed && (list_copy.error || list_copy.abort)) {
650 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
651 data->available_data);
652 data->copy_command_status = RCS_CCS_ERROR;
654 scsi_ulto4b(0, data->available_data);
655 scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
656 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
658 ctl_set_success(ctsio);
659 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
660 ctsio->be_move_done = ctl_config_move_done;
661 ctl_datamove((union ctl_io *)ctsio);
666 ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
669 struct scsi_receive_copy_status_lid4 *cdb;
670 struct scsi_receive_copy_status_lid4_data *data;
671 struct tpc_list *list;
672 struct tpc_list list_copy;
674 int alloc_len, total_len;
677 CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));
679 cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
680 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
682 retval = CTL_RETVAL_COMPLETE;
684 list_id = scsi_4btoul(cdb->list_identifier);
685 mtx_lock(&lun->lun_lock);
686 list = tpc_find_list(lun, list_id,
687 ctl_get_resindex(&ctsio->io_hdr.nexus));
689 mtx_unlock(&lun->lun_lock);
690 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
691 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
693 ctl_done((union ctl_io *)ctsio);
697 if (list->completed) {
698 TAILQ_REMOVE(&lun->tpc_lists, list, links);
701 mtx_unlock(&lun->lun_lock);
703 total_len = sizeof(*data) + list_copy.sense_len;
704 alloc_len = scsi_4btoul(cdb->length);
706 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
708 ctsio->kern_sg_entries = 0;
710 if (total_len < alloc_len) {
711 ctsio->residual = alloc_len - total_len;
712 ctsio->kern_data_len = total_len;
713 ctsio->kern_total_len = total_len;
716 ctsio->kern_data_len = alloc_len;
717 ctsio->kern_total_len = alloc_len;
719 ctsio->kern_data_resid = 0;
720 ctsio->kern_rel_offset = 0;
722 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
723 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
724 data->available_data);
725 data->response_to_service_action = list_copy.service_action;
726 if (list_copy.completed) {
728 data->copy_command_status = RCS_CCS_ERROR;
729 else if (list_copy.abort)
730 data->copy_command_status = RCS_CCS_ABORTED;
732 data->copy_command_status = RCS_CCS_COMPLETED;
734 data->copy_command_status = RCS_CCS_INPROG_FG;
735 scsi_ulto2b(list_copy.curops, data->operation_counter);
736 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
737 data->transfer_count_units = RCS_TC_BYTES;
738 scsi_u64to8b(list_copy.curbytes, data->transfer_count);
739 scsi_ulto2b(list_copy.curseg, data->segments_processed);
740 data->length_of_the_sense_data_field = list_copy.sense_len;
741 data->sense_data_length = list_copy.sense_len;
742 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
744 ctl_set_success(ctsio);
745 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
746 ctsio->be_move_done = ctl_config_move_done;
747 ctl_datamove((union ctl_io *)ctsio);
752 ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
755 struct scsi_copy_operation_abort *cdb;
756 struct tpc_list *list;
760 CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));
762 cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
763 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
765 retval = CTL_RETVAL_COMPLETE;
767 list_id = scsi_4btoul(cdb->list_identifier);
768 mtx_lock(&lun->lun_lock);
769 list = tpc_find_list(lun, list_id,
770 ctl_get_resindex(&ctsio->io_hdr.nexus));
772 mtx_unlock(&lun->lun_lock);
773 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
774 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
776 ctl_done((union ctl_io *)ctsio);
780 mtx_unlock(&lun->lun_lock);
782 ctl_set_success(ctsio);
783 ctl_done((union ctl_io *)ctsio);
788 tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
789 uint32_t *pb, uint32_t *pbo)
793 if (ss && list->lun->be_lun)
794 *ss = list->lun->be_lun->blocksize;
795 if (pb && list->lun->be_lun)
796 *pb = list->lun->be_lun->blocksize <<
797 list->lun->be_lun->pblockexp;
798 if (pbo && list->lun->be_lun)
799 *pbo = list->lun->be_lun->blocksize *
800 list->lun->be_lun->pblockoff;
801 return (list->lun->lun);
803 if (idx >= list->ncscd)
805 return (tpcl_resolve(list->lun->ctl_softc,
806 list->init_port, &list->cscd[idx], ss, pb, pbo));
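/*
 * tpc_resolve() maps a CSCD descriptor index from the parameter list onto
 * a CTL LUN number and, when the caller asks for them, the corresponding
 * logical block size (ss), physical block size (pb) and physical block
 * offset (pbo); descriptors not handled locally are passed on to
 * tpcl_resolve().
 */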
810 tpc_process_b2b(struct tpc_list *list)
812 struct scsi_ec_segment_b2b *seg;
813 struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
814 struct tpc_io *tior, *tiow;
815 struct runl run, *prun;
817 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
819 uint32_t srcblock, dstblock, pb, pbo, adj;
821 if (list->stage == 1) {
822 while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
823 TAILQ_REMOVE(&list->allio, tior, links);
824 ctl_free_io(tior->io);
827 free(list->buf, M_CTL);
829 ctl_set_task_aborted(list->ctsio);
830 return (CTL_RETVAL_ERROR);
831 } else if (list->error) {
832 ctl_set_sense(list->ctsio, /*current_error*/ 1,
833 /*sense_key*/ SSD_KEY_COPY_ABORTED,
834 /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
835 return (CTL_RETVAL_ERROR);
837 list->cursectors += list->segsectors;
838 list->curbytes += list->segbytes;
839 return (CTL_RETVAL_COMPLETE);
842 TAILQ_INIT(&list->allio);
843 seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
844 sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock, NULL, NULL);
845 dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock, &pb, &pbo);
846 if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
847 ctl_set_sense(list->ctsio, /*current_error*/ 1,
848 /*sense_key*/ SSD_KEY_COPY_ABORTED,
849 /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
850 return (CTL_RETVAL_ERROR);
854 sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
855 if (scsi_3btoul(sdstp->block_length) != 0)
856 srcblock = scsi_3btoul(sdstp->block_length);
857 ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
858 if (scsi_3btoul(ddstp->block_length) != 0)
859 dstblock = scsi_3btoul(ddstp->block_length);
860 numlba = scsi_2btoul(seg->number_of_blocks);
861 if (seg->flags & EC_SEG_DC)
862 numbytes = (off_t)numlba * dstblock;
864 numbytes = (off_t)numlba * srcblock;
865 srclba = scsi_8btou64(seg->src_lba);
866 dstlba = scsi_8btou64(seg->dst_lba);
868 // printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
869 // (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
870 // dl, scsi_8btou64(seg->dst_lba));
873 return (CTL_RETVAL_COMPLETE);
875 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
876 ctl_set_sense(list->ctsio, /*current_error*/ 1,
877 /*sense_key*/ SSD_KEY_COPY_ABORTED,
878 /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
879 return (CTL_RETVAL_ERROR);
882 list->buf = malloc(numbytes, M_CTL, M_WAITOK);
883 list->segbytes = numbytes;
884 list->segsectors = numbytes / dstblock;
889 while (donebytes < numbytes) {
890 roundbytes = numbytes - donebytes;
891 if (roundbytes > TPC_MAX_IO_SIZE) {
892 roundbytes = TPC_MAX_IO_SIZE;
893 roundbytes -= roundbytes % dstblock;
895 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
896 if (roundbytes > adj)
901 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
902 TAILQ_INIT(&tior->run);
904 TAILQ_INSERT_TAIL(&list->allio, tior, links);
905 tior->io = tpcl_alloc_io();
906 ctl_scsi_read_write(tior->io,
907 /*data_ptr*/ &list->buf[donebytes],
908 /*data_len*/ roundbytes,
911 /*minimum_cdb_size*/ 0,
913 /*num_blocks*/ roundbytes / srcblock,
914 /*tag_type*/ CTL_TAG_SIMPLE,
916 tior->io->io_hdr.retries = 3;
918 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
920 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
921 TAILQ_INIT(&tiow->run);
923 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
924 tiow->io = tpcl_alloc_io();
925 ctl_scsi_read_write(tiow->io,
926 /*data_ptr*/ &list->buf[donebytes],
927 /*data_len*/ roundbytes,
930 /*minimum_cdb_size*/ 0,
932 /*num_blocks*/ roundbytes / dstblock,
933 /*tag_type*/ CTL_TAG_SIMPLE,
935 tiow->io->io_hdr.retries = 3;
937 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
939 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
940 TAILQ_INSERT_TAIL(prun, tior, rlinks);
942 donebytes += roundbytes;
943 srclba += roundbytes / srcblock;
944 dstlba += roundbytes / dstblock;
947 while ((tior = TAILQ_FIRST(&run)) != NULL) {
948 TAILQ_REMOVE(&run, tior, rlinks);
949 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
950 panic("tpcl_queue() error");
954 return (CTL_RETVAL_QUEUED);
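/*
 * Each round above pairs one READ (tior) with one WRITE (tiow) over the
 * same buffer slice; the write sits on tior->run, so tpc_done() queues it
 * only after its read has completed without error.
 */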
958 tpc_process_verify(struct tpc_list *list)
960 struct scsi_ec_segment_verify *seg;
964 if (list->stage == 1) {
965 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
966 TAILQ_REMOVE(&list->allio, tio, links);
967 ctl_free_io(tio->io);
971 ctl_set_task_aborted(list->ctsio);
972 return (CTL_RETVAL_ERROR);
973 } else if (list->error) {
974 ctl_set_sense(list->ctsio, /*current_error*/ 1,
975 /*sense_key*/ SSD_KEY_COPY_ABORTED,
976 /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
977 return (CTL_RETVAL_ERROR);
979 return (CTL_RETVAL_COMPLETE);
982 TAILQ_INIT(&list->allio);
983 seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
984 sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL, NULL, NULL);
985 if (sl >= CTL_MAX_LUNS) {
986 ctl_set_sense(list->ctsio, /*current_error*/ 1,
987 /*sense_key*/ SSD_KEY_COPY_ABORTED,
988 /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
989 return (CTL_RETVAL_ERROR);
992 // printf("Verify %ju\n", sl);
994 if ((seg->tur & 0x01) == 0)
995 return (CTL_RETVAL_COMPLETE);
998 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
999 TAILQ_INIT(&tio->run);
1001 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1002 tio->io = tpcl_alloc_io();
1003 ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1004 tio->io->io_hdr.retries = 3;
1006 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1008 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1009 panic("tpcl_queue() error");
1010 return (CTL_RETVAL_QUEUED);
1014 tpc_process_register_key(struct tpc_list *list)
1016 struct scsi_ec_segment_register_key *seg;
1021 if (list->stage == 1) {
1022 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1023 TAILQ_REMOVE(&list->allio, tio, links);
1024 ctl_free_io(tio->io);
1027 free(list->buf, M_CTL);
1029 ctl_set_task_aborted(list->ctsio);
1030 return (CTL_RETVAL_ERROR);
1031 } else if (list->error) {
1032 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1033 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1034 /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
1035 return (CTL_RETVAL_ERROR);
1037 return (CTL_RETVAL_COMPLETE);
1040 TAILQ_INIT(&list->allio);
1041 seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
1042 dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL, NULL, NULL);
1043 if (dl >= CTL_MAX_LUNS) {
1044 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1045 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1046 /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
1047 return (CTL_RETVAL_ERROR);
1050 // printf("Register Key %ju\n", dl);
1053 tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
1054 TAILQ_INIT(&tio->run);
1056 TAILQ_INSERT_TAIL(&list->allio, tio, links);
1057 tio->io = tpcl_alloc_io();
1058 datalen = sizeof(struct scsi_per_res_out_parms);
1059 list->buf = malloc(datalen, M_CTL, M_WAITOK);
1060 ctl_scsi_persistent_res_out(tio->io,
1061 list->buf, datalen, SPRO_REGISTER, -1,
1062 scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
1063 /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
1064 tio->io->io_hdr.retries = 3;
1066 tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
1068 if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
1069 panic("tpcl_queue() error");
1070 return (CTL_RETVAL_QUEUED);
1074 tpc_ranges_length(struct scsi_range_desc *range, int nrange)
1079 for (r = 0; r < nrange; r++)
1080 length += scsi_4btoul(range[r].length);
1085 tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
1086 int *srange, off_t *soffset)
1093 while (r < nrange) {
1094 if (skip - off < scsi_4btoul(range[r].length)) {
1096 *soffset = skip - off;
1099 off += scsi_4btoul(range[r].length);
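/*
 * Worked example: with ranges of 8, 4 and 16 blocks, a skip of 10 lands
 * in the second range (*srange = 1) at offset 2 (*soffset = 2), because
 * the first 8 blocks are consumed by range 0.
 */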
1106 tpc_process_wut(struct tpc_list *list)
1108 struct tpc_io *tio, *tior, *tiow;
1109 struct runl run, *prun;
1111 off_t doffset, soffset;
1112 off_t srclba, dstlba, numbytes, donebytes, roundbytes;
1113 uint32_t srcblock, dstblock, pb, pbo, adj;
1115 if (list->stage > 0) {
1116 /* Cleanup after previous rounds. */
1117 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1118 TAILQ_REMOVE(&list->allio, tio, links);
1119 ctl_free_io(tio->io);
1122 free(list->buf, M_CTL);
1124 ctl_set_task_aborted(list->ctsio);
1125 return (CTL_RETVAL_ERROR);
1126 } else if (list->error) {
1127 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1128 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1129 /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
1130 return (CTL_RETVAL_ERROR);
1132 list->cursectors += list->segsectors;
1133 list->curbytes += list->segbytes;
1136 /* Check where we are on destination ranges list. */
1137 if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
1138 &drange, &doffset) != 0)
1139 return (CTL_RETVAL_COMPLETE);
1140 dstblock = list->lun->be_lun->blocksize;
1141 pb = dstblock << list->lun->be_lun->pblockexp;
1142 if (list->lun->be_lun->pblockoff > 0)
1143 pbo = pb - dstblock * list->lun->be_lun->pblockoff;
1147 /* Check where we are on source ranges list. */
1148 srcblock = list->token->blocksize;
1149 if (tpc_skip_ranges(list->token->range, list->token->nrange,
1150 list->offset_into_rod + list->cursectors * dstblock / srcblock,
1151 &srange, &soffset) != 0) {
1152 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1153 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1154 /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
1155 return (CTL_RETVAL_ERROR);
1158 srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
1159 dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
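/*
 * Transfer length for this chunk: the smaller of what remains in the
 * current source range and in the current destination range.
 */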
1160 numbytes = srcblock *
1161 (scsi_4btoul(list->token->range[srange].length) - soffset);
1162 numbytes = omin(numbytes, dstblock *
1163 (scsi_4btoul(list->range[drange].length) - doffset));
1164 if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
1165 numbytes = TPC_MAX_IOCHUNK_SIZE;
1166 numbytes -= numbytes % dstblock;
1167 if (pb > dstblock) {
1168 adj = (dstlba * dstblock + numbytes - pbo) % pb;
1174 if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
1175 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1176 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1177 /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
1178 return (CTL_RETVAL_ERROR);
1181 list->buf = malloc(numbytes, M_CTL, M_WAITOK |
1182 (list->token == NULL ? M_ZERO : 0));
1183 list->segbytes = numbytes;
1184 list->segsectors = numbytes / dstblock;
1185 //printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
1191 TAILQ_INIT(&list->allio);
1192 while (donebytes < numbytes) {
1193 roundbytes = numbytes - donebytes;
1194 if (roundbytes > TPC_MAX_IO_SIZE) {
1195 roundbytes = TPC_MAX_IO_SIZE;
1196 roundbytes -= roundbytes % dstblock;
1197 if (pb > dstblock) {
1198 adj = (dstlba * dstblock + roundbytes - pbo) % pb;
1199 if (roundbytes > adj)
1204 tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
1205 TAILQ_INIT(&tior->run);
1207 TAILQ_INSERT_TAIL(&list->allio, tior, links);
1208 tior->io = tpcl_alloc_io();
1209 ctl_scsi_read_write(tior->io,
1210 /*data_ptr*/ &list->buf[donebytes],
1211 /*data_len*/ roundbytes,
1214 /*minimum_cdb_size*/ 0,
1216 /*num_blocks*/ roundbytes / srcblock,
1217 /*tag_type*/ CTL_TAG_SIMPLE,
1219 tior->io->io_hdr.retries = 3;
1220 tior->lun = list->token->lun;
1221 tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;
1223 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1224 TAILQ_INIT(&tiow->run);
1226 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1227 tiow->io = tpcl_alloc_io();
1228 ctl_scsi_read_write(tiow->io,
1229 /*data_ptr*/ &list->buf[donebytes],
1230 /*data_len*/ roundbytes,
1233 /*minimum_cdb_size*/ 0,
1235 /*num_blocks*/ roundbytes / dstblock,
1236 /*tag_type*/ CTL_TAG_SIMPLE,
1238 tiow->io->io_hdr.retries = 3;
1239 tiow->lun = list->lun->lun;
1240 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
1242 TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
1243 TAILQ_INSERT_TAIL(prun, tior, rlinks);
1245 donebytes += roundbytes;
1246 srclba += roundbytes / srcblock;
1247 dstlba += roundbytes / dstblock;
1250 while ((tior = TAILQ_FIRST(&run)) != NULL) {
1251 TAILQ_REMOVE(&run, tior, rlinks);
1252 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1253 panic("tpcl_queue() error");
1257 return (CTL_RETVAL_QUEUED);
1261 tpc_process_zero_wut(struct tpc_list *list)
1263 struct tpc_io *tio, *tiow;
1264 struct runl run, *prun;
1266 uint32_t dstblock, len;
1268 if (list->stage > 0) {
1270 /* Cleanup after previous rounds. */
1271 while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
1272 TAILQ_REMOVE(&list->allio, tio, links);
1273 ctl_free_io(tio->io);
1276 free(list->buf, M_CTL);
1278 ctl_set_task_aborted(list->ctsio);
1279 return (CTL_RETVAL_ERROR);
1280 } else if (list->error) {
1281 ctl_set_sense(list->ctsio, /*current_error*/ 1,
1282 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1283 /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
1284 return (CTL_RETVAL_ERROR);
1286 list->cursectors += list->segsectors;
1287 list->curbytes += list->segbytes;
1288 return (CTL_RETVAL_COMPLETE);
1291 dstblock = list->lun->be_lun->blocksize;
1292 list->buf = malloc(dstblock, M_CTL, M_WAITOK | M_ZERO);
1296 TAILQ_INIT(&list->allio);
1297 list->segsectors = 0;
1298 for (r = 0; r < list->nrange; r++) {
1299 len = scsi_4btoul(list->range[r].length);
1303 tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
1304 TAILQ_INIT(&tiow->run);
1306 TAILQ_INSERT_TAIL(&list->allio, tiow, links);
1307 tiow->io = tpcl_alloc_io();
1308 ctl_scsi_write_same(tiow->io,
1309 /*data_ptr*/ list->buf,
1310 /*data_len*/ dstblock,
1312 /*lba*/ scsi_8btou64(list->range[r].lba),
1314 /*tag_type*/ CTL_TAG_SIMPLE,
1316 tiow->io->io_hdr.retries = 3;
1317 tiow->lun = list->lun->lun;
1318 tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;
1320 TAILQ_INSERT_TAIL(prun, tiow, rlinks);
1322 list->segsectors += len;
1324 list->segbytes = list->segsectors * dstblock;
1326 if (TAILQ_EMPTY(&run))
1329 while ((tiow = TAILQ_FIRST(&run)) != NULL) {
1330 TAILQ_REMOVE(&run, tiow, rlinks);
1331 if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
1332 panic("tpcl_queue() error");
1336 return (CTL_RETVAL_QUEUED);
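/*
 * With no backing token data (e.g. a block-zero ROD), WRITE USING TOKEN
 * degenerates into one WRITE SAME of a single zeroed block per
 * destination range, so no source data is read at all.
 */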
1340 tpc_process(struct tpc_list *list)
1342 struct ctl_lun *lun = list->lun;
1343 struct ctl_softc *softc = lun->ctl_softc;
1344 struct scsi_ec_segment *seg;
1345 struct ctl_scsiio *ctsio = list->ctsio;
1346 int retval = CTL_RETVAL_COMPLETE;
1348 if (list->service_action == EC_WUT) {
1349 if (list->token != NULL)
1350 retval = tpc_process_wut(list);
1352 retval = tpc_process_zero_wut(list);
1353 if (retval == CTL_RETVAL_QUEUED)
1355 if (retval == CTL_RETVAL_ERROR) {
1360 //printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
1361 while (list->curseg < list->nseg) {
1362 seg = list->seg[list->curseg];
1363 switch (seg->type_code) {
1365 retval = tpc_process_b2b(list);
1368 retval = tpc_process_verify(list);
1370 case EC_SEG_REGISTER_KEY:
1371 retval = tpc_process_register_key(list);
1374 ctl_set_sense(ctsio, /*current_error*/ 1,
1375 /*sense_key*/ SSD_KEY_COPY_ABORTED,
1376 /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
1379 if (retval == CTL_RETVAL_QUEUED)
1381 if (retval == CTL_RETVAL_ERROR) {
1390 ctl_set_success(ctsio);
1393 //printf("ZZZ done\n");
1394 free(list->params, M_CTL);
1395 list->params = NULL;
1397 mtx_lock(&softc->ctl_lock);
1398 if (--list->token->active == 0)
1399 list->token->last_active = time_uptime;
1400 mtx_unlock(&softc->ctl_lock);
1403 mtx_lock(&lun->lun_lock);
1404 if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
1405 TAILQ_REMOVE(&lun->tpc_lists, list, links);
1408 list->completed = 1;
1409 list->last_active = time_uptime;
1410 list->sense_data = ctsio->sense_data;
1411 list->sense_len = ctsio->sense_len;
1412 list->scsi_status = ctsio->scsi_status;
1414 mtx_unlock(&lun->lun_lock);
1416 ctl_done((union ctl_io *)ctsio);
1420 * For any sort of check condition, busy, etc., we just retry. We do not
1421 * decrement the retry count for unit attention type errors. These are
1422 * normal, and we want to save the retry count for "real" errors. Otherwise,
1423 * we could end up with situations where a command will succeed in some
1424 * situations and fail in others, depending on whether a unit attention is
1425 * pending. Also, some of our error recovery actions, most notably the
1426 * LUN reset action, will cause a unit attention.
1428 * We can add more detail here later if necessary.
1430 static tpc_error_action
1431 tpc_checkcond_parse(union ctl_io *io)
1433 tpc_error_action error_action;
1434 int error_code, sense_key, asc, ascq;
1437 * Default to retrying the command.
1439 error_action = TPC_ERR_RETRY;
1441 scsi_extract_sense_len(&io->scsiio.sense_data,
1442 io->scsiio.sense_len,
1449 switch (error_code) {
1450 case SSD_DEFERRED_ERROR:
1451 case SSD_DESC_DEFERRED_ERROR:
1452 error_action |= TPC_ERR_NO_DECREMENT;
1454 case SSD_CURRENT_ERROR:
1455 case SSD_DESC_CURRENT_ERROR:
1457 switch (sense_key) {
1458 case SSD_KEY_UNIT_ATTENTION:
1459 error_action |= TPC_ERR_NO_DECREMENT;
1461 case SSD_KEY_HARDWARE_ERROR:
1463 * This is our generic "something bad happened"
1464 * error code. It often isn't recoverable.
1466 if ((asc == 0x44) && (ascq == 0x00))
1467 error_action = TPC_ERR_FAIL;
1469 case SSD_KEY_NOT_READY:
1471 * If the LUN is powered down, there likely isn't
1472 * much point in retrying right now.
1474 if ((asc == 0x04) && (ascq == 0x02))
1475 error_action = TPC_ERR_FAIL;
1477 * If the LUN is offline, there probably isn't much
1478 * point in retrying, either.
1480 if ((asc == 0x04) && (ascq == 0x03))
1481 error_action = TPC_ERR_FAIL;
1485 return (error_action);
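/*
 * tpc_error_parse() below refines the action only for CHECK CONDITION
 * completions, via tpc_checkcond_parse(); any other status keeps the
 * default TPC_ERR_RETRY.
 */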
1488 static tpc_error_action
1489 tpc_error_parse(union ctl_io *io)
1491 tpc_error_action error_action = TPC_ERR_RETRY;
1493 switch (io->io_hdr.io_type) {
1495 switch (io->io_hdr.status & CTL_STATUS_MASK) {
1496 case CTL_SCSI_ERROR:
1497 switch (io->scsiio.scsi_status) {
1498 case SCSI_STATUS_CHECK_COND:
1499 error_action = tpc_checkcond_parse(io);
1512 panic("%s: invalid ctl_io type %d\n", __func__,
1513 io->io_hdr.io_type);
1516 return (error_action);
1520 tpc_done(union ctl_io *io)
1522 struct tpc_io *tio, *tior;
1525 * Very minimal retry logic. We basically retry if we got an error
1526 * back, and the retry count is greater than 0. If we ever want
1527 * more sophisticated initiator type behavior, the CAM error
1528 * recovery code in ../common might be helpful.
1530 tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1531 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
1532 && (io->io_hdr.retries > 0)) {
1533 ctl_io_status old_status;
1534 tpc_error_action error_action;
1536 error_action = tpc_error_parse(io);
1537 switch (error_action & TPC_ERR_MASK) {
1542 if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
1543 io->io_hdr.retries--;
1544 old_status = io->io_hdr.status;
1545 io->io_hdr.status = CTL_STATUS_NONE;
1546 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
1547 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
1548 if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
1549 printf("%s: error returned from ctl_queue()!\n",
1551 io->io_hdr.status = old_status;
1557 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
1558 tio->list->error = 1;
1560 atomic_add_int(&tio->list->curops, 1);
1561 if (!tio->list->error && !tio->list->abort) {
1562 while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
1563 TAILQ_REMOVE(&tio->run, tior, rlinks);
1564 atomic_add_int(&tio->list->tbdio, 1);
1565 if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
1566 panic("tpcl_queue() error");
1569 if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
1570 tpc_process(tio->list);
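/*
 * tbdio counts the I/Os still outstanding for the current segment; the
 * completion that drops it to zero (fetchadd returning 1) is the one
 * that calls tpc_process() to advance the list.
 */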
1574 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
1576 struct scsi_extended_copy *cdb;
1577 struct scsi_extended_copy_lid1_data *data;
1578 struct ctl_lun *lun;
1579 struct tpc_list *list, *tlist;
1582 int len, off, lencscd, lenseg, leninl, nseg;
1584 CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
1586 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1587 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1588 len = scsi_4btoul(cdb->length);
1590 if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
1591 len > sizeof(struct scsi_extended_copy_lid1_data) +
1592 TPC_MAX_LIST + TPC_MAX_INLINE) {
1593 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1594 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1599 * If we've got a kernel request that hasn't been malloced yet,
1600 * malloc it and tell the caller the data buffer is here.
1602 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1603 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1604 ctsio->kern_data_len = len;
1605 ctsio->kern_total_len = len;
1606 ctsio->kern_data_resid = 0;
1607 ctsio->kern_rel_offset = 0;
1608 ctsio->kern_sg_entries = 0;
1609 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1610 ctsio->be_move_done = ctl_config_move_done;
1611 ctl_datamove((union ctl_io *)ctsio);
1613 return (CTL_RETVAL_COMPLETE);
1616 data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
1617 lencscd = scsi_2btoul(data->cscd_list_length);
1618 lenseg = scsi_4btoul(data->segment_list_length);
1619 leninl = scsi_4btoul(data->inline_data_length);
1620 if (len < sizeof(struct scsi_extended_copy_lid1_data) +
1621 lencscd + lenseg + leninl ||
1622 leninl > TPC_MAX_INLINE) {
1623 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1624 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1627 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1628 ctl_set_sense(ctsio, /*current_error*/ 1,
1629 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1630 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1633 if (lencscd + lenseg > TPC_MAX_LIST) {
1634 ctl_set_param_len_error(ctsio);
1638 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1639 list->service_action = cdb->service_action;
1640 value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1641 if (value != NULL && strcmp(value, "on") == 0)
1642 list->init_port = -1;
1644 list->init_port = ctsio->io_hdr.nexus.targ_port;
1645 list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1646 list->list_id = data->list_identifier;
1647 list->flags = data->flags;
1648 list->params = ctsio->kern_data_ptr;
1649 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1650 ptr = &data->data[lencscd];
1651 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1652 if (nseg >= TPC_MAX_SEGS) {
1654 ctl_set_sense(ctsio, /*current_error*/ 1,
1655 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1656 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1659 list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
1660 off += sizeof(struct scsi_ec_segment) +
1661 scsi_2btoul(list->seg[nseg]->descr_length);
1663 list->inl = &data->data[lencscd + lenseg];
1664 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1666 list->leninl = leninl;
1667 list->ctsio = ctsio;
1669 mtx_lock(&lun->lun_lock);
1670 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1671 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1672 if (tlist != NULL && !tlist->completed) {
1673 mtx_unlock(&lun->lun_lock);
1675 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1676 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1680 if (tlist != NULL) {
1681 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1685 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1686 mtx_unlock(&lun->lun_lock);
1689 return (CTL_RETVAL_COMPLETE);
1692 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1693 free(ctsio->kern_data_ptr, M_CTL);
1694 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1696 ctl_done((union ctl_io *)ctsio);
1697 return (CTL_RETVAL_COMPLETE);
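/*
 * The LID4 variant below parses the wider LID4 parameter header (4-byte
 * list identifier and differently sized length fields) but otherwise
 * builds and queues the tpc_list exactly as the LID1 path above does.
 */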
1701 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
1703 struct scsi_extended_copy *cdb;
1704 struct scsi_extended_copy_lid4_data *data;
1705 struct ctl_lun *lun;
1706 struct tpc_list *list, *tlist;
1709 int len, off, lencscd, lenseg, leninl, nseg;
1711 CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
1713 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1714 cdb = (struct scsi_extended_copy *)ctsio->cdb;
1715 len = scsi_4btoul(cdb->length);
1717 if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
1718 len > sizeof(struct scsi_extended_copy_lid4_data) +
1719 TPC_MAX_LIST + TPC_MAX_INLINE) {
1720 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1721 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1726 * If we've got a kernel request that hasn't been malloced yet,
1727 * malloc it and tell the caller the data buffer is here.
1729 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1730 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1731 ctsio->kern_data_len = len;
1732 ctsio->kern_total_len = len;
1733 ctsio->kern_data_resid = 0;
1734 ctsio->kern_rel_offset = 0;
1735 ctsio->kern_sg_entries = 0;
1736 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1737 ctsio->be_move_done = ctl_config_move_done;
1738 ctl_datamove((union ctl_io *)ctsio);
1740 return (CTL_RETVAL_COMPLETE);
1743 data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
1744 lencscd = scsi_2btoul(data->cscd_list_length);
1745 lenseg = scsi_2btoul(data->segment_list_length);
1746 leninl = scsi_2btoul(data->inline_data_length);
1747 if (len < sizeof(struct scsi_extended_copy_lid4_data) +
1748 lencscd + lenseg + leninl ||
1749 leninl > TPC_MAX_INLINE) {
1750 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1751 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1754 if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1755 ctl_set_sense(ctsio, /*current_error*/ 1,
1756 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1757 /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1760 if (lencscd + lenseg > TPC_MAX_LIST) {
1761 ctl_set_param_len_error(ctsio);
1765 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1766 list->service_action = cdb->service_action;
1767 value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1768 if (value != NULL && strcmp(value, "on") == 0)
1769 list->init_port = -1;
1771 list->init_port = ctsio->io_hdr.nexus.targ_port;
1772 list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1773 list->list_id = scsi_4btoul(data->list_identifier);
1774 list->flags = data->flags;
1775 list->params = ctsio->kern_data_ptr;
1776 list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1777 ptr = &data->data[lencscd];
1778 for (nseg = 0, off = 0; off < lenseg; nseg++) {
1779 if (nseg >= TPC_MAX_SEGS) {
1781 ctl_set_sense(ctsio, /*current_error*/ 1,
1782 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1783 /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1786 list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
1787 off += sizeof(struct scsi_ec_segment) +
1788 scsi_2btoul(list->seg[nseg]->descr_length);
1790 list->inl = &data->data[lencscd + lenseg];
1791 list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1793 list->leninl = leninl;
1794 list->ctsio = ctsio;
1796 mtx_lock(&lun->lun_lock);
1797 if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1798 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1799 if (tlist != NULL && !tlist->completed) {
1800 mtx_unlock(&lun->lun_lock);
1802 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1803 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1807 if (tlist != NULL) {
1808 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1812 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1813 mtx_unlock(&lun->lun_lock);
1816 return (CTL_RETVAL_COMPLETE);
1819 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1820 free(ctsio->kern_data_ptr, M_CTL);
1821 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1823 ctl_done((union ctl_io *)ctsio);
1824 return (CTL_RETVAL_COMPLETE);
1828 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
1829 struct scsi_token *token)
1832 struct scsi_vpd_id_descriptor *idd = NULL;
1833 struct scsi_ec_cscd_id *cscd;
1834 struct scsi_read_capacity_data_long *dtsd;
1837 scsi_ulto4b(ROD_TYPE_AUR, token->type);
1838 scsi_ulto2b(0x01f8, token->length);
1839 scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
1841 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1842 lun->lun_devid->data, lun->lun_devid->len,
1843 scsi_devid_is_lun_naa);
1844 if (idd == NULL && lun->lun_devid)
1845 idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1846 lun->lun_devid->data, lun->lun_devid->len,
1847 scsi_devid_is_lun_eui64);
1849 cscd = (struct scsi_ec_cscd_id *)&token->body[8];
1850 cscd->type_code = EC_CSCD_ID;
1851 cscd->luidt_pdt = T_DIRECT;
1852 memcpy(&cscd->codeset, idd, 4 + idd->length);
1853 scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
1855 scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
1856 scsi_u64to8b(len, &token->body[48]);
1858 /* ROD token device type specific data (RC16 without first field) */
1859 dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
1860 scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
1861 dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
1862 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
1863 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
1864 dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
1866 if (port->target_devid) {
1867 targid_len = port->target_devid->len;
1868 memcpy(&token->body[120], port->target_devid->data, targid_len);
1871 arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
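/*
 * The rest of the token body is filled with random bytes; presumably
 * this makes it impractical for an initiator to fabricate a token that
 * would be accepted here later.
 */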
1875 ctl_populate_token(struct ctl_scsiio *ctsio)
1877 struct scsi_populate_token *cdb;
1878 struct scsi_populate_token_data *data;
1879 struct ctl_softc *softc;
1880 struct ctl_lun *lun;
1881 struct ctl_port *port;
1882 struct tpc_list *list, *tlist;
1883 struct tpc_token *token;
1886 CTL_DEBUG_PRINT(("ctl_populate_token\n"));
1888 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1889 softc = lun->ctl_softc;
1890 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
1891 cdb = (struct scsi_populate_token *)ctsio->cdb;
1892 len = scsi_4btoul(cdb->length);
1894 if (len < sizeof(struct scsi_populate_token_data) ||
1895 len > sizeof(struct scsi_populate_token_data) +
1896 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
1897 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1898 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1903 * If we've got a kernel request that hasn't been malloced yet,
1904 * malloc it and tell the caller the data buffer is here.
1906 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1907 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1908 ctsio->kern_data_len = len;
1909 ctsio->kern_total_len = len;
1910 ctsio->kern_data_resid = 0;
1911 ctsio->kern_rel_offset = 0;
1912 ctsio->kern_sg_entries = 0;
1913 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1914 ctsio->be_move_done = ctl_config_move_done;
1915 ctl_datamove((union ctl_io *)ctsio);
1917 return (CTL_RETVAL_COMPLETE);
1920 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
1921 lendesc = scsi_2btoul(data->range_descriptor_length);
1922 if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
1923 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1924 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1928 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
1929 scsi_4btoul(cdb->list_identifier),
1930 data->flags, scsi_4btoul(data->inactivity_timeout),
1931 scsi_4btoul(data->rod_type),
1932 scsi_2btoul(data->range_descriptor_length));
1934 if ((data->flags & EC_PT_RTV) &&
1935 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
1936 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1937 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
1941 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1942 list->service_action = cdb->service_action;
1943 list->init_port = ctsio->io_hdr.nexus.targ_port;
1944 list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1945 list->list_id = scsi_4btoul(cdb->list_identifier);
1946 list->flags = data->flags;
1947 list->ctsio = ctsio;
1949 mtx_lock(&lun->lun_lock);
1950 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1951 if (tlist != NULL && !tlist->completed) {
1952 mtx_unlock(&lun->lun_lock);
1954 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1955 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1959 if (tlist != NULL) {
1960 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1963 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1964 mtx_unlock(&lun->lun_lock);
1966 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
1967 token->lun = lun->lun;
1968 token->blocksize = lun->be_lun->blocksize;
1969 token->params = ctsio->kern_data_ptr;
1970 token->range = &data->desc[0];
1971 token->nrange = scsi_2btoul(data->range_descriptor_length) /
1972 sizeof(struct scsi_range_desc);
1973 list->cursectors = tpc_ranges_length(token->range, token->nrange);
1974 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
1975 tpc_create_token(lun, port, list->curbytes,
1976 (struct scsi_token *)token->token);
1978 token->last_active = time_uptime;
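/* Apply the default inactivity timeout and enforce the allowed range. */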
1979 token->timeout = scsi_4btoul(data->inactivity_timeout);
1980 if (token->timeout == 0)
1981 token->timeout = TPC_DFL_TOKEN_TIMEOUT;
1982 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
1983 token->timeout = TPC_MIN_TOKEN_TIMEOUT;
1984 else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) {
1985 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1986 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
1987 /*bit*/ 0);
1988 goto done;
}
1989 memcpy(list->res_token, token->token, sizeof(list->res_token));
1990 list->res_token_valid = 1;
1992 list->completed = 1;
1993 list->last_active = time_uptime;
1994 mtx_lock(&softc->ctl_lock);
1995 TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
1996 mtx_unlock(&softc->ctl_lock);
1997 ctl_set_success(ctsio);
1998 ctl_done((union ctl_io *)ctsio);
1999 return (CTL_RETVAL_COMPLETE);
2001 done:
2002 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2003 free(ctsio->kern_data_ptr, M_CTL);
2004 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2005 }
2006 ctl_done((union ctl_io *)ctsio);
2007 return (CTL_RETVAL_COMPLETE);
2008 }
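/*
 * WRITE USING TOKEN: copy data described by a previously created ROD
 * token (or by the well-known block-device zero token) into the ranges
 * supplied in the parameter data.
 */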
2010 int
2011 ctl_write_using_token(struct ctl_scsiio *ctsio)
2012 {
2013 struct scsi_write_using_token *cdb;
2014 struct scsi_write_using_token_data *data;
2015 struct ctl_softc *softc;
2016 struct ctl_lun *lun;
2017 struct tpc_list *list, *tlist;
2018 struct tpc_token *token;
2019 int len, lendesc;
2021 CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
2023 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
2024 softc = lun->ctl_softc;
2025 cdb = (struct scsi_write_using_token *)ctsio->cdb;
2026 len = scsi_4btoul(cdb->length);
2028 if (len < sizeof(struct scsi_populate_token_data) ||
2029 len > sizeof(struct scsi_populate_token_data) +
2030 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
2031 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
2032 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
2033 goto done;
2034 }
2036 /*
2037 * If we've got a kernel request that hasn't been malloced yet,
2038 * malloc it and tell the caller the data buffer is here.
2039 */
2040 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
2041 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
2042 ctsio->kern_data_len = len;
2043 ctsio->kern_total_len = len;
2044 ctsio->kern_data_resid = 0;
2045 ctsio->kern_rel_offset = 0;
2046 ctsio->kern_sg_entries = 0;
2047 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2048 ctsio->be_move_done = ctl_config_move_done;
2049 ctl_datamove((union ctl_io *)ctsio);
2051 return (CTL_RETVAL_COMPLETE);
2052 }
2054 data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
2055 lendesc = scsi_2btoul(data->range_descriptor_length);
2056 if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
2057 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2058 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
2059 goto done;
2060 }
2061 /*
2062 printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
2063 scsi_4btoul(cdb->list_identifier),
2064 data->flags, scsi_8btou64(data->offset_into_rod),
2065 scsi_2btoul(data->range_descriptor_length));
2066 */
2067 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
2068 list->service_action = cdb->service_action;
2069 list->init_port = ctsio->io_hdr.nexus.targ_port;
2070 list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
2071 list->list_id = scsi_4btoul(cdb->list_identifier);
2072 list->flags = data->flags;
2073 list->params = ctsio->kern_data_ptr;
2074 list->range = &data->desc[0];
2075 list->nrange = scsi_2btoul(data->range_descriptor_length) /
2076 sizeof(struct scsi_range_desc);
2077 list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
2078 list->ctsio = ctsio;
2080 mtx_lock(&lun->lun_lock);
2081 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
2082 if (tlist != NULL && !tlist->completed) {
2083 mtx_unlock(&lun->lun_lock);
2084 free(list, M_CTL);
2085 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2086 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
2087 /*bit*/ 0);
2088 goto done;
2089 }
2090 if (tlist != NULL) {
2091 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
2092 free(tlist, M_CTL);
2093 }
2094 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
2095 mtx_unlock(&lun->lun_lock);
2097 /* Block device zero ROD token -> no token. */
2098 if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
2099 tpc_process(list);
2100 return (CTL_RETVAL_COMPLETE);
2101 }
2103 mtx_lock(&softc->ctl_lock);
2104 TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
2105 if (memcmp(token->token, data->rod_token,
2106 sizeof(data->rod_token)) == 0)
2107 break;
2108 }
2109 if (token != NULL) {
2110 token->active++;
2111 list->token = token;
2112 if (data->flags & EC_WUT_DEL_TKN)
2113 token->timeout = 0;
2114 }
2115 mtx_unlock(&softc->ctl_lock);
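/* No matching token is known here; fail the command. */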
2116 if (token == NULL) {
2117 mtx_lock(&lun->lun_lock);
2118 TAILQ_REMOVE(&lun->tpc_lists, list, links);
2119 mtx_unlock(&lun->lun_lock);
2120 free(list, M_CTL);
2121 ctl_set_sense(ctsio, /*current_error*/ 1,
2122 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
2123 /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
2124 goto done;
2125 }
2127 tpc_process(list);
2128 return (CTL_RETVAL_COMPLETE);
2130 done:
2131 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2132 free(ctsio->kern_data_ptr, M_CTL);
2133 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2134 }
2135 ctl_done((union ctl_io *)ctsio);
2136 return (CTL_RETVAL_COMPLETE);
2137 }
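/*
 * RECEIVE ROD TOKEN INFORMATION: report the status of an earlier
 * POPULATE TOKEN or WRITE USING TOKEN command and, if one was created,
 * return the saved ROD token.
 */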
2139 int
2140 ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
2141 {
2142 struct ctl_lun *lun;
2143 struct scsi_receive_rod_token_information *cdb;
2144 struct scsi_receive_copy_status_lid4_data *data;
2145 struct tpc_list *list;
2146 struct tpc_list list_copy;
2147 uint8_t *ptr;
2148 int retval;
2149 int alloc_len, total_len, token_len;
2150 uint32_t list_id;
2152 CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
2154 cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
2155 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
2157 retval = CTL_RETVAL_COMPLETE;
2159 list_id = scsi_4btoul(cdb->list_identifier);
2160 mtx_lock(&lun->lun_lock);
2161 list = tpc_find_list(lun, list_id,
2162 ctl_get_resindex(&ctsio->io_hdr.nexus));
2163 if (list == NULL) {
2164 mtx_unlock(&lun->lun_lock);
2165 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2166 /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
2167 /*bit*/ 0);
2168 ctl_done((union ctl_io *)ctsio);
2169 return (retval);
2170 }
2171 list_copy = *list;
2172 if (list->completed) {
2173 TAILQ_REMOVE(&lun->tpc_lists, list, links);
2174 free(list, M_CTL);
2175 }
2176 mtx_unlock(&lun->lun_lock);
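/*
 * Build the LID4 copy status response: status and counters first, then
 * the sense data, then (when valid) the saved ROD token.
 */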
2178 token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
2179 total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
2180 alloc_len = scsi_4btoul(cdb->length);
2182 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
2184 ctsio->kern_sg_entries = 0;
2186 if (total_len < alloc_len) {
2187 ctsio->residual = alloc_len - total_len;
2188 ctsio->kern_data_len = total_len;
2189 ctsio->kern_total_len = total_len;
2190 } else {
2191 ctsio->residual = 0;
2192 ctsio->kern_data_len = alloc_len;
2193 ctsio->kern_total_len = alloc_len;
2194 }
2195 ctsio->kern_data_resid = 0;
2196 ctsio->kern_rel_offset = 0;
2198 data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
2199 scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
2200 4 + token_len, data->available_data);
2201 data->response_to_service_action = list_copy.service_action;
2202 if (list_copy.completed) {
2203 if (list_copy.error)
2204 data->copy_command_status = RCS_CCS_ERROR;
2205 else if (list_copy.abort)
2206 data->copy_command_status = RCS_CCS_ABORTED;
2207 else
2208 data->copy_command_status = RCS_CCS_COMPLETED;
2209 } else
2210 data->copy_command_status = RCS_CCS_INPROG_FG;
2211 scsi_ulto2b(list_copy.curops, data->operation_counter);
2212 scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
2213 data->transfer_count_units = RCS_TC_LBAS;
2214 scsi_u64to8b(list_copy.cursectors, data->transfer_count);
2215 scsi_ulto2b(list_copy.curseg, data->segments_processed);
2216 data->length_of_the_sense_data_field = list_copy.sense_len;
2217 data->sense_data_length = list_copy.sense_len;
2218 memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
2220 ptr = &data->sense_data[data->length_of_the_sense_data_field];
2221 scsi_ulto4b(token_len, &ptr[0]);
2222 if (list_copy.res_token_valid) {
2223 scsi_ulto2b(0, &ptr[4]);
2224 memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
2225 }
2226 /*
2227 printf("RRTI(list=%u) valid=%d\n",
2228 scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
2229 */
2230 ctl_set_success(ctsio);
2231 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2232 ctsio->be_move_done = ctl_config_move_done;
2233 ctl_datamove((union ctl_io *)ctsio);
2234 return (retval);
2235 }
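/*
 * REPORT ALL ROD TOKENS: return the list of ROD tokens currently known
 * to this target; only the first 96 bytes of each stored token are
 * reported.
 */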
2237 int
2238 ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
2239 {
2240 struct ctl_softc *softc;
2241 struct ctl_lun *lun;
2242 struct scsi_report_all_rod_tokens *cdb;
2243 struct scsi_report_all_rod_tokens_data *data;
2244 struct tpc_token *token;
2245 int retval;
2246 int alloc_len, total_len, tokens, i;
2248 CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));
2250 cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
2251 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
2252 softc = lun->ctl_softc;
2254 retval = CTL_RETVAL_COMPLETE;
2256 tokens = 0;
2257 mtx_lock(&softc->ctl_lock);
2258 TAILQ_FOREACH(token, &softc->tpc_tokens, links)
2259 tokens++;
2260 mtx_unlock(&softc->ctl_lock);
2261 if (tokens > 512)
2262 tokens = 512;
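/* Size the response for the tokens counted above, 96 bytes per entry. */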
2264 total_len = sizeof(*data) + tokens * 96;
2265 alloc_len = scsi_4btoul(cdb->length);
2267 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
2269 ctsio->kern_sg_entries = 0;
2271 if (total_len < alloc_len) {
2272 ctsio->residual = alloc_len - total_len;
2273 ctsio->kern_data_len = total_len;
2274 ctsio->kern_total_len = total_len;
2275 } else {
2276 ctsio->residual = 0;
2277 ctsio->kern_data_len = alloc_len;
2278 ctsio->kern_total_len = alloc_len;
2279 }
2280 ctsio->kern_data_resid = 0;
2281 ctsio->kern_rel_offset = 0;
2283 data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
2284 i = 0;
2285 mtx_lock(&softc->ctl_lock);
2286 TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
2287 if (i >= tokens)
2288 break;
2289 memcpy(&data->rod_management_token_list[i * 96],
2290 token->token, 96);
2291 i++;
2292 }
2293 mtx_unlock(&softc->ctl_lock);
2294 scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
2295 /*
2296 printf("RART tokens=%d\n", i);
2297 */
2298 ctl_set_success(ctsio);
2299 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2300 ctsio->be_move_done = ctl_config_move_done;
2301 ctl_datamove((union ctl_io *)ctsio);