/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>
#define TPC_MAX_CSCDS           64
#define TPC_MAX_SEGS            64
#define TPC_MAX_SEG             0
#define TPC_MAX_LIST            8192
#define TPC_MAX_INLINE          0
#define TPC_MAX_LISTS           255
#define TPC_MAX_IO_SIZE         (1024 * 1024)
#define TPC_MAX_IOCHUNK_SIZE    (TPC_MAX_IO_SIZE * 16)
#define TPC_MIN_TOKEN_TIMEOUT   1
#define TPC_DFL_TOKEN_TIMEOUT   60
#define TPC_MAX_TOKEN_TIMEOUT   600
MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");
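
/*
 * Outcome of parsing an error from a child I/O: the low byte selects
 * retry vs. fail (see TPC_ERR_MASK), and TPC_ERR_NO_DECREMENT asks the
 * retry path in tpc_done() not to consume one of the I/O's retries.
 */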
typedef enum {
    TPC_ERR_RETRY           = 0x000,
    TPC_ERR_FAIL            = 0x001,
    TPC_ERR_MASK            = 0x0ff,
    TPC_ERR_NO_DECREMENT    = 0x100
} tpc_error_action;
TAILQ_HEAD(runl, tpc_io);
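
/*
 * One child I/O issued on behalf of a copy segment.  I/Os that must run
 * after this one completes are chained on the "run" list; "links" keeps
 * every outstanding I/O of a list on tpc_list.allio for cleanup.
 */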
struct tpc_io {
    union ctl_io            *io;
    uint64_t                 lun;
    struct tpc_list         *list;
    struct runl              run;
    TAILQ_ENTRY(tpc_io)      rlinks;
    TAILQ_ENTRY(tpc_io)      links;
};
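
/*
 * In-core state behind a 512-byte ROD token returned by POPULATE TOKEN:
 * the source LUN, its block size and range descriptors, plus the
 * inactivity accounting used by tpc_timeout() to expire the token.
 */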
struct tpc_token {
    uint8_t                  token[512];
    uint64_t                 lun;
    uint32_t                 blocksize;
    uint8_t                 *params;
    struct scsi_range_desc  *range;
    int                      nrange;
    int                      active;
    time_t                   last_active;
    uint32_t                 timeout;
    TAILQ_ENTRY(tpc_token)   links;
};
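
/*
 * State of one EXTENDED COPY or token (PT/WUT) command: the parsed
 * parameter list, progress counters, and the completion status kept
 * around for RECEIVE COPY STATUS until the list times out.
 */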
struct tpc_list {
    uint8_t                  service_action;
    int                      init_port;
    uint32_t                 init_idx, list_id;
    uint8_t                  flags;
    uint8_t                 *params, *inl, *buf;
    struct scsi_ec_cscd     *cscd;
    struct scsi_ec_segment  *seg[TPC_MAX_SEGS];
    int                      ncscd, nseg, leninl, nrange;
    struct tpc_token        *token;
    struct scsi_range_desc  *range;
    off_t                    offset_into_rod;

    int                      curseg, curops, stage;
    off_t                    cursectors, curbytes, segsectors, segbytes;
    int                      tbdio, error, abort, completed;
    time_t                   last_active;
    TAILQ_HEAD(, tpc_io)     allio;
    struct scsi_sense_data   sense_data;
    uint8_t                  sense_len, scsi_status;
    struct ctl_scsiio       *ctsio;
    struct ctl_lun          *lun;
    int                      res_token_valid;
    uint8_t                  res_token[512];
    TAILQ_ENTRY(tpc_list)    links;
};
extern struct ctl_softc *control_softc;
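
/*
 * Once-a-second callout: expires completed lists that were never fetched
 * with RECEIVE COPY STATUS and ROD tokens whose inactivity timeout has
 * passed, then re-arms itself.
 */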
static void
tpc_timeout(void *arg)
{
    struct ctl_softc *softc = arg;
    struct ctl_lun *lun;
    struct tpc_token *token, *ttoken;
    struct tpc_list *list, *tlist;

    /* Free completed lists with expired timeout. */
    STAILQ_FOREACH(lun, &softc->lun_list, links) {
        mtx_lock(&lun->lun_lock);
        TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
            if (!list->completed || time_uptime < list->last_active +
                TPC_DFL_TOKEN_TIMEOUT)
                continue;
            TAILQ_REMOVE(&lun->tpc_lists, list, links);
            free(list, M_CTL);
        }
        mtx_unlock(&lun->lun_lock);
    }

    /* Free inactive ROD tokens with expired timeout. */
    TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
        if (token->active ||
            time_uptime < token->last_active + token->timeout + 1)
            continue;
        TAILQ_REMOVE(&softc->tpc_tokens, token, links);
        free(token->params, M_CTL);
        free(token, M_CTL);
    }
    callout_schedule(&softc->tpc_timeout, hz);
}
void
ctl_tpc_init(struct ctl_softc *softc)
{

    TAILQ_INIT(&softc->tpc_tokens);
    callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
    callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
}
void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
    struct tpc_token *token;

    callout_drain(&softc->tpc_timeout);

    /* Free ROD tokens. */
    mtx_lock(&softc->ctl_lock);
    while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
        TAILQ_REMOVE(&softc->tpc_tokens, token, links);
        free(token->params, M_CTL);
        free(token, M_CTL);
    }
    mtx_unlock(&softc->ctl_lock);
}
void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

    TAILQ_INIT(&lun->tpc_lists);
}
void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
    struct tpc_list *list;
    struct tpc_token *token, *ttoken;

    /* Free lists for this LUN. */
    while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
        TAILQ_REMOVE(&lun->tpc_lists, list, links);
        KASSERT(list->completed,
            ("Not completed TPC (%p) on shutdown", list));
        free(list, M_CTL);
    }

    /* Free ROD tokens for this LUN. */
    mtx_assert(&control_softc->ctl_lock, MA_OWNED);
    TAILQ_FOREACH_SAFE(token, &control_softc->tpc_tokens, links, ttoken) {
        if (token->lun != lun->lun || token->active)
            continue;
        TAILQ_REMOVE(&control_softc->tpc_tokens, token, links);
        free(token->params, M_CTL);
        free(token, M_CTL);
    }
}
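
/*
 * Build the Third Party Copy VPD page (8Fh): one descriptor each for
 * block device ROD limits, supported commands, parameter data, supported
 * descriptors, supported CSCD IDs, ROD token features, supported ROD
 * types and general copy operations, all sized from the TPC_* limits
 * above.
 */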
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
    struct scsi_vpd_tpc *tpc_ptr;
    struct scsi_vpd_tpc_descriptor *d_ptr;
    struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
    struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
    struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
    struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
    struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
    struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
    struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
    struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
    struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
    struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
    struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
    struct ctl_lun *lun;
    int data_len;

    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

    data_len = sizeof(struct scsi_vpd_tpc) +
        sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
        roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
         2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
        sizeof(struct scsi_vpd_tpc_descriptor_pd) +
        roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
        roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
        sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
        sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
        sizeof(struct scsi_vpd_tpc_descriptor_srt) +
        2 * sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
        sizeof(struct scsi_vpd_tpc_descriptor_gco);

    ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
    tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
    ctsio->kern_sg_entries = 0;

    if (data_len < alloc_len) {
        ctsio->residual = alloc_len - data_len;
        ctsio->kern_data_len = data_len;
        ctsio->kern_total_len = data_len;
    } else {
        ctsio->residual = 0;
        ctsio->kern_data_len = alloc_len;
        ctsio->kern_total_len = alloc_len;
    }
    ctsio->kern_data_resid = 0;
    ctsio->kern_rel_offset = 0;
    ctsio->kern_sg_entries = 0;

    /*
     * The control device is always connected.  The disk device, on the
     * other hand, may not be online all the time.
     */
    if (lun != NULL)
        tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
            lun->be_lun->lun_type;
    else
        tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
    tpc_ptr->page_code = SVPD_SCSI_TPC;
    scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

    /* Block Device ROD Limits */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
    bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
    scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
    scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
    scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
    scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
        bdrl_ptr->maximum_inactivity_timeout);
    scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
        bdrl_ptr->default_inactivity_timeout);
    scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
    scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);

    /* Supported commands */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
    scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
    sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
    scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
    scd_ptr = &sc_ptr->descr[0];
    scd_ptr->opcode = EXTENDED_COPY;
    scd_ptr->sa_length = 5;
    scd_ptr->supported_service_actions[0] = EC_EC_LID1;
    scd_ptr->supported_service_actions[1] = EC_EC_LID4;
    scd_ptr->supported_service_actions[2] = EC_PT;
    scd_ptr->supported_service_actions[3] = EC_WUT;
    scd_ptr->supported_service_actions[4] = EC_COA;
    scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
        &scd_ptr->supported_service_actions[scd_ptr->sa_length];
    scd_ptr->opcode = RECEIVE_COPY_STATUS;
    scd_ptr->sa_length = 6;
    scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
    scd_ptr->supported_service_actions[1] = RCS_RCFD;
    scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
    scd_ptr->supported_service_actions[3] = RCS_RCOP;
    scd_ptr->supported_service_actions[4] = RCS_RRTI;
    scd_ptr->supported_service_actions[5] = RCS_RART;

    /* Parameter data. */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
    scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
    scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
    scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
    scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
    scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
    scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

    /* Supported Descriptors */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
    scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
    scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
    sd_ptr->list_length = 4;
    sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
    sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
    sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
    sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

    /* Supported CSCD Descriptor IDs */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
    scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
    scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
    scsi_ulto2b(2, sdid_ptr->list_length);
    scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

    /* ROD Token Features */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
    scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
    scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
    rtf_ptr->remote_tokens = 0;
    scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
    scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
    scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
        rtf_ptr->maximum_token_inactivity_timeout);
    scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
    rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
        &rtf_ptr->type_specific_features;
    rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
    scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
    scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
    scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
    scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
    scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
        rtfb_ptr->optimal_bytes_to_token_per_segment);
    scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
        rtfb_ptr->optimal_bytes_from_token_per_segment);

    /* Supported ROD Tokens */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
    scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
    scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2 * sizeof(*srtd_ptr), srt_ptr->desc_length);
    scsi_ulto2b(2 * sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
    srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
        &srt_ptr->rod_type_descriptors;
    scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
    srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
    scsi_ulto2b(0, srtd_ptr->preference_indicator);
    srtd_ptr++;
    scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
    srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
    scsi_ulto2b(0, srtd_ptr->preference_indicator);

    /* General Copy Operations */
    d_ptr = (struct scsi_vpd_tpc_descriptor *)
        (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
    gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
    scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
    scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
    scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
    scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
    scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
    gco_ptr->data_segment_granularity = 0;
    gco_ptr->inline_data_granularity = 0;

    ctl_set_success(ctsio);
    ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
    ctsio->be_move_done = ctl_config_move_done;
    ctl_datamove((union ctl_io *)ctsio);

    return (CTL_RETVAL_COMPLETE);
}
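
/*
 * RECEIVE COPY OPERATING PARAMETERS: report the same TPC_* limits in the
 * older LID1-style format.
 */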
int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
    struct scsi_receive_copy_operating_parameters *cdb;
    struct scsi_receive_copy_operating_parameters_data *data;
    int retval;
    int alloc_len, total_len;

    CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

    cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

    retval = CTL_RETVAL_COMPLETE;

    total_len = sizeof(*data) + 4;
    alloc_len = scsi_4btoul(cdb->length);

    ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

    ctsio->kern_sg_entries = 0;

    if (total_len < alloc_len) {
        ctsio->residual = alloc_len - total_len;
        ctsio->kern_data_len = total_len;
        ctsio->kern_total_len = total_len;
    } else {
        ctsio->residual = 0;
        ctsio->kern_data_len = alloc_len;
        ctsio->kern_total_len = alloc_len;
    }
    ctsio->kern_data_resid = 0;
    ctsio->kern_rel_offset = 0;

    data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
    scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
    data->snlid = RCOP_SNLID;
    scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
    scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
    scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
    scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
    scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
    scsi_ulto4b(0, data->held_data_limit);
    scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
    scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
    data->maximum_concurrent_copies = TPC_MAX_LISTS;
    data->data_segment_granularity = 0;
    data->inline_data_granularity = 0;
    data->held_data_granularity = 0;
    data->implemented_descriptor_list_length = 4;
    data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
    data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
    data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
    data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

    ctl_set_success(ctsio);
    ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
    ctsio->be_move_done = ctl_config_move_done;
    ctl_datamove((union ctl_io *)ctsio);
    return (retval);
}
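
/*
 * Look up a copy list by list ID for a given initiator; lists created
 * with EC_LIST_ID_USAGE_NONE are never matched, since their IDs are not
 * meant to be queried later.
 */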
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
    struct tpc_list *list;

    mtx_assert(&lun->lun_lock, MA_OWNED);
    TAILQ_FOREACH(list, &lun->tpc_lists, links) {
        if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
             EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
            list->init_idx == init_idx)
            break;
    }
    return (list);
}
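
/*
 * RECEIVE COPY STATUS (LID1): snapshot the list under the LUN lock and
 * free it if already completed, then report progress from the snapshot.
 */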
int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun;
    struct scsi_receive_copy_status_lid1 *cdb;
    struct scsi_receive_copy_status_lid1_data *data;
    struct tpc_list *list;
    struct tpc_list list_copy;
    int retval;
    int alloc_len, total_len;
    uint32_t list_id;

    CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

    cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

    retval = CTL_RETVAL_COMPLETE;

    list_id = cdb->list_identifier;
    mtx_lock(&lun->lun_lock);
    list = tpc_find_list(lun, list_id,
        ctl_get_resindex(&ctsio->io_hdr.nexus));
    if (list == NULL) {
        mtx_unlock(&lun->lun_lock);
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
            /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
            /*bit*/ 0);
        ctl_done((union ctl_io *)ctsio);
        return (retval);
    }
    list_copy = *list;
    if (list->completed) {
        TAILQ_REMOVE(&lun->tpc_lists, list, links);
        free(list, M_CTL);
    }
    mtx_unlock(&lun->lun_lock);

    total_len = sizeof(*data);
    alloc_len = scsi_4btoul(cdb->length);

    ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

    ctsio->kern_sg_entries = 0;

    if (total_len < alloc_len) {
        ctsio->residual = alloc_len - total_len;
        ctsio->kern_data_len = total_len;
        ctsio->kern_total_len = total_len;
    } else {
        ctsio->residual = 0;
        ctsio->kern_data_len = alloc_len;
        ctsio->kern_total_len = alloc_len;
    }
    ctsio->kern_data_resid = 0;
    ctsio->kern_rel_offset = 0;

    data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
    scsi_ulto4b(sizeof(*data) - 4, data->available_data);
    if (list_copy.completed) {
        if (list_copy.error || list_copy.abort)
            data->copy_command_status = RCS_CCS_ERROR;
        else
            data->copy_command_status = RCS_CCS_COMPLETED;
    } else
        data->copy_command_status = RCS_CCS_INPROG;
    scsi_ulto2b(list_copy.curseg, data->segments_processed);
    if (list_copy.curbytes <= UINT32_MAX) {
        data->transfer_count_units = RCS_TC_BYTES;
        scsi_ulto4b(list_copy.curbytes, data->transfer_count);
    } else {
        data->transfer_count_units = RCS_TC_MBYTES;
        scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
    }

    ctl_set_success(ctsio);
    ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
    ctsio->be_move_done = ctl_config_move_done;
    ctl_datamove((union ctl_io *)ctsio);
    return (retval);
}
int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun;
    struct scsi_receive_copy_failure_details *cdb;
    struct scsi_receive_copy_failure_details_data *data;
    struct tpc_list *list;
    struct tpc_list list_copy;
    int retval;
    int alloc_len, total_len;
    uint32_t list_id;

    CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

    cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

    retval = CTL_RETVAL_COMPLETE;

    list_id = cdb->list_identifier;
    mtx_lock(&lun->lun_lock);
    list = tpc_find_list(lun, list_id,
        ctl_get_resindex(&ctsio->io_hdr.nexus));
    if (list == NULL || !list->completed) {
        mtx_unlock(&lun->lun_lock);
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
            /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
            /*bit*/ 0);
        ctl_done((union ctl_io *)ctsio);
        return (retval);
    }
    list_copy = *list;
    TAILQ_REMOVE(&lun->tpc_lists, list, links);
    free(list, M_CTL);
    mtx_unlock(&lun->lun_lock);

    total_len = sizeof(*data) + list_copy.sense_len;
    alloc_len = scsi_4btoul(cdb->length);

    ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

    ctsio->kern_sg_entries = 0;

    if (total_len < alloc_len) {
        ctsio->residual = alloc_len - total_len;
        ctsio->kern_data_len = total_len;
        ctsio->kern_total_len = total_len;
    } else {
        ctsio->residual = 0;
        ctsio->kern_data_len = alloc_len;
        ctsio->kern_total_len = alloc_len;
    }
    ctsio->kern_data_resid = 0;
    ctsio->kern_rel_offset = 0;

    data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
    if (list_copy.completed && (list_copy.error || list_copy.abort)) {
        scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
            data->available_data);
        data->copy_command_status = RCS_CCS_ERROR;
    } else
        scsi_ulto4b(0, data->available_data);
    scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
    memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

    ctl_set_success(ctsio);
    ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
    ctsio->be_move_done = ctl_config_move_done;
    ctl_datamove((union ctl_io *)ctsio);
    return (retval);
}
int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun;
    struct scsi_receive_copy_status_lid4 *cdb;
    struct scsi_receive_copy_status_lid4_data *data;
    struct tpc_list *list;
    struct tpc_list list_copy;
    int retval;
    int alloc_len, total_len;
    uint32_t list_id;

    CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

    cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

    retval = CTL_RETVAL_COMPLETE;

    list_id = scsi_4btoul(cdb->list_identifier);
    mtx_lock(&lun->lun_lock);
    list = tpc_find_list(lun, list_id,
        ctl_get_resindex(&ctsio->io_hdr.nexus));
    if (list == NULL) {
        mtx_unlock(&lun->lun_lock);
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
            /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
            /*bit*/ 0);
        ctl_done((union ctl_io *)ctsio);
        return (retval);
    }
    list_copy = *list;
    if (list->completed) {
        TAILQ_REMOVE(&lun->tpc_lists, list, links);
        free(list, M_CTL);
    }
    mtx_unlock(&lun->lun_lock);

    total_len = sizeof(*data) + list_copy.sense_len;
    alloc_len = scsi_4btoul(cdb->length);

    ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

    ctsio->kern_sg_entries = 0;

    if (total_len < alloc_len) {
        ctsio->residual = alloc_len - total_len;
        ctsio->kern_data_len = total_len;
        ctsio->kern_total_len = total_len;
    } else {
        ctsio->residual = 0;
        ctsio->kern_data_len = alloc_len;
        ctsio->kern_total_len = alloc_len;
    }
    ctsio->kern_data_resid = 0;
    ctsio->kern_rel_offset = 0;

    data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
    scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
        data->available_data);
    data->response_to_service_action = list_copy.service_action;
    if (list_copy.completed) {
        if (list_copy.error)
            data->copy_command_status = RCS_CCS_ERROR;
        else if (list_copy.abort)
            data->copy_command_status = RCS_CCS_ABORTED;
        else
            data->copy_command_status = RCS_CCS_COMPLETED;
    } else
        data->copy_command_status = RCS_CCS_INPROG_FG;
    scsi_ulto2b(list_copy.curops, data->operation_counter);
    scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
    data->transfer_count_units = RCS_TC_BYTES;
    scsi_u64to8b(list_copy.curbytes, data->transfer_count);
    scsi_ulto2b(list_copy.curseg, data->segments_processed);
    data->length_of_the_sense_data_field = list_copy.sense_len;
    data->sense_data_length = list_copy.sense_len;
    memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

    ctl_set_success(ctsio);
    ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
    ctsio->be_move_done = ctl_config_move_done;
    ctl_datamove((union ctl_io *)ctsio);
    return (retval);
}
int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
    struct ctl_lun *lun;
    struct scsi_copy_operation_abort *cdb;
    struct tpc_list *list;
    int retval;
    uint32_t list_id;

    CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

    cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

    retval = CTL_RETVAL_COMPLETE;

    list_id = scsi_4btoul(cdb->list_identifier);
    mtx_lock(&lun->lun_lock);
    list = tpc_find_list(lun, list_id,
        ctl_get_resindex(&ctsio->io_hdr.nexus));
    if (list == NULL) {
        mtx_unlock(&lun->lun_lock);
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
            /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
            /*bit*/ 0);
        ctl_done((union ctl_io *)ctsio);
        return (retval);
    }
    list->abort = 1;
    mtx_unlock(&lun->lun_lock);

    ctl_set_success(ctsio);
    ctl_done((union ctl_io *)ctsio);
    return (retval);
}
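
/*
 * Map a CSCD descriptor index to a LUN number and block size.  Index
 * 0xffff names the LUN the command arrived on; anything else is resolved
 * through the frontend by tpcl_resolve().
 */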
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss)
{

    if (idx == 0xffff) {
        if (ss && list->lun->be_lun)
            *ss = list->lun->be_lun->blocksize;
        return (list->lun->lun);
    }
    if (idx >= list->ncscd)
        return (UINT64_MAX);
    return (tpcl_resolve(list->init_port, &list->cscd[idx], ss));
}
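
/*
 * Block-to-block segment.  Stage 0 builds read/write I/O pairs covering
 * up to TPC_MAX_IO_SIZE each and queues the reads; tpc_done() chains each
 * write after its read.  Stage 1 (re-entered after all I/O completes)
 * frees resources and accounts progress or reports errors.
 */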
static int
tpc_process_b2b(struct tpc_list *list)
{
    struct scsi_ec_segment_b2b *seg;
    struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
    struct tpc_io *tior, *tiow;
    struct runl run, *prun;
    uint64_t sl, dl;
    off_t srclba, dstlba, numbytes, donebytes, roundbytes;
    int numlba;
    uint32_t srcblock, dstblock;

    if (list->stage == 1) {
        while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
            TAILQ_REMOVE(&list->allio, tior, links);
            ctl_free_io(tior->io);
            free(tior, M_CTL);
        }
        free(list->buf, M_CTL);
        if (list->abort) {
            ctl_set_task_aborted(list->ctsio);
            return (CTL_RETVAL_ERROR);
        } else if (list->error) {
            ctl_set_sense(list->ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_COPY_ABORTED,
                /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
            return (CTL_RETVAL_ERROR);
        }
        list->cursectors += list->segsectors;
        list->curbytes += list->segbytes;
        return (CTL_RETVAL_COMPLETE);
    }

    TAILQ_INIT(&list->allio);
    seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
    sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock);
    dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock);
    if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
        ctl_set_sense(list->ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_COPY_ABORTED,
            /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
        return (CTL_RETVAL_ERROR);
    }
    sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
    if (scsi_3btoul(sdstp->block_length) != 0)
        srcblock = scsi_3btoul(sdstp->block_length);
    ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
    if (scsi_3btoul(ddstp->block_length) != 0)
        dstblock = scsi_3btoul(ddstp->block_length);
    numlba = scsi_2btoul(seg->number_of_blocks);
    if (seg->flags & EC_SEG_DC)
        numbytes = (off_t)numlba * dstblock;
    else
        numbytes = (off_t)numlba * srcblock;
    srclba = scsi_8btou64(seg->src_lba);
    dstlba = scsi_8btou64(seg->dst_lba);

//  printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//      (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//      dl, scsi_8btou64(seg->dst_lba));

    if (numbytes == 0)
        return (CTL_RETVAL_COMPLETE);

    if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
        ctl_set_sense(list->ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_COPY_ABORTED,
            /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
        return (CTL_RETVAL_ERROR);
    }

    list->buf = malloc(numbytes, M_CTL, M_WAITOK);
    list->segbytes = numbytes;
    list->segsectors = numbytes / dstblock;
    donebytes = 0;
    TAILQ_INIT(&run);
    prun = &run;
    list->tbdio = 1;
    while (donebytes < numbytes) {
        roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);

        tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
        TAILQ_INIT(&tior->run);
        tior->list = list;
        TAILQ_INSERT_TAIL(&list->allio, tior, links);
        tior->io = tpcl_alloc_io();
        ctl_scsi_read_write(tior->io,
            /*data_ptr*/ &list->buf[donebytes],
            /*data_len*/ roundbytes,
            /*read_op*/ 1,
            /*byte2*/ 0,
            /*minimum_cdb_size*/ 0,
            /*lba*/ srclba + donebytes / srcblock,
            /*num_blocks*/ roundbytes / srcblock,
            /*tag_type*/ CTL_TAG_SIMPLE,
            /*control*/ 0);
        tior->io->io_hdr.retries = 3;
        tior->lun = sl;
        tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

        tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
        TAILQ_INIT(&tiow->run);
        tiow->list = list;
        TAILQ_INSERT_TAIL(&list->allio, tiow, links);
        tiow->io = tpcl_alloc_io();
        ctl_scsi_read_write(tiow->io,
            /*data_ptr*/ &list->buf[donebytes],
            /*data_len*/ roundbytes,
            /*read_op*/ 0,
            /*byte2*/ 0,
            /*minimum_cdb_size*/ 0,
            /*lba*/ dstlba + donebytes / dstblock,
            /*num_blocks*/ roundbytes / dstblock,
            /*tag_type*/ CTL_TAG_SIMPLE,
            /*control*/ 0);
        tiow->io->io_hdr.retries = 3;
        tiow->lun = dl;
        tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

        TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
        TAILQ_INSERT_TAIL(prun, tior, rlinks);
        prun = &tior->run;
        donebytes += roundbytes;
    }

    while ((tior = TAILQ_FIRST(&run)) != NULL) {
        TAILQ_REMOVE(&run, tior, rlinks);
        if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
            panic("tpcl_queue() error");
    }

    list->stage++;
    return (CTL_RETVAL_QUEUED);
}
static int
tpc_process_verify(struct tpc_list *list)
{
    struct scsi_ec_segment_verify *seg;
    struct tpc_io *tio;
    uint64_t sl;

    if (list->stage == 1) {
        while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
            TAILQ_REMOVE(&list->allio, tio, links);
            ctl_free_io(tio->io);
            free(tio, M_CTL);
        }
        if (list->abort) {
            ctl_set_task_aborted(list->ctsio);
            return (CTL_RETVAL_ERROR);
        } else if (list->error) {
            ctl_set_sense(list->ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_COPY_ABORTED,
                /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
            return (CTL_RETVAL_ERROR);
        }
        return (CTL_RETVAL_COMPLETE);
    }

    TAILQ_INIT(&list->allio);
    seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
    sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL);
    if (sl >= CTL_MAX_LUNS) {
        ctl_set_sense(list->ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_COPY_ABORTED,
            /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
        return (CTL_RETVAL_ERROR);
    }

//  printf("Verify %ju\n", sl);

    if ((seg->tur & 0x01) == 0)
        return (CTL_RETVAL_COMPLETE);

    list->tbdio = 1;
    tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
    TAILQ_INIT(&tio->run);
    tio->list = list;
    TAILQ_INSERT_TAIL(&list->allio, tio, links);
    tio->io = tpcl_alloc_io();
    ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
    tio->io->io_hdr.retries = 3;
    tio->lun = sl;
    tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
    list->stage++;
    if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
        panic("tpcl_queue() error");
    return (CTL_RETVAL_QUEUED);
}
static int
tpc_process_register_key(struct tpc_list *list)
{
    struct scsi_ec_segment_register_key *seg;
    struct tpc_io *tio;
    uint64_t dl;
    int datalen;

    if (list->stage == 1) {
        while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
            TAILQ_REMOVE(&list->allio, tio, links);
            ctl_free_io(tio->io);
            free(tio, M_CTL);
        }
        free(list->buf, M_CTL);
        if (list->abort) {
            ctl_set_task_aborted(list->ctsio);
            return (CTL_RETVAL_ERROR);
        } else if (list->error) {
            ctl_set_sense(list->ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_COPY_ABORTED,
                /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
            return (CTL_RETVAL_ERROR);
        }
        return (CTL_RETVAL_COMPLETE);
    }

    TAILQ_INIT(&list->allio);
    seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
    dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL);
    if (dl >= CTL_MAX_LUNS) {
        ctl_set_sense(list->ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_COPY_ABORTED,
            /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
        return (CTL_RETVAL_ERROR);
    }

//  printf("Register Key %ju\n", dl);

    list->tbdio = 1;
    tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
    TAILQ_INIT(&tio->run);
    tio->list = list;
    TAILQ_INSERT_TAIL(&list->allio, tio, links);
    tio->io = tpcl_alloc_io();
    datalen = sizeof(struct scsi_per_res_out_parms);
    list->buf = malloc(datalen, M_CTL, M_WAITOK);
    ctl_scsi_persistent_res_out(tio->io,
        list->buf, datalen, SPRO_REGISTER, -1,
        scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
        /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
    tio->io->io_hdr.retries = 3;
    tio->lun = dl;
    tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
    list->stage++;
    if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
        panic("tpcl_queue() error");
    return (CTL_RETVAL_QUEUED);
}
static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
    off_t length = 0;
    int r;

    for (r = 0; r < nrange; r++)
        length += scsi_4btoul(range[r].length);
    return (length);
}
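
/*
 * Find which range descriptor a given sector offset ("skip") falls into;
 * returns the range index and the offset within it, or -1 if the offset
 * is past the end of all ranges.
 */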
static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
    off_t off;
    int r;

    r = 0;
    off = 0;
    while (r < nrange) {
        if (skip - off < scsi_4btoul(range[r].length)) {
            *srange = r;
            *soffset = skip - off;
            return (0);
        }
        off += scsi_4btoul(range[r].length);
        r++;
    }
    return (-1);
}
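
/*
 * WRITE USING TOKEN with a real (AUR) token: copy one chunk of up to
 * TPC_MAX_IOCHUNK_SIZE per round from the token's source ranges to the
 * destination ranges, resuming at list->cursectors on each re-entry
 * until the destination ranges are exhausted.
 */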
static int
tpc_process_wut(struct tpc_list *list)
{
    struct tpc_io *tio, *tior, *tiow;
    struct runl run, *prun;
    int drange, srange;
    off_t doffset, soffset;
    off_t srclba, dstlba, numbytes, donebytes, roundbytes;
    uint32_t srcblock, dstblock;

    if (list->stage > 0) {
        /* Cleanup after previous rounds. */
        while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
            TAILQ_REMOVE(&list->allio, tio, links);
            ctl_free_io(tio->io);
            free(tio, M_CTL);
        }
        free(list->buf, M_CTL);
        if (list->abort) {
            ctl_set_task_aborted(list->ctsio);
            return (CTL_RETVAL_ERROR);
        } else if (list->error) {
            ctl_set_sense(list->ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_COPY_ABORTED,
                /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
            return (CTL_RETVAL_ERROR);
        }
        list->cursectors += list->segsectors;
        list->curbytes += list->segbytes;
    }

    /* Check where we are on destination ranges list. */
    if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
        &drange, &doffset) != 0)
        return (CTL_RETVAL_COMPLETE);
    dstblock = list->lun->be_lun->blocksize;

    /* Check where we are on source ranges list. */
    srcblock = list->token->blocksize;
    if (tpc_skip_ranges(list->token->range, list->token->nrange,
        list->offset_into_rod + list->cursectors * dstblock / srcblock,
        &srange, &soffset) != 0) {
        ctl_set_sense(list->ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_COPY_ABORTED,
            /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
        return (CTL_RETVAL_ERROR);
    }

    srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
    numbytes = srcblock * omin(TPC_MAX_IOCHUNK_SIZE / srcblock,
        (scsi_4btoul(list->token->range[srange].length) - soffset));
    dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
    numbytes = omin(numbytes,
        dstblock * omin(TPC_MAX_IOCHUNK_SIZE / dstblock,
        (scsi_4btoul(list->range[drange].length) - doffset)));

    if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
        ctl_set_sense(list->ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_COPY_ABORTED,
            /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
        return (CTL_RETVAL_ERROR);
    }

    list->buf = malloc(numbytes, M_CTL, M_WAITOK |
        (list->token == NULL ? M_ZERO : 0));
    list->segbytes = numbytes;
    list->segsectors = numbytes / dstblock;
//  printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//      srclba, dstlba);
    donebytes = 0;
    TAILQ_INIT(&run);
    prun = &run;
    list->tbdio = 1;
    TAILQ_INIT(&list->allio);
    while (donebytes < numbytes) {
        roundbytes = MIN(numbytes - donebytes, TPC_MAX_IO_SIZE);

        tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
        TAILQ_INIT(&tior->run);
        tior->list = list;
        TAILQ_INSERT_TAIL(&list->allio, tior, links);
        tior->io = tpcl_alloc_io();
        ctl_scsi_read_write(tior->io,
            /*data_ptr*/ &list->buf[donebytes],
            /*data_len*/ roundbytes,
            /*read_op*/ 1,
            /*byte2*/ 0,
            /*minimum_cdb_size*/ 0,
            /*lba*/ srclba + donebytes / srcblock,
            /*num_blocks*/ roundbytes / srcblock,
            /*tag_type*/ CTL_TAG_SIMPLE,
            /*control*/ 0);
        tior->io->io_hdr.retries = 3;
        tior->lun = list->token->lun;
        tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

        tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
        TAILQ_INIT(&tiow->run);
        tiow->list = list;
        TAILQ_INSERT_TAIL(&list->allio, tiow, links);
        tiow->io = tpcl_alloc_io();
        ctl_scsi_read_write(tiow->io,
            /*data_ptr*/ &list->buf[donebytes],
            /*data_len*/ roundbytes,
            /*read_op*/ 0,
            /*byte2*/ 0,
            /*minimum_cdb_size*/ 0,
            /*lba*/ dstlba + donebytes / dstblock,
            /*num_blocks*/ roundbytes / dstblock,
            /*tag_type*/ CTL_TAG_SIMPLE,
            /*control*/ 0);
        tiow->io->io_hdr.retries = 3;
        tiow->lun = list->lun->lun;
        tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

        TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
        TAILQ_INSERT_TAIL(prun, tior, rlinks);
        prun = &tior->run;
        donebytes += roundbytes;
    }

    while ((tior = TAILQ_FIRST(&run)) != NULL) {
        TAILQ_REMOVE(&run, tior, rlinks);
        if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
            panic("tpcl_queue() error");
    }

    list->stage++;
    return (CTL_RETVAL_QUEUED);
}
static int
tpc_process_zero_wut(struct tpc_list *list)
{
    struct tpc_io *tio, *tiow;
    struct runl run, *prun;
    int r;
    uint32_t dstblock, len;

    if (list->stage > 0) {
complete:
        /* Cleanup after previous rounds. */
        while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
            TAILQ_REMOVE(&list->allio, tio, links);
            ctl_free_io(tio->io);
            free(tio, M_CTL);
        }
        free(list->buf, M_CTL);
        if (list->abort) {
            ctl_set_task_aborted(list->ctsio);
            return (CTL_RETVAL_ERROR);
        } else if (list->error) {
            ctl_set_sense(list->ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_COPY_ABORTED,
                /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
            return (CTL_RETVAL_ERROR);
        }
        list->cursectors += list->segsectors;
        list->curbytes += list->segbytes;
        return (CTL_RETVAL_COMPLETE);
    }

    dstblock = list->lun->be_lun->blocksize;
    list->buf = malloc(dstblock, M_CTL, M_WAITOK | M_ZERO);
    TAILQ_INIT(&run);
    prun = &run;
    list->tbdio = 1;
    TAILQ_INIT(&list->allio);
    list->segsectors = 0;
    for (r = 0; r < list->nrange; r++) {
        len = scsi_4btoul(list->range[r].length);
        if (len == 0)
            continue;

        tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
        TAILQ_INIT(&tiow->run);
        tiow->list = list;
        TAILQ_INSERT_TAIL(&list->allio, tiow, links);
        tiow->io = tpcl_alloc_io();
        ctl_scsi_write_same(tiow->io,
            /*data_ptr*/ list->buf,
            /*data_len*/ dstblock,
            /*byte2*/ 0,
            /*lba*/ scsi_8btou64(list->range[r].lba),
            /*num_blocks*/ len,
            /*tag_type*/ CTL_TAG_SIMPLE,
            /*control*/ 0);
        tiow->io->io_hdr.retries = 3;
        tiow->lun = list->lun->lun;
        tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

        TAILQ_INSERT_TAIL(prun, tiow, rlinks);
        prun = &tiow->run;
        list->segsectors += len;
    }
    list->segbytes = list->segsectors * dstblock;

    if (TAILQ_EMPTY(&run))
        goto complete;

    while ((tiow = TAILQ_FIRST(&run)) != NULL) {
        TAILQ_REMOVE(&run, tiow, rlinks);
        if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
            panic("tpcl_queue() error");
    }

    list->stage++;
    return (CTL_RETVAL_QUEUED);
}
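
/*
 * Main state machine: run segments (or WUT rounds) until one queues I/O,
 * fails, or the list is done, then record final status and complete the
 * initiating command.
 */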
static void
tpc_process(struct tpc_list *list)
{
    struct ctl_lun *lun = list->lun;
    struct scsi_ec_segment *seg;
    struct ctl_scsiio *ctsio = list->ctsio;
    int retval = CTL_RETVAL_COMPLETE;

    if (list->service_action == EC_WUT) {
        if (list->token != NULL)
            retval = tpc_process_wut(list);
        else
            retval = tpc_process_zero_wut(list);
        if (retval == CTL_RETVAL_QUEUED)
            return;
        if (retval == CTL_RETVAL_ERROR) {
            list->error = 1;
            goto done;
        }
    } else {
//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
        while (list->curseg < list->nseg) {
            seg = list->seg[list->curseg];
            switch (seg->type_code) {
            case EC_SEG_B2B:
                retval = tpc_process_b2b(list);
                break;
            case EC_SEG_VERIFY:
                retval = tpc_process_verify(list);
                break;
            case EC_SEG_REGISTER_KEY:
                retval = tpc_process_register_key(list);
                break;
            default:
                ctl_set_sense(ctsio, /*current_error*/ 1,
                    /*sense_key*/ SSD_KEY_COPY_ABORTED,
                    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
                goto done;
            }
            if (retval == CTL_RETVAL_QUEUED)
                return;
            if (retval == CTL_RETVAL_ERROR) {
                list->error = 1;
                goto done;
            }
            list->curseg++;
            list->stage = 0;
        }
    }

    ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
    free(list->params, M_CTL);
    list->params = NULL;
    if (list->token) {
        mtx_lock(&control_softc->ctl_lock);
        if (--list->token->active == 0)
            list->token->last_active = time_uptime;
        mtx_unlock(&control_softc->ctl_lock);
        list->token = NULL;
    }
    mtx_lock(&lun->lun_lock);
    if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
        TAILQ_REMOVE(&lun->tpc_lists, list, links);
        free(list, M_CTL);
    } else {
        list->completed = 1;
        list->last_active = time_uptime;
        list->sense_data = ctsio->sense_data;
        list->sense_len = ctsio->sense_len;
        list->scsi_status = ctsio->scsi_status;
    }
    mtx_unlock(&lun->lun_lock);

    ctl_done((union ctl_io *)ctsio);
}
/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
    tpc_error_action error_action;
    int error_code, sense_key, asc, ascq;

    /*
     * Default to retrying the command.
     */
    error_action = TPC_ERR_RETRY;

    scsi_extract_sense_len(&io->scsiio.sense_data,
                           io->scsiio.sense_len,
                           &error_code,
                           &sense_key,
                           &asc,
                           &ascq,
                           /*show_errors*/ 1);

    switch (error_code) {
    case SSD_DEFERRED_ERROR:
    case SSD_DESC_DEFERRED_ERROR:
        error_action |= TPC_ERR_NO_DECREMENT;
        break;
    case SSD_CURRENT_ERROR:
    case SSD_DESC_CURRENT_ERROR:
    default:
        switch (sense_key) {
        case SSD_KEY_UNIT_ATTENTION:
            error_action |= TPC_ERR_NO_DECREMENT;
            break;
        case SSD_KEY_HARDWARE_ERROR:
            /*
             * This is our generic "something bad happened"
             * error code.  It often isn't recoverable.
             */
            if ((asc == 0x44) && (ascq == 0x00))
                error_action = TPC_ERR_FAIL;
            break;
        case SSD_KEY_NOT_READY:
            /*
             * If the LUN is powered down, there likely isn't
             * much point in retrying right now.
             */
            if ((asc == 0x04) && (ascq == 0x02))
                error_action = TPC_ERR_FAIL;
            /*
             * If the LUN is offline, there probably isn't much
             * point in retrying, either.
             */
            if ((asc == 0x04) && (ascq == 0x03))
                error_action = TPC_ERR_FAIL;
            break;
        default:
            break;
        }
        break;
    }
    return (error_action);
}
static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
    tpc_error_action error_action = TPC_ERR_RETRY;

    switch (io->io_hdr.io_type) {
    case CTL_IO_SCSI:
        switch (io->io_hdr.status & CTL_STATUS_MASK) {
        case CTL_SCSI_ERROR:
            switch (io->scsiio.scsi_status) {
            case SCSI_STATUS_CHECK_COND:
                error_action = tpc_checkcond_parse(io);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
        break;
    case CTL_IO_TASK:
        break;
    default:
        panic("%s: invalid ctl_io type %d\n", __func__,
              io->io_hdr.io_type);
        break;
    }
    return (error_action);
}
void
tpc_done(union ctl_io *io)
{
    struct tpc_io *tio, *tior;

    /*
     * Very minimal retry logic.  We basically retry if we got an error
     * back, and the retry count is greater than 0.  If we ever want
     * more sophisticated initiator type behavior, the CAM error
     * recovery code in ../common might be helpful.
     */
    tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
    if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
     && (io->io_hdr.retries > 0)) {
        ctl_io_status old_status;
        tpc_error_action error_action;

        error_action = tpc_error_parse(io);
        switch (error_action & TPC_ERR_MASK) {
        case TPC_ERR_FAIL:
            break;
        case TPC_ERR_RETRY:
        default:
            if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
                io->io_hdr.retries--;
            old_status = io->io_hdr.status;
            io->io_hdr.status = CTL_STATUS_NONE;
            io->io_hdr.flags &= ~CTL_FLAG_ABORT;
            io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
            if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
                printf("%s: error returned from ctl_queue()!\n",
                       __func__);
                io->io_hdr.status = old_status;
            } else
                return;
        }
    }

    if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
        tio->list->error = 1;
    else
        atomic_add_int(&tio->list->curops, 1);
    if (!tio->list->error && !tio->list->abort) {
        while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
            TAILQ_REMOVE(&tio->run, tior, rlinks);
            atomic_add_int(&tio->list->tbdio, 1);
            if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
                panic("tpcl_queue() error");
        }
    }
    if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
        tpc_process(tio->list);
}
int
ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
{
    struct scsi_extended_copy *cdb;
    struct scsi_extended_copy_lid1_data *data;
    struct ctl_lun *lun;
    struct tpc_list *list, *tlist;
    uint8_t *ptr;
    char *value;
    int len, off, lencscd, lenseg, leninl, nseg;

    CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));

    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
    cdb = (struct scsi_extended_copy *)ctsio->cdb;
    len = scsi_4btoul(cdb->length);

    if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
        len > sizeof(struct scsi_extended_copy_lid1_data) +
        TPC_MAX_LIST + TPC_MAX_INLINE) {
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
            /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
        goto done;
    }

    /*
     * If we've got a kernel request that hasn't been malloced yet,
     * malloc it and tell the caller the data buffer is here.
     */
    if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
        ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
        ctsio->kern_data_len = len;
        ctsio->kern_total_len = len;
        ctsio->kern_data_resid = 0;
        ctsio->kern_rel_offset = 0;
        ctsio->kern_sg_entries = 0;
        ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        ctsio->be_move_done = ctl_config_move_done;
        ctl_datamove((union ctl_io *)ctsio);

        return (CTL_RETVAL_COMPLETE);
    }

    data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
    lencscd = scsi_2btoul(data->cscd_list_length);
    lenseg = scsi_4btoul(data->segment_list_length);
    leninl = scsi_4btoul(data->inline_data_length);
    if (len < sizeof(struct scsi_extended_copy_lid1_data) +
        lencscd + lenseg + leninl ||
        leninl > TPC_MAX_INLINE) {
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
            /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
        goto done;
    }
    if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
        ctl_set_sense(ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
            /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
        goto done;
    }
    if (lencscd + lenseg > TPC_MAX_LIST) {
        ctl_set_param_len_error(ctsio);
        goto done;
    }

    list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
    list->service_action = cdb->service_action;
    value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
    if (value != NULL && strcmp(value, "on") == 0)
        list->init_port = -1;
    else
        list->init_port = ctsio->io_hdr.nexus.targ_port;
    list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
    list->list_id = data->list_identifier;
    list->flags = data->flags;
    list->params = ctsio->kern_data_ptr;
    list->cscd = (struct scsi_ec_cscd *)&data->data[0];
    ptr = &data->data[lencscd];
    for (nseg = 0, off = 0; off < lenseg; nseg++) {
        if (nseg >= TPC_MAX_SEGS) {
            free(list, M_CTL);
            ctl_set_sense(ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
                /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
            goto done;
        }
        list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
        off += sizeof(struct scsi_ec_segment) +
            scsi_2btoul(list->seg[nseg]->descr_length);
    }
    list->inl = &data->data[lencscd + lenseg];
    list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
    list->nseg = nseg;
    list->leninl = leninl;
    list->ctsio = ctsio;
    list->lun = lun;
    mtx_lock(&lun->lun_lock);
    if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
        tlist = tpc_find_list(lun, list->list_id, list->init_idx);
        if (tlist != NULL && !tlist->completed) {
            mtx_unlock(&lun->lun_lock);
            free(list, M_CTL);
            ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
                /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
                /*bit*/ 0);
            goto done;
        }
        if (tlist != NULL) {
            TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
            free(tlist, M_CTL);
        }
    }
    TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
    mtx_unlock(&lun->lun_lock);

    tpc_process(list);
    return (CTL_RETVAL_COMPLETE);

done:
    ctl_done((union ctl_io *)ctsio);
    return (CTL_RETVAL_COMPLETE);
}
int
ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
{
    struct scsi_extended_copy *cdb;
    struct scsi_extended_copy_lid4_data *data;
    struct ctl_lun *lun;
    struct tpc_list *list, *tlist;
    uint8_t *ptr;
    char *value;
    int len, off, lencscd, lenseg, leninl, nseg;

    CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));

    lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
    cdb = (struct scsi_extended_copy *)ctsio->cdb;
    len = scsi_4btoul(cdb->length);

    if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
        len > sizeof(struct scsi_extended_copy_lid4_data) +
        TPC_MAX_LIST + TPC_MAX_INLINE) {
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
            /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
        goto done;
    }

    /*
     * If we've got a kernel request that hasn't been malloced yet,
     * malloc it and tell the caller the data buffer is here.
     */
    if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
        ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
        ctsio->kern_data_len = len;
        ctsio->kern_total_len = len;
        ctsio->kern_data_resid = 0;
        ctsio->kern_rel_offset = 0;
        ctsio->kern_sg_entries = 0;
        ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        ctsio->be_move_done = ctl_config_move_done;
        ctl_datamove((union ctl_io *)ctsio);

        return (CTL_RETVAL_COMPLETE);
    }

    data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
    lencscd = scsi_2btoul(data->cscd_list_length);
    lenseg = scsi_2btoul(data->segment_list_length);
    leninl = scsi_2btoul(data->inline_data_length);
    if (len < sizeof(struct scsi_extended_copy_lid4_data) +
        lencscd + lenseg + leninl ||
        leninl > TPC_MAX_INLINE) {
        ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
            /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
        goto done;
    }
    if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
        ctl_set_sense(ctsio, /*current_error*/ 1,
            /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
            /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
        goto done;
    }
    if (lencscd + lenseg > TPC_MAX_LIST) {
        ctl_set_param_len_error(ctsio);
        goto done;
    }

    list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
    list->service_action = cdb->service_action;
    value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
    if (value != NULL && strcmp(value, "on") == 0)
        list->init_port = -1;
    else
        list->init_port = ctsio->io_hdr.nexus.targ_port;
    list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
    list->list_id = scsi_4btoul(data->list_identifier);
    list->flags = data->flags;
    list->params = ctsio->kern_data_ptr;
    list->cscd = (struct scsi_ec_cscd *)&data->data[0];
    ptr = &data->data[lencscd];
    for (nseg = 0, off = 0; off < lenseg; nseg++) {
        if (nseg >= TPC_MAX_SEGS) {
            free(list, M_CTL);
            ctl_set_sense(ctsio, /*current_error*/ 1,
                /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
                /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
            goto done;
        }
        list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
        off += sizeof(struct scsi_ec_segment) +
            scsi_2btoul(list->seg[nseg]->descr_length);
    }
    list->inl = &data->data[lencscd + lenseg];
    list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
    list->nseg = nseg;
    list->leninl = leninl;
    list->ctsio = ctsio;
    list->lun = lun;
    mtx_lock(&lun->lun_lock);
    if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
        tlist = tpc_find_list(lun, list->list_id, list->init_idx);
        if (tlist != NULL && !tlist->completed) {
            mtx_unlock(&lun->lun_lock);
            free(list, M_CTL);
            ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
                /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
                /*bit*/ 0);
            goto done;
        }
        if (tlist != NULL) {
            TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
            free(tlist, M_CTL);
        }
    }
    TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
    mtx_unlock(&lun->lun_lock);

    tpc_process(list);
    return (CTL_RETVAL_COMPLETE);

done:
    ctl_done((union ctl_io *)ctsio);
    return (CTL_RETVAL_COMPLETE);
}
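
/*
 * Fill a 512-byte AUR ROD token: a unique ID, a CSCD ID descriptor naming
 * the source LUN, the ROD length, READ CAPACITY(16)-style device data and
 * the target port device ID; the tail is filled with random bytes so
 * tokens are not guessable.
 */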
static void
tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
    struct scsi_token *token)
{
    static int id = 0;
    struct scsi_vpd_id_descriptor *idd = NULL;
    struct scsi_ec_cscd_id *cscd;
    struct scsi_read_capacity_data_long *dtsd;
    int targid_len;

    scsi_ulto4b(ROD_TYPE_AUR, token->type);
    scsi_ulto2b(0x01f8, token->length);
    scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
    if (lun->lun_devid)
        idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
            lun->lun_devid->data, lun->lun_devid->len,
            scsi_devid_is_lun_naa);
    if (idd == NULL && lun->lun_devid)
        idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
            lun->lun_devid->data, lun->lun_devid->len,
            scsi_devid_is_lun_eui64);
    if (idd != NULL) {
        cscd = (struct scsi_ec_cscd_id *)&token->body[8];
        cscd->type_code = EC_CSCD_ID;
        cscd->luidt_pdt = T_DIRECT;
        memcpy(&cscd->codeset, idd, 4 + idd->length);
        scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
    }
    scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
    scsi_u64to8b(len, &token->body[48]);

    /* ROD token device type specific data (RC16 without first field) */
    dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
    scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
    dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
    scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
    if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
        dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

    if (port->target_devid) {
        targid_len = port->target_devid->len;
        memcpy(&token->body[120], port->target_devid->data, targid_len);
    } else
        targid_len = 32;
    arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
}
1822 ctl_populate_token(struct ctl_scsiio *ctsio)
1824 struct scsi_populate_token *cdb;
1825 struct scsi_populate_token_data *data;
1826 struct ctl_lun *lun;
1827 struct ctl_port *port;
1828 struct tpc_list *list, *tlist;
1829 struct tpc_token *token;
1832 CTL_DEBUG_PRINT(("ctl_populate_token\n"));
1834 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1835 port = control_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
1836 cdb = (struct scsi_populate_token *)ctsio->cdb;
1837 len = scsi_4btoul(cdb->length);
1839 if (len < sizeof(struct scsi_populate_token_data) ||
1840 len > sizeof(struct scsi_populate_token_data) +
1841 TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
1842 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1843 /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1848 * If we've got a kernel request that hasn't been malloced yet,
1849 * malloc it and tell the caller the data buffer is here.
1851 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1852 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1853 ctsio->kern_data_len = len;
1854 ctsio->kern_total_len = len;
1855 ctsio->kern_data_resid = 0;
1856 ctsio->kern_rel_offset = 0;
1857 ctsio->kern_sg_entries = 0;
1858 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1859 ctsio->be_move_done = ctl_config_move_done;
1860 ctl_datamove((union ctl_io *)ctsio);
1862 return (CTL_RETVAL_COMPLETE);
1865 data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
1866 lendesc = scsi_2btoul(data->range_descriptor_length);
1867 if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
1868 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1869 /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1873 printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
1874 scsi_4btoul(cdb->list_identifier),
1875 data->flags, scsi_4btoul(data->inactivity_timeout),
1876 scsi_4btoul(data->rod_type),
1877 scsi_2btoul(data->range_descriptor_length));
1879 if ((data->flags & EC_PT_RTV) &&
1880 scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
1881 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1882 /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
1886 list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1887 list->service_action = cdb->service_action;
1888 list->init_port = ctsio->io_hdr.nexus.targ_port;
1889 list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1890 list->list_id = scsi_4btoul(cdb->list_identifier);
1891 list->flags = data->flags;
1892 list->ctsio = ctsio;
1894 mtx_lock(&lun->lun_lock);
1895 tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1896 if (tlist != NULL && !tlist->completed) {
1897 mtx_unlock(&lun->lun_lock);
1899 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1900 /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1904 if (tlist != NULL) {
1905 TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1908 TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1909 mtx_unlock(&lun->lun_lock);
1911 token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
1912 token->lun = lun->lun;
1913 token->blocksize = lun->be_lun->blocksize;
1914 token->params = ctsio->kern_data_ptr;
1915 token->range = &data->desc[0];
1916 token->nrange = scsi_2btoul(data->range_descriptor_length) /
1917 sizeof(struct scsi_range_desc);
1918 list->cursectors = tpc_ranges_length(token->range, token->nrange);
1919 list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
1920 tpc_create_token(lun, port, list->curbytes,
1921 (struct scsi_token *)token->token);
1923 token->last_active = time_uptime;
1924 token->timeout = scsi_4btoul(data->inactivity_timeout);
1925 if (token->timeout == 0)
1926 token->timeout = TPC_DFL_TOKEN_TIMEOUT;
1927 else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
1928 token->timeout = TPC_MIN_TOKEN_TIMEOUT;
1929 else if (token->timeout > TPC_MAX_TOKEN_TIMEOUT) {
1930 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1931 /*command*/ 0, /*field*/ 4, /*bit_valid*/ 0,
1934 memcpy(list->res_token, token->token, sizeof(list->res_token));
1935 list->res_token_valid = 1;
	list->completed = 1;
	list->last_active = time_uptime;
	mtx_lock(&control_softc->ctl_lock);
	TAILQ_INSERT_TAIL(&control_softc->tpc_tokens, token, links);
	mtx_unlock(&control_softc->ctl_lock);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);

done:
	/* Error exit: release the parameter buffer if we allocated one. */
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED)
		free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
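
/*
 * WRITE USING TOKEN.  Write the blocks described by the parameter data's
 * range descriptors, taking the data from the ROD token created by an
 * earlier POPULATE TOKEN (or zeros, for a block device zero ROD token).
 * Handled in two passes: the first allocates the buffer and starts the
 * data-out transfer; the second validates the parameter data, resolves
 * the token under ctl_lock and hands the list to tpc_process().
 */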
int
ctl_write_using_token(struct ctl_scsiio *ctsio)
{
	struct scsi_write_using_token *cdb;
	struct scsi_write_using_token_data *data;
	struct ctl_lun *lun;
	struct tpc_list *list, *tlist;
	struct tpc_token *token;
	int len, lendesc;

	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_write_using_token *)ctsio->cdb;
	len = scsi_4btoul(cdb->length);

	if (len < sizeof(struct scsi_write_using_token_data) ||
	    len > sizeof(struct scsi_write_using_token_data) +
	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		/* We will be called again once the data-out has completed. */
		return (CTL_RETVAL_COMPLETE);
	}

	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
	lendesc = scsi_2btoul(data->range_descriptor_length);
	if (len < sizeof(struct scsi_write_using_token_data) + lendesc) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
2003 printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
2004 scsi_4btoul(cdb->list_identifier),
2005 data->flags, scsi_8btou64(data->offset_into_rod),
2006 scsi_2btoul(data->range_descriptor_length));

	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
	list->service_action = cdb->service_action;
	list->init_port = ctsio->io_hdr.nexus.targ_port;
	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	list->list_id = scsi_4btoul(cdb->list_identifier);
	list->flags = data->flags;
	list->params = ctsio->kern_data_ptr;
	list->range = &data->desc[0];
	list->nrange = scsi_2btoul(data->range_descriptor_length) /
	    sizeof(struct scsi_range_desc);
	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
	list->ctsio = ctsio;
	mtx_lock(&lun->lun_lock);
	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
	if (tlist != NULL && !tlist->completed) {
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
		    /*bit*/ 0);
		goto done;
	}
	if (tlist != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
		free(tlist, M_CTL);
	}
	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
	mtx_unlock(&lun->lun_lock);

	/* Block device zero ROD token -> no token. */
	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
		tpc_process(list);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Find the named token on the global queue and take a reference. */
	mtx_lock(&control_softc->ctl_lock);
	TAILQ_FOREACH(token, &control_softc->tpc_tokens, links) {
		if (memcmp(token->token, data->rod_token,
		    sizeof(data->rod_token)) == 0)
			break;
	}
	if (token != NULL) {
		token->active++;
		list->token = token;
		if (data->flags & EC_WUT_DEL_TKN)
			token->timeout = 0;
	}
	mtx_unlock(&control_softc->ctl_lock);
	if (token == NULL) {
		mtx_lock(&lun->lun_lock);
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		mtx_unlock(&lun->lun_lock);
		free(list, M_CTL);
		ctl_set_sense(ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
		goto done;
	}

	tpc_process(list);
	return (CTL_RETVAL_COMPLETE);

done:
	/* Error exit: release the parameter buffer if we allocated one. */
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED)
		free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
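
/*
 * RECEIVE ROD TOKEN INFORMATION.  Report the status of an earlier
 * POPULATE TOKEN or WRITE USING TOKEN command with the given list
 * identifier and, if the command created a token, return that token.
 * Completed lists are freed after their state is snapshotted, so the
 * final status of a command can be fetched only once.
 */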
int
ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_rod_token_information *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	uint8_t *ptr;
	int retval;
	int alloc_len, total_len, token_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));

	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	token_len = list_copy.res_token_valid ? 2 +
	    sizeof(list_copy.res_token) : 0;
	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
	    4 + token_len, data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_LBAS;
	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ptr = &data->sense_data[data->length_of_the_sense_data_field];
	scsi_ulto4b(token_len, &ptr[0]);
	if (list_copy.res_token_valid) {
		scsi_ulto2b(0, &ptr[4]);
		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
	}
/*
	printf("RRTI(list=%u) valid=%d\n",
	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
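
/*
 * REPORT ALL ROD TOKENS.  Return the 96-byte management entries of all
 * ROD tokens currently known to the target.  The token count is sampled
 * under ctl_lock to size the buffer; tokens created between that sample
 * and the copy loop below are left out by the "i >= tokens" bound.
 */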
int
ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_report_all_rod_tokens *cdb;
	struct scsi_report_all_rod_tokens_data *data;
	struct tpc_token *token;
	int retval;
	int alloc_len, total_len, tokens, i;

	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));

	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	tokens = 0;
	mtx_lock(&control_softc->ctl_lock);
	TAILQ_FOREACH(token, &control_softc->tpc_tokens, links)
		tokens++;
	mtx_unlock(&control_softc->ctl_lock);
	if (tokens > 512)
		tokens = 512;

	total_len = sizeof(*data) + tokens * 96;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
	i = 0;
	mtx_lock(&control_softc->ctl_lock);
	TAILQ_FOREACH(token, &control_softc->tpc_tokens, links) {
		if (i >= tokens)
			break;
		memcpy(&data->rod_management_token_list[i * 96],
		    token->token, 96);
		i++;
	}
	mtx_unlock(&control_softc->ctl_lock);
	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
/*
	printf("RART tokens=%d\n", i);
*/
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}