/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/domainset.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

typedef enum error_print { ERROR_PRINT_NONE, ERROR_PRINT_NO_RETRY, ERROR_PRINT_ALL } error_print_t;
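/*
 * DO_NOT_RETRY is passed as the 'dnr' argument when a tracker is completed
 * manually, setting the Do Not Retry bit in the synthesized completion
 * status so the command is failed rather than resubmitted.
 */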
#define DO_NOT_RETRY	1

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

struct nvme_opcode_string {
	uint16_t	opc;
	const char *	str;
};

static struct nvme_opcode_string admin_opcode[] = {
	{ NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ NVME_OPC_ABORT, "ABORT" },
	{ NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
	{ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
	{ NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" },
	{ NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
	{ NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
	{ NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
	{ NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
	{ NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
	{ NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
	{ NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
	{ NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ NVME_OPC_SANITIZE, "SANITIZE" },
	{ NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
	{ NVME_OPC_FLUSH, "FLUSH" },
	{ NVME_OPC_WRITE, "WRITE" },
	{ NVME_OPC_READ, "READ" },
	{ NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ NVME_OPC_COMPARE, "COMPARE" },
	{ NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
	{ NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ NVME_OPC_VERIFY, "VERIFY" },
	{ NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
	{ NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
	{ NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
	{ NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
	{ 0xFFFF, "IO COMMAND" }
};

static const char *
get_admin_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = admin_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = io_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
	case NVME_OPC_WRITE_ZEROES:
	case NVME_OPC_VERIFY:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
		    ((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
		    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
	case NVME_OPC_RESERVATION_REGISTER:
	case NVME_OPC_RESERVATION_REPORT:
	case NVME_OPC_RESERVATION_ACQUIRE:
	case NVME_OPC_RESERVATION_RELEASE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, le32toh(cmd->nsid));
		break;
	}
}

void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{

	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
	if (nvme_verbose_cmd_dump) {
		nvme_printf(qpair->ctrlr,
		    "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
		    cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
		    (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
		nvme_printf(qpair->ctrlr,
		    "cdw10: %#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
		    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
		    cmd->cdw15);
	}
}

struct nvme_status_string {
	uint16_t	sc;
	const char *	str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
	{ NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" }, /* sic: identifier spelling follows nvme.h */
	{ NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
	{ NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
	{ NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
	{ NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
	{ NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
	{ NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
	{ NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
	{ NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },

	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
	{ NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
	{ NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
	{ NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" },
	{ NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
	{ NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
	{ NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
	{ NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
	{ NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" },
	{ NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIER INVALID" },
	{ NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },

	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static struct nvme_status_string path_related_status[] = {
	{ NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
	{ NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
	{ NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
	{ NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
	{ NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
	{ NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
	{ NVME_SC_COMMAND_ABOTHED_BY_HOST, "COMMAND ABORTED BY HOST" }, /* sic: identifier spelling follows nvme.h */
	{ 0xFFFF, "PATH RELATED" },
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_PATH_RELATED:
		entry = path_related_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	uint8_t sct, sc;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);

	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid,
	    cpl->cdw0);
}

static bool
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);	/* Do Not Retry Bit */

	/*
	 * TODO: spec is not clear how commands that are aborted due
	 * to TLER will be marked. So for now, it seems
	 * NAMESPACE_NOT_READY is the only case where we should
	 * look at the DNR bit. Requests failed with ABORTED_BY_REQUEST
	 * set the DNR bit correctly since the driver controls that.
	 */
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
		return (0);
	case NVME_SCT_PATH_RELATED:
		switch (sc) {
		case NVME_SC_INTERNAL_PATH_ERROR:
			if (dnr)
				return (0);
			else
				return (1);
		default:
			return (0);
		}
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}

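/*
 * Complete a tracker: print the command and completion on (non-retried)
 * errors, invoke the request callback unless the command will be retried,
 * recycle the tracker, and submit the next queued request if the controller
 * is not resetting.
 */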
static void
nvme_qpair_complete_tracker(struct nvme_tracker *tr,
    struct nvme_completion *cpl, error_print_t print_on_error)
{
	struct nvme_qpair *qpair = tr->qpair;
	struct nvme_request	*req;
	bool	retry, error, retriable;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retriable = nvme_completion_is_retry(cpl);
	retry = error && retriable && req->retries < nvme_retry_count;
	if (retry)
		qpair->num_retries++;
	if (error && req->retries >= nvme_retry_count && retriable)
		qpair->num_failures++;

	if (error && (print_on_error == ERROR_PRINT_ALL ||
	    (!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (!retry) {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_sync(qpair->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, cpl);
	}

	mtx_lock(&qpair->lock);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);
		}

		nvme_free_request(req);

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 * try to submit queued requests here - let the reset logic
		 * handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

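/*
 * Synthesize a completion from the given status fields and complete the
 * tracker with it; used to abort or fail commands that the hardware will
 * never answer.
 */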
void
nvme_qpair_manual_complete_tracker(
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    error_print_t print_on_error)
{
	struct nvme_qpair *qpair = tr->qpair;
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));

	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
	cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
	nvme_qpair_complete_tracker(tr, &cpl, print_on_error);
}

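/*
 * Synthesize a completion for a request that never reached the hardware
 * (no tracker was ever assigned) and hand it to the request's callback.
 */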
void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc)
{
	struct nvme_completion	cpl;
	bool			error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

	error = nvme_completion_is_error(&cpl);

	if (error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}

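/*
 * Drain the completion queue: consume every entry whose phase bit matches
 * the phase we are looking for, then ring the completion-queue doorbell.
 * The return value indicates whether any completions were consumed.
 */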
bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	cpl;
	int done = 0;
	bool in_panic = dumping || SCHEDULER_STOPPED();

	/*
	 * qpair is not enabled, likely because a controller reset is in
	 * progress. Ignore the interrupt - any I/O that was associated with
	 * this interrupt will get retried when the reset is complete. Any
	 * pending completions for when we're in startup will be completed
	 * as soon as initialization is complete and we start sending commands
	 * to the device.
	 */
	if (qpair->recovery_state != RECOVERY_NONE) {
		qpair->num_ignored++;
		return (false);
	}

	/*
	 * Sanity check initialization. After we reset the hardware, the phase
	 * is defined to be 1. So if we get here with zero prior calls and the
	 * phase is 0, it means that we've lost a race between the
	 * initialization and the ISR running. With the phase wrong, we'll
	 * process a bunch of completions that aren't really completions,
	 * leading to a KASSERT below.
	 */
	KASSERT(!(qpair->num_intr_handler_calls == 0 && qpair->phase == 0),
	    ("%s: Phase wrong for first interrupt call.",
	    device_get_nameunit(qpair->ctrlr->dev)));

	qpair->num_intr_handler_calls++;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * A panic can stop the CPU this routine is running on at any point. If
	 * we're called during a panic, complete the cq_head wrap protocol for
	 * the case where we are interrupted just after the increment at 1
	 * below, but before we can reset cq_head to zero at 2. Also cope with
	 * the case where we do the zero at 2, but may or may not have done the
	 * phase adjustment at step 3. The panic machinery flushes all pending
	 * memory writes, so we can make these strong ordering assumptions
	 * that would otherwise be unwise if we were racing in real time.
	 */
	if (__predict_false(in_panic)) {
		if (qpair->cq_head == qpair->num_entries) {
			/*
			 * Here we know that we need to zero cq_head and then
			 * negate the phase: since the atomic_store_rel at 2
			 * below never reset cq_head to zero, the phase flip
			 * at 3 cannot have happened yet either.
			 */
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		} else if (qpair->cq_head == 0) {
			/*
			 * In this case, we know that the assignment at 2
			 * happened below, but we don't know whether the phase
			 * flip at 3 happened or not. To recover, we look at
			 * the last completion entry and set the phase to the
			 * opposite of the phase it has. This gets us back in
			 * sync.
			 */
			cpl = qpair->cpl[qpair->num_entries - 1];
			nvme_completion_swapbytes(&cpl);
			qpair->phase = !NVME_STATUS_GET_P(cpl.status);
		}
	}

	while (1) {
		uint16_t status;

		/*
		 * We need to do this dance to avoid a race between the host and
		 * the device where the device overtakes the host while the host
		 * is reading this record, leaving the status field 'new' and
		 * the sqhd and cid fields potentially stale. If the phase
		 * doesn't match, that means status hasn't yet been updated and
		 * we'll get any pending changes next time. It also means that
		 * the phase must be the same the second time. We have to sync
		 * before reading to ensure any bouncing completes.
		 */
		status = le16toh(qpair->cpl[qpair->cq_head].status);
		if (NVME_STATUS_GET_P(status) != qpair->phase)
			break;

		bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		cpl = qpair->cpl[qpair->cq_head];
		nvme_completion_swapbytes(&cpl);

		KASSERT(
		    NVME_STATUS_GET_P(status) == NVME_STATUS_GET_P(cpl.status),
		    ("Phase unexpectedly inconsistent"));

		if (cpl.cid < qpair->num_trackers)
			tr = qpair->act_tr[cpl.cid];
		else
			tr = NULL;

		done++;
		if (tr != NULL) {
			nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL);
			qpair->sq_head = cpl.sqhd;
		} else if (!in_panic) {
			/*
			 * A missing tracker is normally an error. However, a
			 * panic can stop the CPU this routine is running on
			 * after completing an I/O but before updating
			 * qpair->cq_head at 1 below. Later, we re-enter this
			 * routine to poll I/O associated with the kernel
			 * dump. We find that the tr has been set to null before
			 * calling the completion routine. If it hasn't
			 * completed (or it triggers a panic), then '1' below
			 * won't have updated cq_head. Rather than panic again,
			 * ignore this condition because it's not unexpected.
			 */
			nvme_printf(qpair->ctrlr,
			    "cpl (cid = %u) does not map to outstanding cmd\n",
			    cpl.cid);
			/* nvme_dump_completion expects device endianness */
			nvme_dump_completion(&qpair->cpl[qpair->cq_head]);
			KASSERT(0, ("received completion for unknown cmd"));
		}

		/*
		 * There are a number of races with the following (see above)
		 * when the system panics. We compensate for each one of them
		 * by using the atomic store to force strong ordering (at
		 * least when viewed in the aftermath of a panic).
		 */
		if (++qpair->cq_head == qpair->num_entries) {		/* 1 */
			atomic_store_rel_int(&qpair->cq_head, 0);	/* 2 */
			qpair->phase = !qpair->phase;			/* 3 */
		}
	}

	if (done != 0) {
		bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
		    qpair->cq_hdbl_off, qpair->cq_head);
	}

	return (done != 0);
}

static void
nvme_qpair_msi_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

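/*
 * Construct a qpair: create the DMA tags, allocate and map the contiguous
 * submission/completion/PRP memory, build the tracker pool, and hook up the
 * interrupt handler when more than one MSI vector is available.
 */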
int
nvme_qpair_construct(struct nvme_qpair *qpair,
    uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	size_t			cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t		queuemem_phys, prpmem_phys, list_phys;
	uint8_t			*queuemem, *prpmem, *prp_list;
	int			i, err;

	qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size,
	    btoc(ctrlr->max_xfer_size) + 1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, PAGE_SIZE);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, PAGE_SIZE);
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 * embedded in the command (prp1), and the rest of the PRP entries
	 * will be in a list pointed to by the command (prp2).
	 */
	prpsz = sizeof(uint64_t) * btoc(ctrlr->max_xfer_size);
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}
	bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
		goto out;
	}

	qpair->num_intr_handler_calls = 0;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->num_ignored = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	callout_init(&qpair->timer, 1);
	qpair->timer_armed = false;
	qpair->recovery_state = RECOVERY_WAITING;

	/*
	 * Calculate the stride of the doorbell register. Many emulators set
	 * this value to correspond to a cache line. However, some hardware
	 * has set it to various small values.
	 */
	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1));
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {
		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, PAGE_SIZE);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
		}

		tr = malloc_domainset(sizeof(*tr), M_NVME,
		    DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain),
	    M_ZERO | M_WAITOK);

	if (ctrlr->msi_count > 1) {
		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 * the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = qpair->vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		if (qpair->res == NULL) {
			nvme_printf(ctrlr, "unable to allocate MSI\n");
			goto out;
		}
		if (bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) {
			nvme_printf(ctrlr, "unable to setup MSI\n");
			goto out;
		}
		if (qpair->id == 0) {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "admin");
		} else {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "io%d", qpair->id - 1);
		}
	}

	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

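/*
 * Tear down everything nvme_qpair_construct() set up. Safe to call on a
 * partially constructed qpair, since each resource is checked before it is
 * released.
 */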
static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	callout_drain(&qpair->timer);

	if (qpair->tag) {
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
		qpair->tag = NULL;
	}

	if (qpair->act_tr) {
		free(qpair->act_tr, M_NVME);
		qpair->act_tr = NULL;
	}

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
	}

	if (qpair->dma_tag) {
		bus_dma_tag_destroy(qpair->dma_tag);
		qpair->dma_tag = NULL;
	}

	if (qpair->dma_tag_payload) {
		bus_dma_tag_destroy(qpair->dma_tag_payload);
		qpair->dma_tag_payload = NULL;
	}

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);

	if (qpair->res) {
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);
		qpair->res = NULL;
	}
}

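/*
 * Manually complete any outstanding Asynchronous Event Request commands,
 * which the controller would otherwise hold indefinitely, so the admin
 * queue can drain before it is disabled or destroyed.
 */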
static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    ERROR_PRINT_NONE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}

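/*
 * Periodic timer for a qpair with outstanding commands. Drives the recovery
 * state machine: RECOVERY_NONE scans for missed deadlines, RECOVERY_START
 * distinguishes a missed interrupt from a wedged controller (resetting the
 * latter), and RECOVERY_WAITING idles until the reset completes.
 */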
static void
nvme_qpair_timeout(void *arg)
{
	struct nvme_qpair	*qpair = arg;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	struct nvme_tracker	*tr;
	struct nvme_tracker	*tr_temp;
	sbintime_t		now;
	bool			idle;
	uint32_t		csts;
	uint8_t			cfs;

	mtx_lock(&qpair->lock);
	idle = TAILQ_EMPTY(&qpair->outstanding_tr);

	switch (qpair->recovery_state) {
	case RECOVERY_NONE:
		if (idle)
			break;
		now = getsbinuptime();
		TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
			if (now > tr->deadline && tr->deadline != 0) {
				/*
				 * We're now past our earliest deadline. We
				 * need to do expensive things to cope, but
				 * next time. Flag that and close the door to
				 * any further processing.
				 */
				qpair->recovery_state = RECOVERY_START;
				nvme_printf(ctrlr, "RECOVERY_START %jd vs %jd\n",
				    (uintmax_t)now, (uintmax_t)tr->deadline);
				break;
			}
		}
		break;
	case RECOVERY_START:
		/*
		 * Read csts to get the value of cfs - controller fatal status.
		 * If there is no fatal status, try to call the completion
		 * routine, and if it completes transactions, report a missed
		 * interrupt and return (this may need to be rate limited).
		 * Otherwise, if aborts are enabled and the controller is not
		 * reporting fatal status, abort the command. Otherwise, just
		 * reset the controller and hope for the best.
		 */
		csts = nvme_mmio_read_4(ctrlr, csts);
		cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
		if (cfs) {
			nvme_printf(ctrlr, "Controller in fatal status, resetting\n");
			qpair->recovery_state = RECOVERY_RESET;
			break;
		}
		mtx_unlock(&qpair->lock);
		if (nvme_qpair_process_completions(qpair)) {
			nvme_printf(ctrlr, "Completions present in output without an interrupt\n");
			qpair->recovery_state = RECOVERY_NONE;
		} else {
			nvme_printf(ctrlr, "timeout with nothing complete, resetting\n");
			qpair->recovery_state = RECOVERY_RESET;
			mtx_lock(&qpair->lock);
			break;
		}
		mtx_lock(&qpair->lock);
		break;
	case RECOVERY_RESET:
		/*
		 * If we get here due to a possible surprise hot-unplug event,
		 * then we let nvme_ctrlr_reset confirm and fail the
		 * controller.
		 */
		nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
		    (csts == 0xffffffff) ? " and possible hot unplug" :
		    (cfs ? " and fatal error status" : ""));
		nvme_printf(ctrlr, "RECOVERY_WAITING\n");
		qpair->recovery_state = RECOVERY_WAITING;
		nvme_ctrlr_reset(ctrlr);
		break;
	case RECOVERY_WAITING:
		nvme_printf(ctrlr, "waiting\n");
		break;
	}

	/*
	 * Rearm the timeout.
	 */
	if (!idle) {
		callout_schedule(&qpair->timer, hz / 2);
	} else {
		qpair->timer_armed = false;
	}
	mtx_unlock(&qpair->lock);
}

/*
 * Submit the tracker to the hardware. Must already be in the
 * outstanding queue when called.
 */
static void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;
	int timeout;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout) {
		if (req->cb_fn == nvme_completion_poll_cb)
			timeout = 1;
		else
			timeout = ctrlr->timeout_period;
		tr->deadline = getsbinuptime() + timeout * SBT_1S;
		if (!qpair->timer_armed) {
			qpair->timer_armed = true;
			callout_reset_on(&qpair->timer, hz / 2,
			    nvme_qpair_timeout, qpair, qpair->cpu);
		}
	} else
		tr->deadline = SBT_MAX;

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
	    qpair->sq_tdbl_off, qpair->sq_tail);
}

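/*
 * bus_dma callback: translate the mapped payload segments into the
 * command's PRP entries (prp1, prp2, and the per-tracker PRP list for
 * transfers spanning more than two pages), then submit the tracker.
 */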
static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately. The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified PAGE_SIZE for alignment and max
	 * segment size when creating the bus dma tags. So here
	 * we can safely just transfer each segment to its
	 * associated PRP entry.
	 */
	tr->req->cmd.prp1 = htole64(seg[0].ds_addr);

	if (nseg == 2) {
		tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    htole64((uint64_t)seg[cur_nseg].ds_addr);
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 * since there is only one segment, but set
		 * it to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	nvme_qpair_submit_tracker(tr->qpair, tr);
}

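/*
 * Core submission path, called with the qpair lock held: take a tracker from
 * the free list (queueing or failing the request when none is available or
 * the qpair is recovering) and start the DMA mapping that ultimately submits
 * the tracker from nvme_payload_map().
 */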
static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);

	if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 * an in-progress controller-level reset or controller
		 * failure.
		 */
		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed, so fail the request.
			 */
			nvme_qpair_manual_complete_request(qpair, req,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 * processed when a tracker frees up via a command
			 * completion or when the controller reset is
			 * completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	if (!qpair->timer_armed)
		tr->deadline = SBT_MAX;
	tr->req = req;
	switch (req->type) {
	case NVME_REQUEST_VADDR:
		KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
		    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
		    req->payload_size, qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_NULL:
		nvme_qpair_submit_tracker(tr->qpair, tr);
		break;
	case NVME_REQUEST_BIO:
		KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
		    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
		    (intmax_t)req->u.bio->bio_bcount,
		    qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_bio returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_CCB:
		err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_ccb returned 0x%x!\n", err);
		break;
	default:
		panic("unknown nvme request type 0x%x\n", req->type);
		break;
	}

	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 * with the qpair lock held.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	mtx_assert(&qpair->lock, MA_OWNED);

	qpair->recovery_state = RECOVERY_NONE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 * bit on completions to 1. So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_tracker	*tr_temp;

	/*
	 * Manually abort each outstanding admin command. Do not retry
	 * admin commands found here, since they will be left over from
	 * a controller reset and it's likely the context in which the
	 * command was issued no longer applies.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
	}

	mtx_lock(&qpair->lock);
	nvme_qpair_enable(qpair);
	mtx_unlock(&qpair->lock);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;

	/*
	 * Manually abort each outstanding I/O. This normally results in a
	 * retry, unless the retry count on the associated request has
	 * reached its limit.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
	}

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr, *tr_temp;

	mtx_lock(&qpair->lock);
	qpair->recovery_state = RECOVERY_WAITING;
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		tr->deadline = SBT_MAX;
	}
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

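/*
 * Fail every queued and outstanding request on the qpair; used once the
 * controller has been declared failed and no further I/O can complete.
 */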
void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_request	*req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker. The abort_tracker path will
		 * do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}