/*-
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
					   struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

struct nvme_opcode_string {

	uint16_t	opc;
	const char *	str;

};

static struct nvme_opcode_string admin_opcode[] = {
	{ NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ NVME_OPC_ABORT, "ABORT" },
	{ NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
	{ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
	{ NVME_OPC_FLUSH, "FLUSH" },
	{ NVME_OPC_WRITE, "WRITE" },
	{ NVME_OPC_READ, "READ" },
	{ NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ NVME_OPC_COMPARE, "COMPARE" },
	{ NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ 0xFFFF, "IO COMMAND" }
};
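
/*
 * Each opcode table ends in a 0xFFFF sentinel whose string doubles as the
 *  fallback name.  The lookup helpers below walk entries until they either
 *  match the opcode or reach the sentinel, so they always return a valid
 *  string and never run off the end of the table.
 */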

static const char *
get_admin_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = admin_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = io_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    cmd->nsid, cmd->cdw10, cmd->cdw11);
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    cmd->nsid,
		    ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10,
		    (cmd->cdw12 & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    cmd->nsid);
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, cmd->nsid);
		break;
	}
}

void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{

	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
}

struct nvme_status_string {

	uint16_t	sc;
	const char *	str;

};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(cpl->status.sct, cpl->status.sc),
	    cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0);
}

static boolean_t
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	/*
	 * TODO: spec is not clear how commands that are aborted due
	 *  to TLER will be marked.  So for now, it seems
	 *  NAMESPACE_NOT_READY is the only case where we should
	 *  look at the DNR bit.
	 */
	switch (cpl->status.sct) {
	case NVME_SCT_GENERIC:
		switch (cpl->status.sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (cpl->status.dnr)
				return (0);
			else
				return (1);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (0);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (0);
	}
}
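
/*
 * Complete a tracker: print diagnostics for failed commands, invoke the
 *  request's callback, and either resubmit the tracker for a retryable
 *  error or recycle it onto the free list.  Note that the callback is
 *  invoked before the qpair lock is taken, which allows a callback to
 *  submit follow-up I/O without recursing on the lock.
 */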

static void
nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
    struct nvme_completion *cpl, boolean_t print_on_error)
{
	struct nvme_request	*req;
	boolean_t		retry, error;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retry = error && nvme_completion_is_retry(cpl) &&
	    req->retries < nvme_retry_count;

	if (error && print_on_error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (req->cb_fn && !retry)
		req->cb_fn(req->cb_arg, cpl);

	mtx_lock(&qpair->lock);
	callout_stop(&tr->timer);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->type != NVME_REQUEST_NULL)
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 *  try to submit queued requests here - let the reset logic
		 *  handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status.sct = sct;
	cpl.status.sc = sc;
	cpl.status.dnr = dnr;
	nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc,
    boolean_t print_on_error)
{
	struct nvme_completion	cpl;
	boolean_t		error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status.sct = sct;
	cpl.status.sc = sc;

	error = nvme_completion_is_error(&cpl);

	if (error && print_on_error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}
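
/*
 * The completion queue is consumed using the NVMe phase tag protocol:
 *  the controller inverts the phase (P) bit of the entries it writes
 *  each time it wraps the queue, so an entry is new only while its
 *  phase bit matches qpair->phase.  We flip our expected phase whenever
 *  cq_head wraps - e.g. for a 4-entry queue, the expected phase runs
 *  1,1,1,1,0,0,0,0,1,... as entries are consumed.
 */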

void
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_completion	*cpl;

	qpair->num_intr_handler_calls++;

	if (!qpair->is_enabled)
		/*
		 * qpair is not enabled, likely because a controller reset
		 *  is in progress.  Ignore the interrupt - any I/O that was
		 *  associated with this interrupt will get retried when the
		 *  reset is complete.
		 */
		return;

	while (1) {
		cpl = &qpair->cpl[qpair->cq_head];

		if (cpl->status.p != qpair->phase)
			break;

		tr = qpair->act_tr[cpl->cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
			qpair->sq_head = cpl->sqhd;
		} else {
			nvme_printf(qpair->ctrlr,
			    "cpl does not map to outstanding cmd\n");
			nvme_dump_completion(cpl);
			KASSERT(0, ("received completion for unknown cmd\n"));
		}

		if (++qpair->cq_head == qpair->num_entries) {
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		}

		nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
		    qpair->cq_head);
	}
}

static void
nvme_qpair_msix_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}
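
/*
 * Construct a queue pair.  The submission ring, the completion ring and
 *  the per-tracker PRP lists are carved out of a single physically
 *  contiguous DMA allocation, laid out as
 *
 *      [ cmd ring | cpl ring | PRP lists ... ]
 *
 *  with each component rounded up to a page boundary: the controller
 *  requires page-aligned queue bases, and a PRP list may not cross a
 *  page boundary.
 */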

int
nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
    uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker	*tr;
	size_t			cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t		queuemem_phys, prpmem_phys, list_phys;
	uint8_t			*queuemem, *prpmem, *prp_list;
	int			i, err;

	qpair->id = id;
	qpair->vector = vector;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	if (ctrlr->msix_enabled) {

		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 *  the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msix_handler, qpair, &qpair->tag);
	}

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
	    (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, PAGE_SIZE);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, PAGE_SIZE);
	prpsz = sizeof(uint64_t) * NVME_MAX_PRP_LIST_ENTRIES;
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	    BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		goto out;
	}

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {

		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, PAGE_SIZE);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
		}

		tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		callout_init(&tr->timer, 1);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, M_ZERO | M_WAITOK);
	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	if (qpair->tag)
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);

	if (qpair->res)
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
	}

	if (qpair->dma_tag)
		bus_dma_tag_destroy(qpair->dma_tag);

	if (qpair->dma_tag_payload)
		bus_dma_tag_destroy(qpair->dma_tag_payload);

	if (qpair->act_tr)
		free(qpair->act_tr, M_NVME);

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		/* Payload maps were created on the payload tag. */
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(qpair, tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    FALSE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}
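
/*
 * Timeout handling: when a tracked command exceeds timeout_period, the
 *  per-tracker callout fires nvme_timeout().  If aborts are enabled and
 *  the controller is not reporting fatal status, an ABORT admin command
 *  is tried first; if the controller declines the abort (cdw0 == 1) and
 *  the command is still outstanding, nvme_abort_complete() fails the
 *  tracker manually.  Otherwise we fall back to a full controller reset.
 */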

static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker	*tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 *  we requested.  We still need to check the active tracker array,
	 *  to cover race where I/O timed out at same time controller was
	 *  completing the I/O.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 *  abort it for some reason.  Construct a fake completion
		 *  status, and then complete the I/O's tracker manually.
		 */
		nvme_printf(tr->qpair->ctrlr,
		    "abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr->qpair, tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
	}
}

static void
nvme_timeout(void *arg)
{
	struct nvme_tracker	*tr = arg;
	struct nvme_qpair	*qpair = tr->qpair;
	struct nvme_controller	*ctrlr = qpair->ctrlr;
	union csts_register	csts;

	/* Read csts to get value of cfs - controller fatal status. */
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (ctrlr->enable_aborts && csts.bits.cfs == 0) {
		/*
		 * If aborts are enabled, only use them if the controller is
		 *  not reporting fatal status.
		 */
		nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
		    nvme_abort_complete, tr);
	} else
		nvme_ctrlr_reset(ctrlr);
}

void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request	*req;
	struct nvme_controller	*ctrlr;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout)
#if __FreeBSD_version >= 800030
		callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);
#else
		callout_reset(&tr->timer, ctrlr->timeout_period * hz,
		    nvme_timeout, tr);
#endif

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	wmb();
	nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
	    qpair->sq_tail);

	qpair->num_cmds++;
}
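
/*
 * Example of the PRP setup performed below, assuming 4KB pages: a
 *  page-aligned 16KB transfer maps to four busdma segments, so prp1
 *  gets the address of segment 0, prp2 points at this tracker's PRP
 *  list, and the list holds the addresses of segments 1 through 3.  A
 *  transfer that fits in two segments skips the list and puts the
 *  second segment's address directly in prp2.
 */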

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker	*tr = arg;
	uint32_t		cur_nseg;

	/*
	 * If the mapping operation failed, return immediately.  The caller
	 *  is responsible for detecting the error status and failing the
	 *  tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified PAGE_SIZE for alignment and max
	 *  segment size when creating the bus dma tags.  So here
	 *  we can safely just transfer each segment to its
	 *  associated PRP entry.
	 */
	tr->req->cmd.prp1 = seg[0].ds_addr;

	if (nseg == 2) {
		tr->req->cmd.prp2 = seg[1].ds_addr;
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg-1] =
			    (uint64_t)seg[cur_nseg].ds_addr;
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 *  since there is only one segment, but set
		 *  to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	nvme_qpair_submit_tracker(tr->qpair, tr);
}
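
/*
 * Submission path: grab a free tracker, then hand the payload to
 *  busdma, whose callback (nvme_payload_map) fills in the PRP entries
 *  and rings the doorbell.  If no tracker is free, or the qpair is
 *  disabled by a reset, the request is queued and resubmitted later;
 *  if the controller has failed outright, the request is handed off to
 *  a task to be failed there.
 */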

static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker	*tr;
	int			err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || !qpair->is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 *  an in-progress controller-level reset or controller
		 *  failure.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed.  Post the request to a
			 *  task where it will be aborted, so that we do not
			 *  invoke the request's callback in the context
			 *  of the ioctl.
			 */
			nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 *  processed when a tracker frees up via a command
			 *  completion or when the controller reset is
			 *  completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}

		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->req = req;

	switch (req->type) {
	case NVME_REQUEST_VADDR:
		KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
		    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
		    req->payload_size, qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_NULL:
		nvme_qpair_submit_tracker(tr->qpair, tr);
		break;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	case NVME_REQUEST_BIO:
		KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
		    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
		    (intmax_t)req->u.bio->bio_bcount,
		    qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_bio returned 0x%x!\n", err);
		break;
#endif
	case NVME_REQUEST_CCB:
		err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_ccb returned 0x%x!\n", err);
		break;
	default:
		panic("unknown nvme request type 0x%x\n", req->type);
		break;
	}

	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 *  tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 *  with the qpair lock held.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	qpair->is_enabled = TRUE;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set phase
	 *  bit on completions to 1.  So set this to 1 here, indicating
	 *  we're looking for a 1 to know which entries have completed.
	 *  We'll toggle the bit each time the completion queue wraps.
	 */
	qpair->phase = 1;

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;

	/*
	 * Manually abort each outstanding admin command.  Do not retry
	 *  admin commands found here, since they will be left over from
	 *  a controller reset and it's likely the context in which the
	 *  command was issued no longer applies.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
	}

	nvme_qpair_enable(qpair);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request)	temp;
	struct nvme_tracker		*tr;
	struct nvme_tracker		*tr_temp;
	struct nvme_request		*req;

	/*
	 * Manually abort each outstanding I/O.  This normally results in a
	 *  retry, unless the retry count on the associated request has
	 *  been exhausted.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
	}

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	qpair->is_enabled = FALSE;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker	*tr;
	struct nvme_request	*req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, TRUE);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker.  The abort_tracker path will
		 *  do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}