2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2012-2016 Intel Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
39 #include <sys/ioccom.h>
43 #include <sys/endian.h>
46 #include "nvme_private.h"
48 #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
50 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
51 struct nvme_async_event_request *aer);
54 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
56 struct nvme_qpair *qpair;
60 qpair = &ctrlr->adminq;
62 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
63 qpair->domain = ctrlr->domain;
65 num_entries = NVME_ADMIN_ENTRIES;
66 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
68 * If admin_entries was overridden to an invalid value, revert it
69 * to our default value.
71 if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
72 num_entries > NVME_MAX_ADMIN_ENTRIES) {
73 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
74 "specified\n", num_entries);
75 num_entries = NVME_ADMIN_ENTRIES;
79 * The admin queue's max xfer size is treated differently than the
80 * max I/O xfer size. 16KB is sufficient here - maybe even less?
82 error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
87 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
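/*
 * Annotation: QP() maps a CPU id to an I/O queue index by even division.
 * For example, with 8 CPUs and 2 I/O queues, CPUs 0-3 map to queue 0 and
 * CPUs 4-7 map to queue 1.
 */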
90 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
92 struct nvme_qpair *qpair;
96 int num_entries, num_trackers, max_entries;
99 * The NVMe spec sets a hard limit of 64K max entries, but devices may
100 * specify a smaller limit, so we need to check the MQES field in the
101 * capabilities register. We also have to cap the number of entries to what
102 * the current doorbell stride allows for in BAR 0/1, otherwise the remaining
103 * entries are inaccessible. MQES should reflect this, and this is just a
107 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
108 (1 << (ctrlr->dstrd + 1));
109 num_entries = NVME_IO_ENTRIES;
110 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
111 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
112 mqes = NVME_CAP_LO_MQES(cap_lo);
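/* Annotation: CAP.MQES is 0-based, so the usable queue depth is MQES + 1. */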
113 num_entries = min(num_entries, mqes + 1);
114 num_entries = min(num_entries, max_entries);
116 num_trackers = NVME_IO_TRACKERS;
117 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
119 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
120 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
122 * No need to have more trackers than entries in the submit queue. Note
123 * also that for a queue size of N, we can only have (N-1) commands
124 * outstanding, hence the "-1" here.
126 num_trackers = min(num_trackers, (num_entries-1));
129 * Our best estimate for the maximum number of I/Os that we should
130 * normally have in flight at one time. This should be viewed as a hint,
131 * not a hard limit, and will need to be revisited when the upper layers
132 * of the storage system grow multi-queue support.
134 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
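/*
 * Annotation: for example, 4 I/O queues with 128 trackers each yield a
 * max_hw_pend_io hint of 128 * 4 * 3 / 4 = 384 outstanding commands.
 */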
136 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
137 M_NVME, M_ZERO | M_WAITOK);
139 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
140 qpair = &ctrlr->ioq[i];
143 * Admin queue has ID=0. IO queues start at ID=1 -
144 * hence the 'i+1' here.
147 if (ctrlr->num_io_queues > 1) {
148 /* Find number of CPUs served by this queue. */
149 for (n = 1; QP(ctrlr, c + n) == i; n++)
151 /* Shuffle multiple NVMe devices between CPUs. */
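/*
 * Annotation: each device unit starts at a different offset within the
 * group of CPUs served by this queue, so several NVMe controllers in the
 * same system do not all bind their interrupts to the same CPU.
 */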
152 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
153 qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
155 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
156 qpair->domain = ctrlr->domain;
160 * For I/O queues, use the controller-wide max_xfer_size
161 * calculated in nvme_attach().
163 error = nvme_qpair_construct(qpair, num_entries, num_trackers,
169 * Do not bother binding interrupts if we only have one I/O
170 * interrupt thread for this controller.
172 if (ctrlr->num_io_queues > 1)
173 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
180 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
184 ctrlr->is_failed = true;
185 nvme_admin_qpair_disable(&ctrlr->adminq);
186 nvme_qpair_fail(&ctrlr->adminq);
187 if (ctrlr->ioq != NULL) {
188 for (i = 0; i < ctrlr->num_io_queues; i++) {
189 nvme_io_qpair_disable(&ctrlr->ioq[i]);
190 nvme_qpair_fail(&ctrlr->ioq[i]);
193 nvme_notify_fail_consumers(ctrlr);
197 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
198 struct nvme_request *req)
201 mtx_lock(&ctrlr->lock);
202 STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
203 mtx_unlock(&ctrlr->lock);
204 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
208 nvme_ctrlr_fail_req_task(void *arg, int pending)
210 struct nvme_controller *ctrlr = arg;
211 struct nvme_request *req;
213 mtx_lock(&ctrlr->lock);
214 while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
215 STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
216 mtx_unlock(&ctrlr->lock);
217 nvme_qpair_manual_complete_request(req->qpair, req,
218 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
219 mtx_lock(&ctrlr->lock);
221 mtx_unlock(&ctrlr->lock);
225 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
232 csts = nvme_mmio_read_4(ctrlr, csts);
233 if (csts == 0xffffffff) /* Hot unplug. */
235 if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
238 if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
239 nvme_printf(ctrlr, "controller ready did not become %d "
240 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
250 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
257 cc = nvme_mmio_read_4(ctrlr, cc);
258 csts = nvme_mmio_read_4(ctrlr, csts);
260 en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
261 rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
264 * Per section 3.1.5 of the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
265 * when CSTS.RDY is 1, or transitioning CC.EN from 1 to 0 when
266 * CSTS.RDY is 0, "has undefined results". So make sure that CSTS.RDY
267 * isn't the desired value. Short circuit if we're already disabled.
271 /* EN == 1, wait for RDY == 1 or fail */
272 err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
277 /* EN == 0 already; wait for RDY == 0 */
281 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
284 cc &= ~NVME_CC_REG_EN_MASK;
285 nvme_mmio_write_4(ctrlr, cc, cc);
287 * Some drives have issues with accessing the mmio after we
288 * disable, so delay for a bit after we write the bit to
289 * cope with these issues.
291 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
292 pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
293 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
297 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
306 cc = nvme_mmio_read_4(ctrlr, cc);
307 csts = nvme_mmio_read_4(ctrlr, csts);
309 en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
310 rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
313 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
319 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
321 /* EN == 0 already; wait for RDY == 0 or fail */
322 err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
327 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
329 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
332 /* acqs and asqs are 0-based. */
333 qsize = ctrlr->adminq.num_entries - 1;
336 aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
337 aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
338 nvme_mmio_write_4(ctrlr, aqa, aqa);
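/*
 * Annotation: for example, a 16-entry admin queue writes AQA = 0x000F000F,
 * i.e. the 0-based completion queue size in bits 27:16 and the 0-based
 * submission queue size in bits 11:0.
 */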
341 /* Initialization values for CC */
343 cc |= 1 << NVME_CC_REG_EN_SHIFT;
344 cc |= 0 << NVME_CC_REG_CSS_SHIFT;
345 cc |= 0 << NVME_CC_REG_AMS_SHIFT;
346 cc |= 0 << NVME_CC_REG_SHN_SHIFT;
347 cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
348 cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
350 /* This evaluates to 0, which is according to spec. */
351 cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
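/*
 * Annotation: CC.MPS selects a host memory page size of 2^(12 + MPS) bytes,
 * so a value of 0 selects 4 KiB pages.
 */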
353 nvme_mmio_write_4(ctrlr, cc, cc);
355 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
359 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
363 nvme_admin_qpair_disable(&ctrlr->adminq);
365 * I/O queues are not allocated before the initial HW
366 * reset, so do not try to disable them. Use is_initialized
367 * to determine if this is the initial HW reset.
369 if (ctrlr->is_initialized) {
370 for (i = 0; i < ctrlr->num_io_queues; i++)
371 nvme_io_qpair_disable(&ctrlr->ioq[i]);
376 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
380 nvme_ctrlr_disable_qpairs(ctrlr);
384 err = nvme_ctrlr_disable(ctrlr);
387 return (nvme_ctrlr_enable(ctrlr));
391 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
395 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
397 if (cmpset == 0 || ctrlr->is_failed)
399 * Controller is already resetting or has failed. Return
400 * immediately since there is no need to kick off another
401 * reset in these cases.
405 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
409 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
411 struct nvme_completion_poll_status status;
414 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
415 nvme_completion_poll_cb, &status);
416 nvme_completion_poll(&status);
417 if (nvme_completion_is_error(&status.cpl)) {
418 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
422 /* Convert data to host endian */
423 nvme_controller_data_swapbytes(&ctrlr->cdata);
426 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
427 * controller supports.
429 if (ctrlr->cdata.mdts > 0)
430 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
431 ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
437 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
439 struct nvme_completion_poll_status status;
440 int cq_allocated, sq_allocated;
443 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
444 nvme_completion_poll_cb, &status);
445 nvme_completion_poll(&status);
446 if (nvme_completion_is_error(&status.cpl)) {
447 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
452 * Data in cdw0 is 0-based.
453 * Lower 16-bits indicate number of submission queues allocated.
454 * Upper 16-bits indicate number of completion queues allocated.
456 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
457 cq_allocated = (status.cpl.cdw0 >> 16) + 1;
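/*
 * Annotation: for example, cdw0 == 0x00070007 means the controller
 * allocated 8 submission queues and 8 completion queues.
 */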
460 * Controller may allocate more queues than we requested,
461 * so use the minimum of the number requested and what was
462 * actually allocated.
464 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
465 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
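/*
 * Annotation: rounding the queue count down to a multiple of the NUMA
 * domain count lets the queues be spread evenly across domains.
 */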
466 if (ctrlr->num_io_queues > vm_ndomains)
467 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
473 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
475 struct nvme_completion_poll_status status;
476 struct nvme_qpair *qpair;
479 for (i = 0; i < ctrlr->num_io_queues; i++) {
480 qpair = &ctrlr->ioq[i];
483 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
484 nvme_completion_poll_cb, &status);
485 nvme_completion_poll(&status);
486 if (nvme_completion_is_error(&status.cpl)) {
487 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
492 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
493 nvme_completion_poll_cb, &status);
494 nvme_completion_poll(&status);
495 if (nvme_completion_is_error(&status.cpl)) {
496 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
505 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
507 struct nvme_completion_poll_status status;
508 struct nvme_qpair *qpair;
510 for (int i = 0; i < ctrlr->num_io_queues; i++) {
511 qpair = &ctrlr->ioq[i];
514 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
515 nvme_completion_poll_cb, &status);
516 nvme_completion_poll(&status);
517 if (nvme_completion_is_error(&status.cpl)) {
518 nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
523 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
524 nvme_completion_poll_cb, &status);
525 nvme_completion_poll(&status);
526 if (nvme_completion_is_error(&status.cpl)) {
527 nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
536 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
538 struct nvme_namespace *ns;
541 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
543 nvme_ns_construct(ns, i+1, ctrlr);
550 is_log_page_id_valid(uint8_t page_id)
555 case NVME_LOG_HEALTH_INFORMATION:
556 case NVME_LOG_FIRMWARE_SLOT:
557 case NVME_LOG_CHANGED_NAMESPACE:
558 case NVME_LOG_COMMAND_EFFECT:
559 case NVME_LOG_RES_NOTIFICATION:
560 case NVME_LOG_SANITIZE_STATUS:
568 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
570 uint32_t log_page_size;
575 sizeof(struct nvme_error_information_entry) *
576 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
578 case NVME_LOG_HEALTH_INFORMATION:
579 log_page_size = sizeof(struct nvme_health_information_page);
581 case NVME_LOG_FIRMWARE_SLOT:
582 log_page_size = sizeof(struct nvme_firmware_page);
584 case NVME_LOG_CHANGED_NAMESPACE:
585 log_page_size = sizeof(struct nvme_ns_list);
587 case NVME_LOG_COMMAND_EFFECT:
588 log_page_size = sizeof(struct nvme_command_effects_page);
590 case NVME_LOG_RES_NOTIFICATION:
591 log_page_size = sizeof(struct nvme_res_notification_page);
593 case NVME_LOG_SANITIZE_STATUS:
594 log_page_size = sizeof(struct nvme_sanitize_status_page);
601 return (log_page_size);
605 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
609 if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
610 nvme_printf(ctrlr, "available spare space below threshold\n");
612 if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
613 nvme_printf(ctrlr, "temperature above threshold\n");
615 if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
616 nvme_printf(ctrlr, "device reliability degraded\n");
618 if (state & NVME_CRIT_WARN_ST_READ_ONLY)
619 nvme_printf(ctrlr, "media placed in read only mode\n");
621 if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
622 nvme_printf(ctrlr, "volatile memory backup device failed\n");
624 if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
626 "unknown critical warning(s): state = 0x%02x\n", state);
630 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
632 struct nvme_async_event_request *aer = arg;
633 struct nvme_health_information_page *health_info;
634 struct nvme_ns_list *nsl;
635 struct nvme_error_information_entry *err;
639 * If the log page fetch for some reason completed with an error,
640 * don't pass log page data to the consumers. In practice, this case
641 * should never happen.
643 if (nvme_completion_is_error(cpl))
644 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
645 aer->log_page_id, NULL, 0);
647 /* Convert data to host endian */
648 switch (aer->log_page_id) {
650 err = (struct nvme_error_information_entry *)aer->log_page_buffer;
651 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
652 nvme_error_information_entry_swapbytes(err++);
654 case NVME_LOG_HEALTH_INFORMATION:
655 nvme_health_information_page_swapbytes(
656 (struct nvme_health_information_page *)aer->log_page_buffer);
658 case NVME_LOG_FIRMWARE_SLOT:
659 nvme_firmware_page_swapbytes(
660 (struct nvme_firmware_page *)aer->log_page_buffer);
662 case NVME_LOG_CHANGED_NAMESPACE:
663 nvme_ns_list_swapbytes(
664 (struct nvme_ns_list *)aer->log_page_buffer);
666 case NVME_LOG_COMMAND_EFFECT:
667 nvme_command_effects_page_swapbytes(
668 (struct nvme_command_effects_page *)aer->log_page_buffer);
670 case NVME_LOG_RES_NOTIFICATION:
671 nvme_res_notification_page_swapbytes(
672 (struct nvme_res_notification_page *)aer->log_page_buffer);
674 case NVME_LOG_SANITIZE_STATUS:
675 nvme_sanitize_status_page_swapbytes(
676 (struct nvme_sanitize_status_page *)aer->log_page_buffer);
678 case INTEL_LOG_TEMP_STATS:
679 intel_log_temp_stats_swapbytes(
680 (struct intel_log_temp_stats *)aer->log_page_buffer);
686 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
687 health_info = (struct nvme_health_information_page *)
688 aer->log_page_buffer;
689 nvme_ctrlr_log_critical_warnings(aer->ctrlr,
690 health_info->critical_warning);
692 * Critical warnings reported through the
693 * SMART/health log page are persistent, so
694 * clear the associated bits in the async event
695 * config so that we do not receive repeated
696 * notifications for the same event.
698 aer->ctrlr->async_event_config &=
699 ~health_info->critical_warning;
700 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
701 aer->ctrlr->async_event_config, NULL, NULL);
702 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
704 nsl = (struct nvme_ns_list *)aer->log_page_buffer;
705 for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
706 if (nsl->ns[i] > NVME_MAX_NAMESPACES)
708 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
714 * Pass the cpl data from the original async event completion,
715 * not the log page fetch.
717 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
718 aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
722 * Repost another asynchronous event request to replace the one
723 * that just completed.
725 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
729 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
731 struct nvme_async_event_request *aer = arg;
733 if (nvme_completion_is_error(cpl)) {
735 * Do not retry failed async event requests. This avoids
736 * infinite loops where a new async event request is submitted
737 * to replace the one just failed, only to fail again and
738 * perpetuate the loop.
743 /* Associated log page is in bits 23:16 of completion entry dw0. */
744 aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
746 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
747 " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
750 if (is_log_page_id_valid(aer->log_page_id)) {
751 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
753 memcpy(&aer->cpl, cpl, sizeof(*cpl));
754 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
755 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
756 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
758 /* Wait to notify consumers until after log page is fetched. */
760 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
764 * Repost another asynchronous event request to replace the one
765 * that just completed.
767 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
772 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
773 struct nvme_async_event_request *aer)
775 struct nvme_request *req;
778 req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
782 * Disable timeout here, since asynchronous event requests should by
783 * nature never be timed out.
785 req->timeout = false;
786 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
787 nvme_ctrlr_submit_admin_request(ctrlr, req);
791 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
793 struct nvme_completion_poll_status status;
794 struct nvme_async_event_request *aer;
797 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
798 NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
799 NVME_CRIT_WARN_ST_READ_ONLY |
800 NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
801 if (ctrlr->cdata.ver >= NVME_REV(1, 2))
802 ctrlr->async_event_config |= 0x300;
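/*
 * Annotation: 0x300 sets bits 9:8 of the async event configuration,
 * Namespace Attribute Notices and Firmware Activation Notices, which were
 * added in NVMe 1.2.
 */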
805 nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
806 0, NULL, 0, nvme_completion_poll_cb, &status);
807 nvme_completion_poll(&status);
808 if (nvme_completion_is_error(&status.cpl) ||
809 (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
810 (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
811 nvme_printf(ctrlr, "temperature threshold not supported\n");
813 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
815 nvme_ctrlr_cmd_set_async_event_config(ctrlr,
816 ctrlr->async_event_config, NULL, NULL);
818 /* aerl is a zero-based value, so we need to add 1 here. */
819 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
821 for (i = 0; i < ctrlr->num_aers; i++) {
822 aer = &ctrlr->aer[i];
823 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
828 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
831 ctrlr->int_coal_time = 0;
832 TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
833 &ctrlr->int_coal_time);
835 ctrlr->int_coal_threshold = 0;
836 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
837 &ctrlr->int_coal_threshold);
839 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
840 ctrlr->int_coal_threshold, NULL, NULL);
844 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
846 struct nvme_hmb_chunk *hmbc;
849 if (ctrlr->hmb_desc_paddr) {
850 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
851 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
852 ctrlr->hmb_desc_map);
853 ctrlr->hmb_desc_paddr = 0;
855 if (ctrlr->hmb_desc_tag) {
856 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
857 ctrlr->hmb_desc_tag = NULL;
859 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
860 hmbc = &ctrlr->hmb_chunks[i];
861 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
862 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
865 ctrlr->hmb_nchunks = 0;
866 if (ctrlr->hmb_tag) {
867 bus_dma_tag_destroy(ctrlr->hmb_tag);
868 ctrlr->hmb_tag = NULL;
870 if (ctrlr->hmb_chunks) {
871 free(ctrlr->hmb_chunks, M_NVME);
872 ctrlr->hmb_chunks = NULL;
877 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
879 struct nvme_hmb_chunk *hmbc;
880 size_t pref, min, minc, size;
884 /* Limit HMB to 5% of RAM size per device by default. */
885 max = (uint64_t)physmem * PAGE_SIZE / 20;
886 TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);
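/*
 * Annotation: HMPRE (preferred) and HMMIN (minimum) host memory buffer
 * sizes are reported by the controller in 4 KiB units, hence the
 * multiplication by 4096 below.
 */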
888 min = (long long unsigned)ctrlr->cdata.hmmin * 4096;
889 if (max == 0 || max < min)
891 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * 4096, max);
892 minc = MAX(ctrlr->cdata.hmminds * 4096, PAGE_SIZE);
893 if (min > 0 && ctrlr->cdata.hmmaxd > 0)
894 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
895 ctrlr->hmb_chunk = pref;
898 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, PAGE_SIZE);
899 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
900 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
901 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
902 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
903 ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
904 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
905 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
906 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
908 nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
909 nvme_ctrlr_hmb_free(ctrlr);
913 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
914 hmbc = &ctrlr->hmb_chunks[i];
915 if (bus_dmamem_alloc(ctrlr->hmb_tag,
916 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
918 nvme_printf(ctrlr, "failed to alloc HMB\n");
921 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
922 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
923 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
924 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
926 nvme_printf(ctrlr, "failed to load HMB\n");
929 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
930 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
933 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
934 ctrlr->hmb_chunk / 2 >= minc) {
935 ctrlr->hmb_nchunks = i;
936 nvme_ctrlr_hmb_free(ctrlr);
937 ctrlr->hmb_chunk /= 2;
940 ctrlr->hmb_nchunks = i;
941 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
942 nvme_ctrlr_hmb_free(ctrlr);
946 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
947 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
948 16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
949 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
951 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
952 nvme_ctrlr_hmb_free(ctrlr);
955 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
956 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
957 &ctrlr->hmb_desc_map)) {
958 nvme_printf(ctrlr, "failed to alloc HMB desc\n");
959 nvme_ctrlr_hmb_free(ctrlr);
962 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
963 ctrlr->hmb_desc_vaddr, size, nvme_single_map,
964 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
965 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
966 ctrlr->hmb_desc_map);
967 nvme_printf(ctrlr, "failed to load HMB desc\n");
968 nvme_ctrlr_hmb_free(ctrlr);
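/*
 * Annotation: the loop below fills the host memory descriptor list; each
 * entry carries a chunk's bus address and its size in 4 KiB units, stored
 * little-endian as required by the spec.
 */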
972 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
973 ctrlr->hmb_desc_vaddr[i].addr =
974 htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
975 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / 4096);
977 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
978 BUS_DMASYNC_PREWRITE);
980 nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
981 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
986 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
988 struct nvme_completion_poll_status status;
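/*
 * Annotation: cdw11 (derived from the enable and memret arguments; its
 * construction is elided in this excerpt) carries the Enable Host Memory
 * flag in bit 0 and the Memory Return flag in bit 1 of the Host Memory
 * Buffer feature.
 */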
997 nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
998 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / 4096, ctrlr->hmb_desc_paddr,
999 ctrlr->hmb_desc_paddr >> 32, ctrlr->hmb_nchunks, NULL, 0,
1000 nvme_completion_poll_cb, &status);
1001 nvme_completion_poll(&status);
1002 if (nvme_completion_is_error(&status.cpl))
1003 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
1007 nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
1009 struct nvme_controller *ctrlr = ctrlr_arg;
1010 uint32_t old_num_io_queues;
1014 * Only reset adminq here when we are restarting the
1015 * controller after a reset. During initialization,
1016 * we have already submitted admin commands to get
1017 * the number of I/O queues supported, so we cannot reset
1018 * the adminq again here.
1021 nvme_qpair_reset(&ctrlr->adminq);
1022 nvme_admin_qpair_enable(&ctrlr->adminq);
1025 if (ctrlr->ioq != NULL) {
1026 for (i = 0; i < ctrlr->num_io_queues; i++)
1027 nvme_qpair_reset(&ctrlr->ioq[i]);
1031 * If this reset was triggered by a command timeout during initialization,
1032 * just return here, letting the initialization code fail gracefully.
1034 if (resetting && !ctrlr->is_initialized)
1037 if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1038 nvme_ctrlr_fail(ctrlr);
1043 * The number of qpairs is determined during controller initialization,
1044 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
1045 * HW limit. We call SET_FEATURES again here so that it gets called
1046 * after any reset for controllers that depend on the driver to
1047 * explicitly specify how many queues it will use. This value should
1048 * never change between resets, so panic if that somehow does happen.
1051 old_num_io_queues = ctrlr->num_io_queues;
1052 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1053 nvme_ctrlr_fail(ctrlr);
1057 if (old_num_io_queues != ctrlr->num_io_queues) {
1058 panic("num_io_queues changed from %u to %u",
1059 old_num_io_queues, ctrlr->num_io_queues);
1063 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1064 nvme_ctrlr_hmb_alloc(ctrlr);
1065 if (ctrlr->hmb_nchunks > 0)
1066 nvme_ctrlr_hmb_enable(ctrlr, true, false);
1067 } else if (ctrlr->hmb_nchunks > 0)
1068 nvme_ctrlr_hmb_enable(ctrlr, true, true);
1070 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1071 nvme_ctrlr_fail(ctrlr);
1075 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1076 nvme_ctrlr_fail(ctrlr);
1080 nvme_ctrlr_configure_aer(ctrlr);
1081 nvme_ctrlr_configure_int_coalescing(ctrlr);
1083 for (i = 0; i < ctrlr->num_io_queues; i++)
1084 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1088 nvme_ctrlr_start_config_hook(void *arg)
1090 struct nvme_controller *ctrlr = arg;
1093 * Reset controller twice to ensure we do a transition from cc.en==1 to
1094 * cc.en==0. This is because we don't really know what status the
1095 * controller was left in when boot handed off to OS. Linux doesn't do
1096 * this, however. If we adopt that policy, see also nvme_ctrlr_resume().
1098 if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
1100 nvme_ctrlr_fail(ctrlr);
1101 config_intrhook_disestablish(&ctrlr->config_hook);
1102 ctrlr->config_hook.ich_arg = NULL;
1106 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1109 nvme_qpair_reset(&ctrlr->adminq);
1110 nvme_admin_qpair_enable(&ctrlr->adminq);
1112 if (nvme_ctrlr_identify(ctrlr) == 0 &&
1113 nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1114 nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1115 nvme_ctrlr_start(ctrlr, false);
1119 nvme_sysctl_initialize_ctrlr(ctrlr);
1120 config_intrhook_disestablish(&ctrlr->config_hook);
1121 ctrlr->config_hook.ich_arg = NULL;
1123 ctrlr->is_initialized = 1;
1124 nvme_notify_new_controller(ctrlr);
1128 nvme_ctrlr_reset_task(void *arg, int pending)
1130 struct nvme_controller *ctrlr = arg;
1133 nvme_printf(ctrlr, "resetting controller\n");
1134 status = nvme_ctrlr_hw_reset(ctrlr);
1136 * Use pause instead of DELAY, so that we yield to any nvme interrupt
1137 * handlers on this CPU that were blocked on a qpair lock. We want
1138 * all nvme interrupts completed before proceeding with restarting the
1141 * XXX - any way to guarantee the interrupt handlers have quiesced?
1143 pause("nvmereset", hz / 10);
1145 nvme_ctrlr_start(ctrlr, true);
1147 nvme_ctrlr_fail(ctrlr);
1149 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1153 * Poll all the queues enabled on the device for completion.
1156 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1160 nvme_qpair_process_completions(&ctrlr->adminq);
1162 for (i = 0; i < ctrlr->num_io_queues; i++)
1163 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1164 nvme_qpair_process_completions(&ctrlr->ioq[i]);
1168 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1169 * there's only a single vector. While we're polling, we mask further
1170 * interrupts in the controller.
1173 nvme_ctrlr_intx_handler(void *arg)
1175 struct nvme_controller *ctrlr = arg;
1177 nvme_mmio_write_4(ctrlr, intms, 1);
1178 nvme_ctrlr_poll(ctrlr);
1179 nvme_mmio_write_4(ctrlr, intmc, 1);
1183 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1185 struct nvme_pt_command *pt = arg;
1186 struct mtx *mtx = pt->driver_lock;
1189 bzero(&pt->cpl, sizeof(pt->cpl));
1190 pt->cpl.cdw0 = cpl->cdw0;
1192 status = cpl->status;
1193 status &= ~NVME_STATUS_P_MASK;
1194 pt->cpl.status = status;
1197 pt->driver_lock = NULL;
1203 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1204 struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1207 struct nvme_request *req;
1209 struct buf *buf = NULL;
1211 vm_offset_t addr, end;
1215 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
1216 * pages. Ensure this request has fewer than MAXPHYS bytes when
1217 * extended to full pages.
1219 addr = (vm_offset_t)pt->buf;
1220 end = round_page(addr + pt->len);
1221 addr = trunc_page(addr);
1222 if (end - addr > MAXPHYS)
1225 if (pt->len > ctrlr->max_xfer_size) {
1226 nvme_printf(ctrlr, "pt->len (%d) "
1227 "exceeds max_xfer_size (%d)\n", pt->len,
1228 ctrlr->max_xfer_size);
1231 if (is_user_buffer) {
1233 * Ensure the user buffer is wired for the duration of
1234 * this pass-through command.
1237 buf = getpbuf(NULL);
1238 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1239 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
1243 req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1246 req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1249 req = nvme_allocate_request_null(nvme_pt_done, pt);
1251 /* Assume user space already converted to little-endian */
1252 req->cmd.opc = pt->cmd.opc;
1253 req->cmd.fuse = pt->cmd.fuse;
1254 req->cmd.rsvd2 = pt->cmd.rsvd2;
1255 req->cmd.rsvd3 = pt->cmd.rsvd3;
1256 req->cmd.cdw10 = pt->cmd.cdw10;
1257 req->cmd.cdw11 = pt->cmd.cdw11;
1258 req->cmd.cdw12 = pt->cmd.cdw12;
1259 req->cmd.cdw13 = pt->cmd.cdw13;
1260 req->cmd.cdw14 = pt->cmd.cdw14;
1261 req->cmd.cdw15 = pt->cmd.cdw15;
1263 req->cmd.nsid = htole32(nsid);
1265 mtx = mtx_pool_find(mtxpool_sleep, pt);
1266 pt->driver_lock = mtx;
1269 nvme_ctrlr_submit_admin_request(ctrlr, req);
1271 nvme_ctrlr_submit_io_request(ctrlr, req);
1274 while (pt->driver_lock != NULL)
1275 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1288 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1291 struct nvme_controller *ctrlr;
1292 struct nvme_pt_command *pt;
1294 ctrlr = cdev->si_drv1;
1297 case NVME_RESET_CONTROLLER:
1298 nvme_ctrlr_reset(ctrlr);
1300 case NVME_PASSTHROUGH_CMD:
1301 pt = (struct nvme_pt_command *)arg;
1302 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1303 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1306 struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1307 strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1308 sizeof(gnsid->cdev));
1309 gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
1313 case NVME_GET_MAX_XFER_SIZE:
1314 *(uint64_t *)arg = ctrlr->max_xfer_size;
1323 static struct cdevsw nvme_ctrlr_cdevsw = {
1324 .d_version = D_VERSION,
1326 .d_ioctl = nvme_ctrlr_ioctl
1330 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1332 struct make_dev_args md_args;
1335 uint32_t to, vs, pmrcap;
1337 int status, timeout_period;
1341 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1342 if (bus_get_domain(dev, &ctrlr->domain) != 0)
1345 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1347 device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
1348 cap_lo, NVME_CAP_LO_MQES(cap_lo),
1349 NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
1350 NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
1351 (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
1352 (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
1353 NVME_CAP_LO_TO(cap_lo));
1355 cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1357 device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
1358 "MPSMIN %u, MPSMAX %u%s%s\n", cap_hi,
1359 NVME_CAP_HI_DSTRD(cap_hi),
1360 NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
1361 NVME_CAP_HI_CSS(cap_hi),
1362 NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
1363 NVME_CAP_HI_MPSMIN(cap_hi),
1364 NVME_CAP_HI_MPSMAX(cap_hi),
1365 NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
1366 NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "");
1369 vs = nvme_mmio_read_4(ctrlr, vs);
1370 device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
1371 NVME_MAJOR(vs), NVME_MINOR(vs));
1373 if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
1374 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1375 device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
1376 "PMRWBM %x, PMRTO %u%s\n", pmrcap,
1377 NVME_PMRCAP_BIR(pmrcap),
1378 NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
1379 NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
1380 NVME_PMRCAP_PMRTU(pmrcap),
1381 NVME_PMRCAP_PMRWBM(pmrcap),
1382 NVME_PMRCAP_PMRTO(pmrcap),
1383 NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
1386 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1388 mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
1389 ctrlr->min_page_size = 1 << (12 + mpsmin);
1391 /* Get ready timeout value from controller, in units of 500ms. */
1392 to = NVME_CAP_LO_TO(cap_lo) + 1;
1393 ctrlr->ready_timeout_in_ms = to * 500;
1395 timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1396 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1397 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1398 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1399 ctrlr->timeout_period = timeout_period;
1401 nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1402 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1404 ctrlr->enable_aborts = 0;
1405 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1407 ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1408 if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1411 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1412 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1413 taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1415 ctrlr->is_resetting = 0;
1416 ctrlr->is_initialized = 0;
1417 ctrlr->notification_sent = 0;
1418 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1419 TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1420 STAILQ_INIT(&ctrlr->fail_req);
1421 ctrlr->is_failed = false;
1423 make_dev_args_init(&md_args);
1424 md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1425 md_args.mda_uid = UID_ROOT;
1426 md_args.mda_gid = GID_WHEEL;
1427 md_args.mda_mode = 0600;
1428 md_args.mda_unit = device_get_unit(dev);
1429 md_args.mda_si_drv1 = (void *)ctrlr;
1430 status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
1431 device_get_unit(dev));
1439 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1443 if (ctrlr->resource == NULL)
1447 * Check whether it is a hot unplug or a clean driver detach.
1448 * If the device is not there anymore, skip any shutdown commands.
1450 gone = (nvme_mmio_read_4(ctrlr, csts) == 0xffffffff);
1452 nvme_ctrlr_fail(ctrlr);
1454 nvme_notify_fail_consumers(ctrlr);
1456 for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1457 nvme_ns_destruct(&ctrlr->ns[i]);
1460 destroy_dev(ctrlr->cdev);
1462 if (ctrlr->is_initialized) {
1464 if (ctrlr->hmb_nchunks > 0)
1465 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1466 nvme_ctrlr_delete_qpairs(ctrlr);
1468 nvme_ctrlr_hmb_free(ctrlr);
1470 if (ctrlr->ioq != NULL) {
1471 for (i = 0; i < ctrlr->num_io_queues; i++)
1472 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1473 free(ctrlr->ioq, M_NVME);
1475 nvme_admin_qpair_destroy(&ctrlr->adminq);
1478 * Notify the controller of a shutdown, even though this is due to
1479 * a driver unload, not a system shutdown (this path is not invoked
1480 * during shutdown). This ensures the controller receives a
1481 * shutdown notification in case the system is shut down before
1482 * reloading the driver.
1485 nvme_ctrlr_shutdown(ctrlr);
1488 nvme_ctrlr_disable(ctrlr);
1490 if (ctrlr->taskqueue)
1491 taskqueue_free(ctrlr->taskqueue);
1494 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1497 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1498 rman_get_rid(ctrlr->res), ctrlr->res);
1500 if (ctrlr->bar4_resource != NULL) {
1501 bus_release_resource(dev, SYS_RES_MEMORY,
1502 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1505 bus_release_resource(dev, SYS_RES_MEMORY,
1506 ctrlr->resource_id, ctrlr->resource);
1509 mtx_destroy(&ctrlr->lock);
1513 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1517 int ticks = 0, timeout;
1519 cc = nvme_mmio_read_4(ctrlr, cc);
1520 cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1521 cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1522 nvme_mmio_write_4(ctrlr, cc, cc);
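/*
 * Annotation: RTD3E is the controller's reported shutdown latency in
 * microseconds; convert it to ticks, rounding up, and fall back to
 * 5 seconds when the controller does not report a value.
 */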
1524 timeout = ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1525 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000;
1527 csts = nvme_mmio_read_4(ctrlr, csts);
1528 if (csts == 0xffffffff) /* Hot unplug. */
1530 if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1532 if (ticks++ > timeout) {
1533 nvme_printf(ctrlr, "did not complete shutdown within"
1534 " %d ticks of notification\n", timeout);
1537 pause("nvme shn", 1);
1542 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1543 struct nvme_request *req)
1546 nvme_qpair_submit_request(&ctrlr->adminq, req);
1550 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1551 struct nvme_request *req)
1553 struct nvme_qpair *qpair;
1555 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1556 nvme_qpair_submit_request(qpair, req);
1560 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1563 return (ctrlr->dev);
1566 const struct nvme_controller_data *
1567 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1570 return (&ctrlr->cdata);
1574 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1579 * Can't touch failed controllers, so treat them as already suspended.
1581 if (ctrlr->is_failed)
1585 * We don't want the reset taskqueue running, since it does similar
1586 * things, so prevent it from running after we start. Wait for any reset
1587 * that may have been started to complete. The reset process we follow
1588 * will ensure that any new I/O will queue and be given to the hardware
1589 * after we resume (though there should be none).
1591 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1592 pause("nvmesusp", 1);
1595 "Competing reset task didn't finish. Try again later.\n");
1596 return (EWOULDBLOCK);
1599 if (ctrlr->hmb_nchunks > 0)
1600 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1603 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
1604 * delete the hardware I/O queues, and then shut down. This properly
1605 * flushes any metadata the drive may have stored so it can survive
1606 * having its power removed and prevents the unsafe shutdown count from
1607 * incrementing. Once we delete the qpairs, we have to disable them
1608 * before shutting down. The delay is out of paranoia in
1609 * nvme_ctrlr_hw_reset, and is repeated here (though we should have no
1610 * pending I/O that the delay copes with).
1612 nvme_ctrlr_delete_qpairs(ctrlr);
1613 nvme_ctrlr_disable_qpairs(ctrlr);
1615 nvme_ctrlr_shutdown(ctrlr);
1621 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1625 * Can't touch failed controllers, so nothing to do to resume.
1627 if (ctrlr->is_failed)
1631 * Have to reset the hardware twice, just like we do on attach. See
1632 * nvme_attach() for why.
1634 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1636 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1640 * Now that we've reset the hardware, we can restart the controller. Any
1641 * I/O that was pending is requeued. Any admin commands are aborted with
1642 * an error. Once we've restarted, take the controller out of reset.
1644 nvme_ctrlr_start(ctrlr, true);
1645 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1650 * Since we can't bring the controller out of reset, announce and fail
1651 * the controller. However, we have to return success for the resume
1652 * itself, due to questionable APIs.
1654 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1655 nvme_ctrlr_fail(ctrlr);
1656 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);