2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (C) 2012-2016 Intel Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
33 #include <sys/param.h>
34 #include <sys/systm.h>
38 #include <sys/ioccom.h>
43 #include <sys/endian.h>
44 #include <machine/stdarg.h>
47 #include "nvme_private.h"
49 #define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
51 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
52 struct nvme_async_event_request *aer);
55 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
57 bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
61 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
67 if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
69 sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
71 sbuf_vprintf(&sb, msg, ap);
73 error = sbuf_finish(&sb);
75 printf("%s\n", sbuf_data(&sb));
78 sbuf_printf(&sb, "name=\"%s\" reason=\"", device_get_nameunit(ctrlr->dev));
80 sbuf_vprintf(&sb, msg, ap);
82 sbuf_printf(&sb, "\"");
83 error = sbuf_finish(&sb);
85 devctl_notify("nvme", "controller", type, sbuf_data(&sb));
90 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
92 struct nvme_qpair *qpair;
96 qpair = &ctrlr->adminq;
98 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
99 qpair->domain = ctrlr->domain;
101 num_entries = NVME_ADMIN_ENTRIES;
102 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
104 * If admin_entries was overridden to an invalid value, revert it
105 * back to our default value.
107 if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
108 num_entries > NVME_MAX_ADMIN_ENTRIES) {
109 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
110 "specified\n", num_entries);
111 num_entries = NVME_ADMIN_ENTRIES;
115 * The admin queue's max xfer size is treated differently than the
116 * max I/O xfer size. 16KB is sufficient here - maybe even less?
118 error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
123 #define QP(ctrlr, c) ((c) * (ctrlr)->num_io_queues / mp_ncpus)
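/*
 * Worked example (hypothetical values, for illustration only): with
 * num_io_queues = 4 and mp_ncpus = 16, QP() maps each CPU c to queue
 * c * 4 / 16, so CPUs 0-3 land on queue 0, CPUs 4-7 on queue 1, and so
 * on, giving 4 CPUs per I/O queue.
 */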
126 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
128 struct nvme_qpair *qpair;
132 int num_entries, num_trackers, max_entries;
135 * NVMe spec sets a hard limit of 64K max entries, but devices may
136 * specify a smaller limit, so we need to check the MQES field in the
137 * capabilities register. We have to cap the number of entries to what the
138 * current stride allows for in BAR 0/1, otherwise the remaining entries
139 * are inaccessible. MQES should reflect this, and this is just a safeguard.
143 (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
144 (1 << (ctrlr->dstrd + 1));
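/*
 * For illustration (hypothetical values): with a 16 KiB BAR, doorbells
 * starting at offset 0x1000, and dstrd == 2 (i.e. CAP.DSTRD == 0, so each
 * SQ tail / CQ head doorbell pair occupies 1 << 3 = 8 bytes), this cap
 * works out to (16384 - 4096) / 8 = 1536.
 */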
145 num_entries = NVME_IO_ENTRIES;
146 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
147 cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
148 mqes = NVME_CAP_LO_MQES(cap_lo);
149 num_entries = min(num_entries, mqes + 1);
150 num_entries = min(num_entries, max_entries);
152 num_trackers = NVME_IO_TRACKERS;
153 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
155 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
156 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
158 * No need to have more trackers than entries in the submit queue. Note
159 * also that for a queue size of N, we can only have (N-1) commands
160 * outstanding, hence the "-1" here.
162 num_trackers = min(num_trackers, (num_entries-1));
165 * Our best estimate for the maximum number of I/Os that we should
166 * normally have in flight at one time. This should be viewed as a hint,
167 * not a hard limit, and will need to be revisited when the upper layers
168 * of the storage system grow multi-queue support.
170 ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
172 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
173 M_NVME, M_ZERO | M_WAITOK);
175 for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
176 qpair = &ctrlr->ioq[i];
179 * Admin queue has ID=0. IO queues start at ID=1 -
180 * hence the 'i+1' here.
183 if (ctrlr->num_io_queues > 1) {
184 /* Find number of CPUs served by this queue. */
185 for (n = 1; QP(ctrlr, c + n) == i; n++)
187 /* Shuffle multiple NVMe devices between CPUs. */
188 qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
189 qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
191 qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
192 qpair->domain = ctrlr->domain;
196 * For I/O queues, use the controller-wide max_xfer_size
197 * calculated in nvme_attach().
199 error = nvme_qpair_construct(qpair, num_entries, num_trackers,
205 * Do not bother binding interrupts if we only have one I/O
206 * interrupt thread for this controller.
208 if (ctrlr->num_io_queues > 1)
209 bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
216 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
220 ctrlr->is_failed = true;
221 nvme_admin_qpair_disable(&ctrlr->adminq);
222 nvme_qpair_fail(&ctrlr->adminq);
223 if (ctrlr->ioq != NULL) {
224 for (i = 0; i < ctrlr->num_io_queues; i++) {
225 nvme_io_qpair_disable(&ctrlr->ioq[i]);
226 nvme_qpair_fail(&ctrlr->ioq[i]);
229 nvme_notify_fail_consumers(ctrlr);
233 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
234 struct nvme_request *req)
237 mtx_lock(&ctrlr->lock);
238 STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
239 mtx_unlock(&ctrlr->lock);
240 if (!ctrlr->is_dying)
241 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
245 nvme_ctrlr_fail_req_task(void *arg, int pending)
247 struct nvme_controller *ctrlr = arg;
248 struct nvme_request *req;
250 mtx_lock(&ctrlr->lock);
251 while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
252 STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
253 mtx_unlock(&ctrlr->lock);
254 nvme_qpair_manual_complete_request(req->qpair, req,
255 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
256 mtx_lock(&ctrlr->lock);
258 mtx_unlock(&ctrlr->lock);
262 * Wait for RDY to change.
264 * Starts by sleeping for 1us and geometrically increases the delay the longer we wait, capping at 1ms.
268 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
270 int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
271 sbintime_t delta_t = SBT_1US;
275 csts = nvme_mmio_read_4(ctrlr, csts);
276 if (csts == NVME_GONE) /* Hot unplug. */
278 if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
281 if (timeout - ticks < 0) {
282 nvme_printf(ctrlr, "controller ready did not become %d "
283 "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
287 pause_sbt("nvmerdy", delta_t, 0, C_PREL(1));
288 delta_t = min(SBT_1MS, delta_t * 3 / 2);
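/*
 * Example of the resulting backoff (approximate): successive sleeps of
 * 1us, 1.5us, 2.25us, 3.37us, ... each 3/2 of the previous one, clamped
 * at 1ms once the delay grows that large.
 */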
295 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
302 cc = nvme_mmio_read_4(ctrlr, cc);
303 csts = nvme_mmio_read_4(ctrlr, csts);
305 en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
306 rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
309 * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
310 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
311 * CSTS.RDY is 0 "has undefined results." So make sure that CSTS.RDY
312 * isn't the desired value. Short circuit if we're already disabled.
315 /* Wait for RDY == 0 or timeout & fail */
318 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
321 /* EN == 1, wait for RDY == 1 or timeout & fail */
322 err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
327 cc &= ~NVME_CC_REG_EN_MASK;
328 nvme_mmio_write_4(ctrlr, cc, cc);
331 * A few drives have firmware bugs that freeze the drive if we access
332 * the mmio too soon after we disable.
334 if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
335 pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS));
336 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
340 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
349 cc = nvme_mmio_read_4(ctrlr, cc);
350 csts = nvme_mmio_read_4(ctrlr, csts);
352 en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
353 rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
356 * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
361 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
364 /* EN == 0 already; wait for RDY == 0 or timeout & fail */
365 err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
369 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
370 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
372 /* acqs and asqs are 0-based. */
373 qsize = ctrlr->adminq.num_entries - 1;
376 aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
377 aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
378 nvme_mmio_write_4(ctrlr, aqa, aqa);
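/*
 * Worked example (hypothetical queue size): an admin queue of 256 entries
 * gives qsize = 255, so aqa = (255 << ACQS_SHIFT) | (255 << ASQS_SHIFT),
 * i.e. 0x00ff00ff with the spec bit positions of 16 and 0.
 */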
380 /* Initialization values for CC */
382 cc |= 1 << NVME_CC_REG_EN_SHIFT;
383 cc |= 0 << NVME_CC_REG_CSS_SHIFT;
384 cc |= 0 << NVME_CC_REG_AMS_SHIFT;
385 cc |= 0 << NVME_CC_REG_SHN_SHIFT;
386 cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
387 cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
390 * Use the Memory Page Size selected during device initialization. Note
391 * that the value stored in mps can be used here without further adjustment.
394 cc |= ctrlr->mps << NVME_CC_REG_MPS_SHIFT;
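/*
 * Worked example (assuming mps == 0, i.e. 4 KiB pages): the assembled
 * value is EN (bit 0) | 6 << 16 (IOSQES) | 4 << 20 (IOCQES) = 0x00460001,
 * with CSS, AMS, SHN and MPS all left at 0.
 */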
396 nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
397 nvme_mmio_write_4(ctrlr, cc, cc);
399 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
403 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
407 nvme_admin_qpair_disable(&ctrlr->adminq);
409 * I/O queues are not allocated before the initial HW
410 * reset, so do not try to disable them. Use is_initialized
411 * to determine if this is the initial HW reset.
413 if (ctrlr->is_initialized) {
414 for (i = 0; i < ctrlr->num_io_queues; i++)
415 nvme_io_qpair_disable(&ctrlr->ioq[i]);
420 nvme_pre_reset(struct nvme_controller *ctrlr)
423 * Make sure that all the ISRs are done before proceeding with the reset
424 * (and also keep any stray interrupts that happen during this window
425 * from racing the reset). For startup, this is a nop, since the
426 * hardware is in a good state. But for recovery, where we randomly
427 * reset the hardware, this ensures that we're not racing the ISRs.
429 mtx_lock(&ctrlr->adminq.recovery);
430 for (int i = 0; i < ctrlr->num_io_queues; i++) {
431 mtx_lock(&ctrlr->ioq[i].recovery);
436 nvme_post_reset(struct nvme_controller *ctrlr)
439 * Reset complete, unblock ISRs
441 mtx_unlock(&ctrlr->adminq.recovery);
442 for (int i = 0; i < ctrlr->num_io_queues; i++) {
443 mtx_unlock(&ctrlr->ioq[i].recovery);
448 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
454 nvme_ctrlr_disable_qpairs(ctrlr);
456 err = nvme_ctrlr_disable(ctrlr);
460 err = nvme_ctrlr_enable(ctrlr);
468 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
472 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
474 if (cmpset == 0 || ctrlr->is_failed)
476 * Controller is already resetting or has failed. Return
477 * immediately since there is no need to kick off another
478 * reset in these cases.
482 if (!ctrlr->is_dying)
483 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
487 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
489 struct nvme_completion_poll_status status;
492 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
493 nvme_completion_poll_cb, &status);
494 nvme_completion_poll(&status);
495 if (nvme_completion_is_error(&status.cpl)) {
496 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
500 /* Convert data to host endian */
501 nvme_controller_data_swapbytes(&ctrlr->cdata);
504 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
505 * controller supports.
507 if (ctrlr->cdata.mdts > 0)
508 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
509 1 << (ctrlr->cdata.mdts + NVME_MPS_SHIFT +
510 NVME_CAP_HI_MPSMIN(ctrlr->cap_hi)));
516 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
518 struct nvme_completion_poll_status status;
519 int cq_allocated, sq_allocated;
522 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
523 nvme_completion_poll_cb, &status);
524 nvme_completion_poll(&status);
525 if (nvme_completion_is_error(&status.cpl)) {
526 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
531 * Data in cdw0 is 0-based.
532 * The lower 16 bits indicate the number of submission queues allocated.
533 * The upper 16 bits indicate the number of completion queues allocated.
535 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
536 cq_allocated = (status.cpl.cdw0 >> 16) + 1;
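/*
 * Example (hypothetical completion): cdw0 == 0x00070007 decodes to
 * sq_allocated = 0x0007 + 1 = 8 and cq_allocated = 0x0007 + 1 = 8,
 * since both fields are 0-based.
 */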
539 * Controller may allocate more queues than we requested,
540 * so use the minimum of the number requested and what was
541 * actually allocated.
543 ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
544 ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
545 if (ctrlr->num_io_queues > vm_ndomains)
546 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
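/*
 * Example (hypothetical values): with 7 usable I/O queues on a 2-domain
 * system, the count is rounded down to 6 so the queues divide evenly
 * among the domains.
 */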
552 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
554 struct nvme_completion_poll_status status;
555 struct nvme_qpair *qpair;
558 for (i = 0; i < ctrlr->num_io_queues; i++) {
559 qpair = &ctrlr->ioq[i];
562 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
563 nvme_completion_poll_cb, &status);
564 nvme_completion_poll(&status);
565 if (nvme_completion_is_error(&status.cpl)) {
566 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
571 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
572 nvme_completion_poll_cb, &status);
573 nvme_completion_poll(&status);
574 if (nvme_completion_is_error(&status.cpl)) {
575 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
584 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
586 struct nvme_completion_poll_status status;
587 struct nvme_qpair *qpair;
589 for (int i = 0; i < ctrlr->num_io_queues; i++) {
590 qpair = &ctrlr->ioq[i];
593 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
594 nvme_completion_poll_cb, &status);
595 nvme_completion_poll(&status);
596 if (nvme_completion_is_error(&status.cpl)) {
597 nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
602 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
603 nvme_completion_poll_cb, &status);
604 nvme_completion_poll(&status);
605 if (nvme_completion_is_error(&status.cpl)) {
606 nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
615 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
617 struct nvme_namespace *ns;
620 for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
622 nvme_ns_construct(ns, i+1, ctrlr);
629 is_log_page_id_valid(uint8_t page_id)
634 case NVME_LOG_HEALTH_INFORMATION:
635 case NVME_LOG_FIRMWARE_SLOT:
636 case NVME_LOG_CHANGED_NAMESPACE:
637 case NVME_LOG_COMMAND_EFFECT:
638 case NVME_LOG_RES_NOTIFICATION:
639 case NVME_LOG_SANITIZE_STATUS:
647 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
649 uint32_t log_page_size;
654 sizeof(struct nvme_error_information_entry) *
655 (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
657 case NVME_LOG_HEALTH_INFORMATION:
658 log_page_size = sizeof(struct nvme_health_information_page);
660 case NVME_LOG_FIRMWARE_SLOT:
661 log_page_size = sizeof(struct nvme_firmware_page);
663 case NVME_LOG_CHANGED_NAMESPACE:
664 log_page_size = sizeof(struct nvme_ns_list);
666 case NVME_LOG_COMMAND_EFFECT:
667 log_page_size = sizeof(struct nvme_command_effects_page);
669 case NVME_LOG_RES_NOTIFICATION:
670 log_page_size = sizeof(struct nvme_res_notification_page);
672 case NVME_LOG_SANITIZE_STATUS:
673 log_page_size = sizeof(struct nvme_sanitize_status_page);
680 return (log_page_size);
684 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
688 if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
689 nvme_ctrlr_devctl_log(ctrlr, "critical",
690 "available spare space below threshold");
692 if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
693 nvme_ctrlr_devctl_log(ctrlr, "critical",
694 "temperature above threshold");
696 if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
697 nvme_ctrlr_devctl_log(ctrlr, "critical",
698 "device reliability degraded");
700 if (state & NVME_CRIT_WARN_ST_READ_ONLY)
701 nvme_ctrlr_devctl_log(ctrlr, "critical",
702 "media placed in read only mode");
704 if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
705 nvme_ctrlr_devctl_log(ctrlr, "critical",
706 "volatile memory backup device failed");
708 if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
709 nvme_ctrlr_devctl_log(ctrlr, "critical",
710 "unknown critical warning(s): state = 0x%02x", state);
714 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
716 struct nvme_async_event_request *aer = arg;
717 struct nvme_health_information_page *health_info;
718 struct nvme_ns_list *nsl;
719 struct nvme_error_information_entry *err;
723 * If the log page fetch for some reason completed with an error,
724 * don't pass log page data to the consumers. In practice, this case
725 * should never happen.
727 if (nvme_completion_is_error(cpl))
728 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
729 aer->log_page_id, NULL, 0);
731 /* Convert data to host endian */
732 switch (aer->log_page_id) {
734 err = (struct nvme_error_information_entry *)aer->log_page_buffer;
735 for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
736 nvme_error_information_entry_swapbytes(err++);
738 case NVME_LOG_HEALTH_INFORMATION:
739 nvme_health_information_page_swapbytes(
740 (struct nvme_health_information_page *)aer->log_page_buffer);
742 case NVME_LOG_FIRMWARE_SLOT:
743 nvme_firmware_page_swapbytes(
744 (struct nvme_firmware_page *)aer->log_page_buffer);
746 case NVME_LOG_CHANGED_NAMESPACE:
747 nvme_ns_list_swapbytes(
748 (struct nvme_ns_list *)aer->log_page_buffer);
750 case NVME_LOG_COMMAND_EFFECT:
751 nvme_command_effects_page_swapbytes(
752 (struct nvme_command_effects_page *)aer->log_page_buffer);
754 case NVME_LOG_RES_NOTIFICATION:
755 nvme_res_notification_page_swapbytes(
756 (struct nvme_res_notification_page *)aer->log_page_buffer);
758 case NVME_LOG_SANITIZE_STATUS:
759 nvme_sanitize_status_page_swapbytes(
760 (struct nvme_sanitize_status_page *)aer->log_page_buffer);
762 case INTEL_LOG_TEMP_STATS:
763 intel_log_temp_stats_swapbytes(
764 (struct intel_log_temp_stats *)aer->log_page_buffer);
770 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
771 health_info = (struct nvme_health_information_page *)
772 aer->log_page_buffer;
773 nvme_ctrlr_log_critical_warnings(aer->ctrlr,
774 health_info->critical_warning);
776 * Critical warnings reported through the
777 * SMART/health log page are persistent, so
778 * clear the associated bits in the async event
779 * config so that we do not receive repeated
780 * notifications for the same event.
782 aer->ctrlr->async_event_config &=
783 ~health_info->critical_warning;
784 nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
785 aer->ctrlr->async_event_config, NULL, NULL);
786 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
788 nsl = (struct nvme_ns_list *)aer->log_page_buffer;
789 for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
790 if (nsl->ns[i] > NVME_MAX_NAMESPACES)
792 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
797 * Pass the cpl data from the original async event completion,
798 * not the log page fetch.
800 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
801 aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
805 * Repost another asynchronous event request to replace the one
806 * that just completed.
808 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
812 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
814 struct nvme_async_event_request *aer = arg;
816 if (nvme_completion_is_error(cpl)) {
818 * Do not retry failed async event requests. This avoids
819 * infinite loops where a new async event request is submitted
820 * to replace the one that just failed, only to fail again and
821 * perpetuate the loop.
826 /* Associated log page is in bits 23:16 of completion entry dw0. */
827 aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
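/*
 * Example (hypothetical completion): cdw0 == 0x00020101 decodes to event
 * type 0x1 (SMART/health), event info 0x01 and log page id 0x02, the
 * SMART / health information log.
 */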
829 nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
830 " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
833 if (is_log_page_id_valid(aer->log_page_id)) {
834 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
836 memcpy(&aer->cpl, cpl, sizeof(*cpl));
837 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
838 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
839 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
841 /* Wait to notify consumers until after log page is fetched. */
843 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
847 * Repost another asynchronous event request to replace the one
848 * that just completed.
850 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
855 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
856 struct nvme_async_event_request *aer)
858 struct nvme_request *req;
861 req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
865 * Disable timeout here, since asynchronous event requests should by
866 * nature never be timed out.
868 req->timeout = false;
869 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
870 nvme_ctrlr_submit_admin_request(ctrlr, req);
874 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
876 struct nvme_completion_poll_status status;
877 struct nvme_async_event_request *aer;
880 ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
881 NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
882 NVME_CRIT_WARN_ST_READ_ONLY |
883 NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
884 if (ctrlr->cdata.ver >= NVME_REV(1, 2))
885 ctrlr->async_event_config |= NVME_ASYNC_EVENT_NS_ATTRIBUTE |
886 NVME_ASYNC_EVENT_FW_ACTIVATE;
889 nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
890 0, NULL, 0, nvme_completion_poll_cb, &status);
891 nvme_completion_poll(&status);
892 if (nvme_completion_is_error(&status.cpl) ||
893 (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
894 (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
895 nvme_printf(ctrlr, "temperature threshold not supported\n");
897 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
899 nvme_ctrlr_cmd_set_async_event_config(ctrlr,
900 ctrlr->async_event_config, NULL, NULL);
902 /* aerl is a zero-based value, so we need to add 1 here. */
903 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
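/*
 * Example: a controller reporting aerl == 3 supports 4 outstanding AERs,
 * so num_aers becomes min(NVME_MAX_ASYNC_EVENTS, 4).
 */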
905 for (i = 0; i < ctrlr->num_aers; i++) {
906 aer = &ctrlr->aer[i];
907 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
912 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
915 ctrlr->int_coal_time = 0;
916 TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
917 &ctrlr->int_coal_time);
919 ctrlr->int_coal_threshold = 0;
920 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
921 &ctrlr->int_coal_threshold);
923 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
924 ctrlr->int_coal_threshold, NULL, NULL);
928 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
930 struct nvme_hmb_chunk *hmbc;
933 if (ctrlr->hmb_desc_paddr) {
934 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
935 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
936 ctrlr->hmb_desc_map);
937 ctrlr->hmb_desc_paddr = 0;
939 if (ctrlr->hmb_desc_tag) {
940 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
941 ctrlr->hmb_desc_tag = NULL;
943 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
944 hmbc = &ctrlr->hmb_chunks[i];
945 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
946 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
949 ctrlr->hmb_nchunks = 0;
950 if (ctrlr->hmb_tag) {
951 bus_dma_tag_destroy(ctrlr->hmb_tag);
952 ctrlr->hmb_tag = NULL;
954 if (ctrlr->hmb_chunks) {
955 free(ctrlr->hmb_chunks, M_NVME);
956 ctrlr->hmb_chunks = NULL;
961 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
963 struct nvme_hmb_chunk *hmbc;
964 size_t pref, min, minc, size;
968 /* Limit HMB to 5% of RAM size per device by default. */
969 max = (uint64_t)physmem * PAGE_SIZE / 20;
970 TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);
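/*
 * Example (hypothetical machine): with 16 GiB of RAM the default cap is
 * 16 GiB / 20, i.e. roughly 819 MiB of host memory buffer per device,
 * unless overridden via the hw.nvme.hmb_max tunable.
 */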
973 * Units of Host Memory Buffer in the Identify info are always in terms of NVME_HMB_UNITS.
976 min = (long long unsigned)ctrlr->cdata.hmmin * NVME_HMB_UNITS;
977 if (max == 0 || max < min)
979 pref = MIN((long long unsigned)ctrlr->cdata.hmpre * NVME_HMB_UNITS, max);
980 minc = MAX(ctrlr->cdata.hmminds * NVME_HMB_UNITS, ctrlr->page_size);
981 if (min > 0 && ctrlr->cdata.hmmaxd > 0)
982 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
983 ctrlr->hmb_chunk = pref;
987 * However, the chunk sizes, number of chunks, and alignment of chunks
988 * are all based on the current MPS (ctrlr->page_size).
990 ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, ctrlr->page_size);
991 ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
992 if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
993 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
994 ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
995 ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
996 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
997 ctrlr->page_size, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
998 ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
1000 nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
1001 nvme_ctrlr_hmb_free(ctrlr);
1005 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1006 hmbc = &ctrlr->hmb_chunks[i];
1007 if (bus_dmamem_alloc(ctrlr->hmb_tag,
1008 (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
1010 nvme_printf(ctrlr, "failed to alloc HMB\n");
1013 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
1014 hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
1015 &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
1016 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
1018 nvme_printf(ctrlr, "failed to load HMB\n");
1021 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
1022 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1025 if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
1026 ctrlr->hmb_chunk / 2 >= minc) {
1027 ctrlr->hmb_nchunks = i;
1028 nvme_ctrlr_hmb_free(ctrlr);
1029 ctrlr->hmb_chunk /= 2;
1032 ctrlr->hmb_nchunks = i;
1033 if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
1034 nvme_ctrlr_hmb_free(ctrlr);
1038 size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
1039 err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
1040 16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1041 size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
1043 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
1044 nvme_ctrlr_hmb_free(ctrlr);
1047 if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
1048 (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
1049 &ctrlr->hmb_desc_map)) {
1050 nvme_printf(ctrlr, "failed to alloc HMB desc\n");
1051 nvme_ctrlr_hmb_free(ctrlr);
1054 if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1055 ctrlr->hmb_desc_vaddr, size, nvme_single_map,
1056 &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
1057 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
1058 ctrlr->hmb_desc_map);
1059 nvme_printf(ctrlr, "failed to load HMB desc\n");
1060 nvme_ctrlr_hmb_free(ctrlr);
1064 for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1065 ctrlr->hmb_desc_vaddr[i].addr =
1066 htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
1067 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / ctrlr->page_size);
1069 bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1070 BUS_DMASYNC_PREWRITE);
1072 nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
1073 (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
1078 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
1080 struct nvme_completion_poll_status status;
1089 nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
1090 ctrlr->hmb_nchunks * ctrlr->hmb_chunk / ctrlr->page_size,
1091 ctrlr->hmb_desc_paddr, ctrlr->hmb_desc_paddr >> 32,
1092 ctrlr->hmb_nchunks, NULL, 0,
1093 nvme_completion_poll_cb, &status);
1094 nvme_completion_poll(&status);
1095 if (nvme_completion_is_error(&status.cpl))
1096 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
1100 nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
1102 struct nvme_controller *ctrlr = ctrlr_arg;
1103 uint32_t old_num_io_queues;
1109 * Only reset adminq here when we are restarting the
1110 * controller after a reset. During initialization,
1111 * we have already submitted admin commands to get
1112 * the number of I/O queues supported, so we cannot reset
1113 * the adminq again here.
1116 nvme_qpair_reset(&ctrlr->adminq);
1117 nvme_admin_qpair_enable(&ctrlr->adminq);
1120 if (ctrlr->ioq != NULL) {
1121 for (i = 0; i < ctrlr->num_io_queues; i++)
1122 nvme_qpair_reset(&ctrlr->ioq[i]);
1126 * If this was a reset due to an initialization command timeout, just
1127 * return here, letting the initialization code fail gracefully.
1129 if (resetting && !ctrlr->is_initialized)
1132 if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1133 nvme_ctrlr_fail(ctrlr);
1138 * The number of qpairs is determined during controller initialization,
1139 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
1140 * HW limit. We call SET_FEATURES again here so that it gets called
1141 * after any reset for controllers that depend on the driver to
1142 * explicitly specify how many queues it will use. This value should
1143 * never change between resets, so panic if somehow that does happen.
1146 old_num_io_queues = ctrlr->num_io_queues;
1147 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1148 nvme_ctrlr_fail(ctrlr);
1152 if (old_num_io_queues != ctrlr->num_io_queues) {
1153 panic("num_io_queues changed from %u to %u",
1154 old_num_io_queues, ctrlr->num_io_queues);
1158 if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1159 nvme_ctrlr_hmb_alloc(ctrlr);
1160 if (ctrlr->hmb_nchunks > 0)
1161 nvme_ctrlr_hmb_enable(ctrlr, true, false);
1162 } else if (ctrlr->hmb_nchunks > 0)
1163 nvme_ctrlr_hmb_enable(ctrlr, true, true);
1165 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1166 nvme_ctrlr_fail(ctrlr);
1170 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1171 nvme_ctrlr_fail(ctrlr);
1175 nvme_ctrlr_configure_aer(ctrlr);
1176 nvme_ctrlr_configure_int_coalescing(ctrlr);
1178 for (i = 0; i < ctrlr->num_io_queues; i++)
1179 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1184 nvme_ctrlr_start_config_hook(void *arg)
1186 struct nvme_controller *ctrlr = arg;
1191 * Don't call pre/post reset here. We've not yet created the qpairs,
1192 * and haven't set up the ISRs, so there's no need to 'drain' them or
1195 if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
1197 nvme_ctrlr_fail(ctrlr);
1198 config_intrhook_disestablish(&ctrlr->config_hook);
1202 #ifdef NVME_2X_RESET
1204 * Reset controller twice to ensure we do a transition from cc.en==1 to
1205 * cc.en==0. This is because we don't really know what state the
1206 * controller was left in when boot handed off to the OS. Linux doesn't do
1207 * this, however, and when the controller is in state cc.en == 0, no
1210 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1214 nvme_qpair_reset(&ctrlr->adminq);
1215 nvme_admin_qpair_enable(&ctrlr->adminq);
1217 if (nvme_ctrlr_identify(ctrlr) == 0 &&
1218 nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1219 nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1220 nvme_ctrlr_start(ctrlr, false);
1224 nvme_sysctl_initialize_ctrlr(ctrlr);
1225 config_intrhook_disestablish(&ctrlr->config_hook);
1227 ctrlr->is_initialized = 1;
1228 nvme_notify_new_controller(ctrlr);
1233 nvme_ctrlr_reset_task(void *arg, int pending)
1235 struct nvme_controller *ctrlr = arg;
1238 nvme_ctrlr_devctl_log(ctrlr, "RESET", "resetting controller");
1239 nvme_pre_reset(ctrlr);
1240 status = nvme_ctrlr_hw_reset(ctrlr);
1241 nvme_post_reset(ctrlr);
1243 nvme_ctrlr_start(ctrlr, true);
1245 nvme_ctrlr_fail(ctrlr);
1247 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1251 * Poll all the queues enabled on the device for completion.
1254 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1258 nvme_qpair_process_completions(&ctrlr->adminq);
1260 for (i = 0; i < ctrlr->num_io_queues; i++)
1261 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1262 nvme_qpair_process_completions(&ctrlr->ioq[i]);
1266 * Poll the single-vector interrupt case: num_io_queues will be 1 and
1267 * there's only a single vector. While we're polling, we mask further
1268 * interrupts in the controller.
1271 nvme_ctrlr_shared_handler(void *arg)
1273 struct nvme_controller *ctrlr = arg;
1275 nvme_mmio_write_4(ctrlr, intms, 1);
1276 nvme_ctrlr_poll(ctrlr);
1277 nvme_mmio_write_4(ctrlr, intmc, 1);
1281 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1283 struct nvme_pt_command *pt = arg;
1284 struct mtx *mtx = pt->driver_lock;
1287 bzero(&pt->cpl, sizeof(pt->cpl));
1288 pt->cpl.cdw0 = cpl->cdw0;
1290 status = cpl->status;
1291 status &= ~NVME_STATUS_P_MASK;
1292 pt->cpl.status = status;
1295 pt->driver_lock = NULL;
1301 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1302 struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1305 struct nvme_request *req;
1307 struct buf *buf = NULL;
1311 if (pt->len > ctrlr->max_xfer_size) {
1312 nvme_printf(ctrlr, "pt->len (%d) "
1313 "exceeds max_xfer_size (%d)\n", pt->len,
1314 ctrlr->max_xfer_size);
1317 if (is_user_buffer) {
1319 * Ensure the user buffer is wired for the duration of
1320 * this pass-through command.
1323 buf = uma_zalloc(pbuf_zone, M_WAITOK);
1324 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1325 if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
1329 req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
1332 req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1335 req = nvme_allocate_request_null(nvme_pt_done, pt);
1337 /* Assume user space already converted to little-endian */
1338 req->cmd.opc = pt->cmd.opc;
1339 req->cmd.fuse = pt->cmd.fuse;
1340 req->cmd.rsvd2 = pt->cmd.rsvd2;
1341 req->cmd.rsvd3 = pt->cmd.rsvd3;
1342 req->cmd.cdw10 = pt->cmd.cdw10;
1343 req->cmd.cdw11 = pt->cmd.cdw11;
1344 req->cmd.cdw12 = pt->cmd.cdw12;
1345 req->cmd.cdw13 = pt->cmd.cdw13;
1346 req->cmd.cdw14 = pt->cmd.cdw14;
1347 req->cmd.cdw15 = pt->cmd.cdw15;
1349 req->cmd.nsid = htole32(nsid);
1351 mtx = mtx_pool_find(mtxpool_sleep, pt);
1352 pt->driver_lock = mtx;
1355 nvme_ctrlr_submit_admin_request(ctrlr, req);
1357 nvme_ctrlr_submit_io_request(ctrlr, req);
1360 while (pt->driver_lock != NULL)
1361 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1366 uma_zfree(pbuf_zone, buf);
1374 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1377 struct nvme_controller *ctrlr;
1378 struct nvme_pt_command *pt;
1380 ctrlr = cdev->si_drv1;
1383 case NVME_RESET_CONTROLLER:
1384 nvme_ctrlr_reset(ctrlr);
1386 case NVME_PASSTHROUGH_CMD:
1387 pt = (struct nvme_pt_command *)arg;
1388 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1389 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1392 struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1393 strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1394 sizeof(gnsid->cdev));
1395 gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
1399 case NVME_GET_MAX_XFER_SIZE:
1400 *(uint64_t *)arg = ctrlr->max_xfer_size;
1409 static struct cdevsw nvme_ctrlr_cdevsw = {
1410 .d_version = D_VERSION,
1412 .d_ioctl = nvme_ctrlr_ioctl
1416 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1418 struct make_dev_args md_args;
1421 uint32_t to, vs, pmrcap;
1422 int status, timeout_period;
1426 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1427 if (bus_get_domain(dev, &ctrlr->domain) != 0)
1430 ctrlr->cap_lo = cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1432 device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
1433 cap_lo, NVME_CAP_LO_MQES(cap_lo),
1434 NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
1435 NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
1436 (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
1437 (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
1438 NVME_CAP_LO_TO(cap_lo));
1440 ctrlr->cap_hi = cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1442 device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
1443 "MPSMIN %u, MPSMAX %u%s%s\n", cap_hi,
1444 NVME_CAP_HI_DSTRD(cap_hi),
1445 NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
1446 NVME_CAP_HI_CSS(cap_hi),
1447 NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
1448 NVME_CAP_HI_MPSMIN(cap_hi),
1449 NVME_CAP_HI_MPSMAX(cap_hi),
1450 NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
1451 NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "");
1454 vs = nvme_mmio_read_4(ctrlr, vs);
1455 device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
1456 NVME_MAJOR(vs), NVME_MINOR(vs));
1458 if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
1459 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1460 device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
1461 "PMRWBM %x, PMRTO %u%s\n", pmrcap,
1462 NVME_PMRCAP_BIR(pmrcap),
1463 NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
1464 NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
1465 NVME_PMRCAP_PMRTU(pmrcap),
1466 NVME_PMRCAP_PMRWBM(pmrcap),
1467 NVME_PMRCAP_PMRTO(pmrcap),
1468 NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
1471 ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1473 ctrlr->mps = NVME_CAP_HI_MPSMIN(cap_hi);
1474 ctrlr->page_size = 1 << (NVME_MPS_SHIFT + ctrlr->mps);
1476 /* Get ready timeout value from controller, in units of 500ms. */
1477 to = NVME_CAP_LO_TO(cap_lo) + 1;
1478 ctrlr->ready_timeout_in_ms = to * 500;
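/*
 * Example: CAP.TO == 29 yields to = 30 and a ready timeout of
 * 30 * 500 = 15000 ms.
 */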
1480 timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1481 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1482 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1483 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1484 ctrlr->timeout_period = timeout_period;
1486 nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1487 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1489 ctrlr->enable_aborts = 0;
1490 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1492 /* Cap transfers at the maximum addressable by a page-sized PRP list (4KB pages -> 2MB). */
1493 ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
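/*
 * Worked example (4 KiB pages): a page-sized PRP list holds
 * 4096 / 8 = 512 entries, each mapping one 4 KiB page, so the cap is
 * 512 * 4096 = 2 MiB before the MIN() against maxphys.
 */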
1494 if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1498 * Create 2 threads for the taskqueue. The reset thread will block when
1499 * it detects that the controller has failed until all I/O has been
1500 * failed up the stack. The fail_req task needs to be able to run in
1501 * this case to finish the request failure for some cases.
1503 * We could partially solve this race by draining the failed request
1504 * queue before proceeding to free the sim, though nothing would stop
1505 * new I/O from coming in after we do that drain, but before we reach
1506 * cam_sim_free, so this big hammer is used instead.
1508 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1509 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1510 taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1512 ctrlr->is_resetting = 0;
1513 ctrlr->is_initialized = 0;
1514 ctrlr->notification_sent = 0;
1515 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1516 TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1517 STAILQ_INIT(&ctrlr->fail_req);
1518 ctrlr->is_failed = false;
1520 make_dev_args_init(&md_args);
1521 md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1522 md_args.mda_uid = UID_ROOT;
1523 md_args.mda_gid = GID_WHEEL;
1524 md_args.mda_mode = 0600;
1525 md_args.mda_unit = device_get_unit(dev);
1526 md_args.mda_si_drv1 = (void *)ctrlr;
1527 status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
1528 device_get_unit(dev));
1536 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1540 ctrlr->is_dying = true;
1542 if (ctrlr->resource == NULL)
1544 if (!mtx_initialized(&ctrlr->adminq.lock))
1548 * Check whether it is a hot unplug or a clean driver detach.
1549 * If the device is not there anymore, skip any shutdown commands.
1551 gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
1553 nvme_ctrlr_fail(ctrlr);
1555 nvme_notify_fail_consumers(ctrlr);
1557 for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1558 nvme_ns_destruct(&ctrlr->ns[i]);
1561 destroy_dev(ctrlr->cdev);
1563 if (ctrlr->is_initialized) {
1565 if (ctrlr->hmb_nchunks > 0)
1566 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1567 nvme_ctrlr_delete_qpairs(ctrlr);
1569 nvme_ctrlr_hmb_free(ctrlr);
1571 if (ctrlr->ioq != NULL) {
1572 for (i = 0; i < ctrlr->num_io_queues; i++)
1573 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1574 free(ctrlr->ioq, M_NVME);
1576 nvme_admin_qpair_destroy(&ctrlr->adminq);
1579 * Notify the controller of a shutdown, even though this is due to
1580 * a driver unload, not a system shutdown (this path is not invoked
1581 * during shutdown). This ensures the controller receives a
1582 * shutdown notification in case the system is shut down before
1583 * reloading the driver.
1586 nvme_ctrlr_shutdown(ctrlr);
1589 nvme_ctrlr_disable(ctrlr);
1592 if (ctrlr->taskqueue)
1593 taskqueue_free(ctrlr->taskqueue);
1596 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1599 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1600 rman_get_rid(ctrlr->res), ctrlr->res);
1602 if (ctrlr->bar4_resource != NULL) {
1603 bus_release_resource(dev, SYS_RES_MEMORY,
1604 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1607 bus_release_resource(dev, SYS_RES_MEMORY,
1608 ctrlr->resource_id, ctrlr->resource);
1611 mtx_destroy(&ctrlr->lock);
1615 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1621 cc = nvme_mmio_read_4(ctrlr, cc);
1622 cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1623 cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1624 nvme_mmio_write_4(ctrlr, cc, cc);
1626 timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1627 ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
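/*
 * Example (hypothetical values): RTD3E is reported in microseconds, so
 * rtd3e == 2000000 with hz == 1000 gives
 * (2000000 * 1000 + 999999) / 1000000 = 2000 ticks, i.e. about 2 seconds;
 * a controller reporting 0 falls back to the 5 * hz default.
 */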
1629 csts = nvme_mmio_read_4(ctrlr, csts);
1630 if (csts == NVME_GONE) /* Hot unplug. */
1632 if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1634 if (timeout - ticks < 0) {
1635 nvme_printf(ctrlr, "shutdown timeout\n");
1638 pause("nvmeshut", 1);
1643 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1644 struct nvme_request *req)
1647 nvme_qpair_submit_request(&ctrlr->adminq, req);
1651 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1652 struct nvme_request *req)
1654 struct nvme_qpair *qpair;
1656 qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1657 nvme_qpair_submit_request(qpair, req);
1661 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1664 return (ctrlr->dev);
1667 const struct nvme_controller_data *
1668 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1671 return (&ctrlr->cdata);
1675 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1680 * Can't touch failed controllers, so treat them as already suspended.
1682 if (ctrlr->is_failed)
1686 * We don't want the reset taskqueue running, since it does similar
1687 * things, so prevent it from running after we start. Wait for any reset
1688 * that may have been started to complete. The reset process we follow
1689 * will ensure that any new I/O will queue and be given to the hardware
1690 * after we resume (though there should be none).
1692 while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1693 pause("nvmesusp", 1);
1696 "Competing reset task didn't finish. Try again later.\n");
1697 return (EWOULDBLOCK);
1700 if (ctrlr->hmb_nchunks > 0)
1701 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1704 * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
1705 * delete the hardware I/O queues, and then shut down. This properly
1706 * flushes any metadata the drive may have stored so it can survive
1707 * having its power removed and prevents the unsafe shutdown count from
1708 * incrementing. Once we delete the qpairs, we have to disable them
1709 * before shutting down.
1711 nvme_ctrlr_delete_qpairs(ctrlr);
1712 nvme_ctrlr_disable_qpairs(ctrlr);
1713 nvme_ctrlr_shutdown(ctrlr);
1719 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1723 * Can't touch failed controllers, so there is nothing to do to resume.
1725 if (ctrlr->is_failed)
1728 nvme_pre_reset(ctrlr);
1729 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1731 #ifdef NVME_2X_RESET
1733 * Prior to FreeBSD 13.1, FreeBSD's nvme driver reset the hardware twice
1734 * to get it into a known good state. However, the hardware is already in
1735 * a good state here and we don't need to do this for proper functioning.
1737 if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1740 nvme_post_reset(ctrlr);
1743 * Now that we've reset the hardware, we can restart the controller. Any
1744 * I/O that was pending is requeued. Any admin commands are aborted with
1745 * an error. Once we've restarted, take the controller out of reset.
1747 nvme_ctrlr_start(ctrlr, true);
1748 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1753 * Since we can't bring the controller out of reset, announce and fail
1754 * the controller. However, we have to return success for the resume
1755 * itself, due to questionable APIs.
1757 nvme_post_reset(ctrlr);
1758 nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1759 nvme_ctrlr_fail(ctrlr);
1760 (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);