/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/endian.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"
#define B4_CHK_RDY_DELAY_MS	2300		/* work around controller bug */
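
/*
 * B4_CHK_RDY_DELAY_MS is only applied for controllers with the
 * QUIRK_DELAY_B4_CHK_RDY quirk set; see nvme_ctrlr_disable() below.
 */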

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENXIO);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 * BAR 4/5, separate from the control/doorbell registers.  Always
	 * try to map this BAR, because it must be mapped prior to calling
	 * pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 * bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, RF_ACTIVE);

	return (0);
}
static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		num_entries;
	int			error;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
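	/*
	 * hw.nvme.admin_entries, like the other hw.nvme.* knobs fetched in
	 * this file, is a loader tunable, e.g. set from loader.conf(5) with
	 * hw.nvme.admin_entries="256" (value shown is only an example).
	 */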
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently from the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
	return (error);
}
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	uint32_t		cap_lo;
	uint16_t		mqes;
	int			i, error, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	mqes = (cap_lo >> NVME_CAP_LO_REG_MQES_SHIFT) & NVME_CAP_LO_REG_MQES_MASK;
	num_entries = min(num_entries, mqes + 1);
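	/*
	 * Example: MQES is 0-based, so a controller reporting MQES == 1023
	 * supports I/O queues of up to 1024 entries.
	 */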

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));
	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a
	 * hint, not a hard limit, and will need to be revisited when the
	 * upper layers of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
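	/*
	 * Example (illustrative numbers only): with 256 trackers on each of
	 * 4 I/O queues, max_hw_pend_io = 256 * 4 * 3 / 4 = 768.
	 */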

	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair,
		    i + 1, /* qpair ID */
		    ctrlr->msix_enabled ? i + 1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);
		if (error)
			return (error);

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}

	return (0);
}
void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_fail(&ctrlr->ioq[i]);
	}
	nvme_notify_fail_consumers(ctrlr);
}
static void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}
static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		mtx_unlock(&ctrlr->lock);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
		mtx_lock(&ctrlr->lock);
	}
	mtx_unlock(&ctrlr->lock);
}
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	uint32_t csts;

	csts = nvme_mmio_read_4(ctrlr, csts);

	ms_waited = 0;
	while (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
	    != desired_val) {
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		DELAY(1000);
		csts = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}
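
/*
 * Disable the controller by clearing CC.EN, honoring the EN/RDY transition
 * rules described in the comment below, then wait for CSTS.RDY to clear.
 */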
static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint8_t  en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0 "has undefined results".  So make sure that CSTS.RDY
	 * isn't the desired value.  Short circuit if we're already disabled.
	 */
	if (en == 1) {
		if (rdy == 0) {
			/* EN == 1, wait for RDY == 1 or fail */
			err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
			if (err != 0)
				return (err);
		}
	} else {
		/* EN == 0 already; wait for RDY == 0 */
		if (rdy == 0)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	}

	cc &= ~NVME_CC_REG_EN_MASK;
	nvme_mmio_write_4(ctrlr, cc, cc);
	/*
	 * Some drives have issues with accessing the mmio after we
	 * disable, so delay for a bit after we write the bit to
	 * cope with these issues.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	uint32_t cc;
	uint32_t csts;
	uint32_t aqa;
	uint32_t qsize;
	uint8_t  en, rdy;
	int err;

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * See note in nvme_ctrlr_disable.  Short circuit if we're already
	 * enabled.
	 */
	if (en == 1) {
		if (rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	/* EN == 0 already; wait for RDY == 0 or fail */
	err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
	if (err != 0)
		return (err);

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	/* acqs and asqs are 0-based. */
	qsize = ctrlr->adminq.num_entries - 1;

	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
	nvme_mmio_write_4(ctrlr, aqa, aqa);
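	/*
	 * Example: for a 256-entry admin queue, qsize == 255, so both the
	 * ASQS and ACQS fields of AQA are written as 255 (0-based sizes).
	 */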

	/* Initialization values for CC */
	cc = 0;
	cc |= 1 << NVME_CC_REG_EN_SHIFT;
	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT;	/* SQ entry size == 64 == 2^6 */
	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT;	/* CQ entry size == 16 == 2^4 */

	/*
	 * For PAGE_SIZE == 4096 this evaluates to 0, which is the
	 * spec-compliant MPS value for a 4KB memory page size
	 * (page size == 2^(12 + MPS)).
	 */
	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;

	nvme_mmio_write_4(ctrlr, cc, cc);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}
static int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i, err;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	DELAY(100*1000);

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		return (err);
	return (nvme_ctrlr_enable(ctrlr));
}
void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/* Convert data to host endian */
	nvme_controller_data_swapbytes(&ctrlr->cdata);

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, sq_allocated;

	status.done = 0;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
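	/* Example: cdw0 == 0x00030003 means 4 SQs and 4 CQs were allocated. */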

	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

	return (0);
}
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}
static int
nvme_ctrlr_destroy_qpair(struct nvme_controller *ctrlr, struct nvme_qpair *qpair)
{
	struct nvme_completion_poll_status	status;

	status.done = 0;
	nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
		return (ENXIO);
	}

	status.done = 0;
	nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
		return (ENXIO);
	}

	return (0);
}
static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	uint32_t		i;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i + 1, ctrlr);
	}

	return (0);
}
static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
	case NVME_LOG_CHANGED_NAMESPACE:
		return (TRUE);
	}

	return (FALSE);
}
static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		log_page_size = sizeof(struct nvme_ns_list);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}
static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    uint8_t state)
{

	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state);
}
static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;
	struct nvme_ns_list			*nsl;
	struct nvme_error_information_entry	*err;
	uint32_t i;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		/* Convert data to host endian */
		switch (aer->log_page_id) {
		case NVME_LOG_ERROR:
			err = (struct nvme_error_information_entry *)aer->log_page_buffer;
			for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
				nvme_error_information_entry_swapbytes(err++);
			break;
		case NVME_LOG_HEALTH_INFORMATION:
			nvme_health_information_page_swapbytes(
			    (struct nvme_health_information_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_FIRMWARE_SLOT:
			nvme_firmware_page_swapbytes(
			    (struct nvme_firmware_page *)aer->log_page_buffer);
			break;
		case NVME_LOG_CHANGED_NAMESPACE:
			nvme_ns_list_swapbytes(
			    (struct nvme_ns_list *)aer->log_page_buffer);
			break;
		case INTEL_LOG_TEMP_STATS:
			intel_log_temp_stats_swapbytes(
			    (struct intel_log_temp_stats *)aer->log_page_buffer);
			break;
		default:
			break;
		}

		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 * SMART/health log page are persistent, so
			 * clear the associated bits in the async event
			 * config so that we do not receive repeated
			 * notifications for the same event.
			 */
			aer->ctrlr->async_event_config &=
			    ~health_info->critical_warning;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
		    !nvme_use_nvd) {
			nsl = (struct nvme_ns_list *)aer->log_page_buffer;
			for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
				if (nsl->ns[i] > NVME_MAX_NAMESPACES)
					break;
				nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
			}
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 * not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}
static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
	    " page 0x%02x)\n", (cpl->cdw0 & 0x03), (cpl->cdw0 & 0xFF00) >> 8,
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_ASYNC_EVENT_REQUEST);
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
	    NVME_CRIT_WARN_ST_READ_ONLY |
	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
		ctrlr->async_event_config |= 0x300;
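	/*
	 * The 0x300 above sets bits 8 and 9 of the Asynchronous Event
	 * Configuration feature: Namespace Attribute Notices and Firmware
	 * Activation Notices, which were added in NVMe 1.2.
	 */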

	status.done = 0;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
	} else
		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}
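
/*
 * Both knobs above default to 0 (coalescing disabled) and, like the other
 * hw.nvme.* tunables in this file, can be overridden from loader.conf(5).
 * Their interpretation is left to nvme_ctrlr_cmd_set_interrupt_coalescing().
 */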
static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;
	int i;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so we cannot reset
	 * the adminq again here.
	 */
	if (ctrlr->is_resetting) {
		nvme_qpair_reset(&ctrlr->adminq);
	}

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (ctrlr->is_resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}
void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}
static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	int			status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}
/*
 * Poll all the queues enabled on the device for completions.
 */
void
nvme_ctrlr_poll(struct nvme_controller *ctrlr)
{
	int i;

	nvme_qpair_process_completions(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
			nvme_qpair_process_completions(&ctrlr->ioq[i]);
}
/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector.  While we're polling, we mask further
 * interrupts in the controller.
 */
static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);
	nvme_ctrlr_poll(ctrlr);
	nvme_mmio_write_4(ctrlr, intmc, 1);
}
static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->msix_enabled = 0;
	ctrlr->num_io_queues = 1;
	ctrlr->num_cpus_per_ioq = mp_ncpus;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}
static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;
	struct mtx *mtx = pt->driver_lock;
	uint16_t status;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;

	status = cpl->status;
	status &= ~NVME_STATUS_P_MASK;
	pt->cpl.status = status;

	mtx_lock(mtx);
	pt->driver_lock = NULL;
	wakeup(pt);
	mtx_unlock(mtx);
}
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	int			ret = 0;
	vm_offset_t		addr, end;

	if (pt->len > 0) {
		/*
		 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
		 * pages.  Ensure this request has fewer than MAXPHYS bytes when
		 * extended to full pages.
		 */
		addr = (vm_offset_t)pt->buf;
		end = round_page(addr + pt->len);
		addr = trunc_page(addr);
		if (end - addr > MAXPHYS)
			return (EIO);

		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return (EIO);
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 * this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	/* Assume userspace already converted to little-endian */
	req->cmd.opc_fuse = pt->cmd.opc_fuse;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = htole32(nsid);

	mtx = mtx_pool_find(mtxpool_sleep, pt);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_lock(mtx);
	while (pt->driver_lock != NULL)
		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}
static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller			*ctrlr;
	struct nvme_pt_command			*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}
static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};
static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
	device_t	dev;
	int		per_cpu_io_queues;
	int		min_cpus_per_ioq;
	int		num_vectors_requested, num_vectors_allocated;
	int		num_vectors_available;

	dev = ctrlr->dev;
	min_cpus_per_ioq = 1;
	TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

	if (min_cpus_per_ioq < 1) {
		min_cpus_per_ioq = 1;
	} else if (min_cpus_per_ioq > mp_ncpus) {
		min_cpus_per_ioq = mp_ncpus;
	}

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

	if (per_cpu_io_queues == 0) {
		min_cpus_per_ioq = mp_ncpus;
	}

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	/*
	 * FreeBSD currently cannot allocate more than about 190 vectors at
	 * boot, meaning that systems with high core count and many devices
	 * requesting per-CPU interrupt vectors will not get their full
	 * allotment.  So first, try to allocate as many as we may need to
	 * understand what is available, then immediately release them.
	 * Then figure out how many of those we will actually use, based on
	 * assigning an equal number of cores to each I/O queue.
	 */

	/* One vector per core for the I/O queues, plus one for the admin queue. */
	num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
	if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
		num_vectors_available = 0;
	}
	pci_release_msi(dev);

	if (ctrlr->force_intx || num_vectors_available < 2) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	/*
	 * Do not use all vectors for I/O queues - one must be saved for the
	 * admin queue.
	 */
	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));

	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
	num_vectors_requested = ctrlr->num_io_queues + 1;
	num_vectors_allocated = num_vectors_requested;
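	/*
	 * Worked example (illustrative numbers): with mp_ncpus == 16 and
	 * num_vectors_available == 9, num_cpus_per_ioq = howmany(16, 8) = 2,
	 * so num_io_queues = 8 and num_vectors_requested = 9 (8 I/O + admin).
	 */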

	/*
	 * Now just allocate the number of vectors we need.  This should
	 * succeed, since the earlier pci_alloc_msix() call returned at least
	 * this many vectors; but to be safe, revert to INTx if anything goes
	 * wrong.
	 */
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	if (num_vectors_allocated < num_vectors_requested) {
		pci_release_msi(dev);
		nvme_ctrlr_configure_intx(ctrlr);
		return;
	}

	ctrlr->msix_enabled = 1;
}
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	struct make_dev_args	md_args;
	uint32_t	cap_lo;
	uint32_t	cap_hi;
	uint8_t		to;
	uint8_t		dstrd;
	uint8_t		mpsmin;
	int		status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);
	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
	dstrd = (cap_hi >> NVME_CAP_HI_REG_DSTRD_SHIFT) & NVME_CAP_HI_REG_DSTRD_MASK;
	if (dstrd != 0)
		return (ENXIO);

	mpsmin = (cap_hi >> NVME_CAP_HI_REG_MPSMIN_SHIFT) & NVME_CAP_HI_REG_MPSMIN_MASK;
	ctrlr->min_page_size = 1 << (12 + mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	to = (cap_lo >> NVME_CAP_LO_REG_TO_SHIFT) & NVME_CAP_LO_REG_TO_MASK;
	ctrlr->ready_timeout_in_ms = to * 500;
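	/* Example: a controller reporting TO == 20 yields a 10000 ms timeout. */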

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	nvme_ctrlr_setup_interrupts(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	make_dev_args_init(&md_args);
	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
	md_args.mda_uid = UID_ROOT;
	md_args.mda_gid = GID_WHEEL;
	md_args.mda_mode = 0600;
	md_args.mda_unit = device_get_unit(dev);
	md_args.mda_si_drv1 = (void *)ctrlr;
	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
	    device_get_unit(dev));
	if (status != 0)
		return (ENXIO);

	return (0);
}
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int	i;

	if (ctrlr->resource == NULL)
		goto nores;

	nvme_notify_fail_consumers(ctrlr);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		nvme_ctrlr_destroy_qpair(ctrlr, &ctrlr->ioq[i]);
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);
	}
	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shut down before
	 * reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);

	if (ctrlr->taskqueue)
		taskqueue_free(ctrlr->taskqueue);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

nores:
	mtx_destroy(&ctrlr->lock);
}
void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	uint32_t	cc;
	uint32_t	csts;
	int		ticks = 0;

	cc = nvme_mmio_read_4(ctrlr, cc);
	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
	nvme_mmio_write_4(ctrlr, cc, cc);

	csts = nvme_mmio_read_4(ctrlr, csts);
	while ((NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts = nvme_mmio_read_4(ctrlr, csts);
	}
	if (NVME_CSTS_GET_SHST(csts) != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}
void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}
void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair	*qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
}
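
/*
 * Example of the mapping above: with num_cpus_per_ioq == 2, requests
 * submitted from CPUs 4 and 5 both land on ioq[2], so each I/O queue
 * stays local to a fixed set of CPUs.
 */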
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}