/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

#define B4_CHK_RDY_DELAY_MS	2300	/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

        ctrlr->resource_id = PCIR_BAR(0);

        ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->resource_id, RF_ACTIVE);

        if (ctrlr->resource == NULL) {
                nvme_printf(ctrlr, "unable to allocate pci resource\n");
                return (ENOMEM);
        }

        ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
        ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
        ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

        /*
         * The NVMe spec allows for the MSI-X table to be placed behind
         * BAR 4/5, separate from the control/doorbell registers. Always
         * try to map this bar, because it must be mapped prior to calling
         * pci_alloc_msix(). If the table isn't behind BAR 4/5,
         * bus_alloc_resource() will just return NULL which is OK.
         */
        ctrlr->bar4_resource_id = PCIR_BAR(4);
        ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->bar4_resource_id, RF_ACTIVE);

        return (0);
}

static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
        struct nvme_qpair *qpair;
        int num_entries, error;

        qpair = &ctrlr->adminq;

        num_entries = NVME_ADMIN_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
        /*
         * If admin_entries was overridden to an invalid value, revert it
         * back to our default value.
         */
        if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
            num_entries > NVME_MAX_ADMIN_ENTRIES) {
                nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
                    "specified\n", num_entries);
                num_entries = NVME_ADMIN_ENTRIES;
        }

        /*
         * The admin queue's max xfer size is treated differently than the
         * max I/O xfer size. 16KB is sufficient here - maybe even less?
         */
        error = nvme_qpair_construct(qpair,
            0,          /* qpair ID */
            0,          /* vector */
            num_entries,
            NVME_ADMIN_TRACKERS,
            ctrlr);
        return (error);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_qpair *qpair;
        union cap_lo_register cap_lo;
        int i, error, num_entries, num_trackers;

        num_entries = NVME_IO_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

        /*
         * NVMe spec sets a hard limit of 64K max entries, but
         * devices may specify a smaller limit, so we need to check
         * the MQES field in the capabilities register.
         */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        num_entries = min(num_entries, cap_lo.bits.mqes + 1);

        num_trackers = NVME_IO_TRACKERS;
        TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

        num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
        num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
        /*
         * No need to have more trackers than entries in the submit queue.
         * Note also that for a queue size of N, we can only have (N-1)
         * commands outstanding, hence the "-1" here.
         */
        num_trackers = min(num_trackers, (num_entries - 1));

        /*
         * Our best estimate for the maximum number of I/Os that we should
         * normally have in flight at one time. This should be viewed as a hint,
         * not a hard limit, and will need to be revisited when the upper layers
         * of the storage system grow multi-queue support.
         */
        ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;

        /*
         * This was calculated previously when setting up interrupts, but
         * a controller could theoretically support fewer I/O queues than
         * MSI-X vectors. So calculate again here just to be safe.
         */
        ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

        ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
            M_NVME, M_ZERO | M_WAITOK);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                /*
                 * Admin queue has ID=0. IO queues start at ID=1 -
                 * hence the 'i+1' here.
                 *
                 * For I/O queues, use the controller-wide max_xfer_size
                 * calculated in nvme_attach().
                 */
                error = nvme_qpair_construct(qpair,
                    i + 1,      /* qpair ID */
                    ctrlr->msix_enabled ? i + 1 : 0, /* vector */
                    num_entries,
                    num_trackers,
                    ctrlr);
                if (error)
                        return (error);

                /*
                 * Do not bother binding interrupts if we only have one I/O
                 * interrupt thread for this controller.
                 */
                if (ctrlr->num_io_queues > 1)
                        bus_bind_intr(ctrlr->dev, qpair->res,
                            i * ctrlr->num_cpus_per_ioq);
        }

        return (0);
}

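/*
 * Worked example of the sizing math above (illustrative values only):
 * with 8 CPUs, 4 I/O queues and num_trackers = 128, each queue serves
 * howmany(8, 4) = 2 CPUs and max_hw_pend_io = 128 * 4 * 3 / 4 = 384
 * in-flight I/Os. The "-1" rule exists because a 128-entry circular
 * submission queue can hold at most 127 outstanding commands: if all
 * 128 slots were used, the head == tail condition could not
 * distinguish a full queue from an empty one.
 */
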
static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
        int i;

        ctrlr->is_failed = TRUE;
        nvme_qpair_fail(&ctrlr->adminq);
        if (ctrlr->ioq != NULL) {
                for (i = 0; i < ctrlr->num_io_queues; i++)
                        nvme_qpair_fail(&ctrlr->ioq[i]);
        }
        nvme_notify_fail_consumers(ctrlr);
}

static void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        mtx_lock(&ctrlr->lock);
        STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
        mtx_unlock(&ctrlr->lock);
        taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
        struct nvme_controller *ctrlr = arg;
        struct nvme_request *req;

        mtx_lock(&ctrlr->lock);
        while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
                req = STAILQ_FIRST(&ctrlr->fail_req);
                STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
                nvme_qpair_manual_complete_request(req->qpair, req,
                    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
        }
        mtx_unlock(&ctrlr->lock);
}

static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
        int ms_waited;
        union csts_register csts;

        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        ms_waited = 0;
        while (csts.bits.rdy != desired_val) {
                if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
                        nvme_printf(ctrlr, "controller ready did not become %d "
                            "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
                        return (ENXIO);
                }
                DELAY(1000);
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
        }

        return (0);
}

static int
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;
        int err;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        /*
         * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
         * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
         * CSTS.RDY is 0 "has undefined results". So make sure that CSTS.RDY
         * isn't the desired value. Short circuit if we're already disabled.
         */
        if (cc.bits.en == 1) {
                if (csts.bits.rdy == 0) {
                        /* EN == 1, wait for RDY == 1 or fail */
                        err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
                        if (err != 0)
                                return (err);
                }
        } else {
                /* EN == 0 already; wait for RDY == 0 */
                if (csts.bits.rdy == 0)
                        return (0);

                return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
        }

        cc.bits.en = 0;
        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        /*
         * Some drives have issues with accessing the mmio after we
         * disable, so delay for a bit after we write the bit to
         * cope with these issues.
         */
        if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
                pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
        return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
}

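/*
 * Summary of the CC.EN/CSTS.RDY handling above (paraphrasing the spec's
 * transition rules, for reference):
 *
 *	EN == 1, RDY == 0:	wait for RDY to rise before clearing EN
 *	EN == 1, RDY == 1:	safe to clear EN immediately
 *	EN == 0, RDY == 0:	already disabled, nothing to do
 *	EN == 0, RDY == 1:	wait for RDY to fall
 */
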
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;
        union aqa_register aqa;
        int err;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        /*
         * See note in nvme_ctrlr_disable. Short circuit if we're already enabled.
         */
        if (cc.bits.en == 1) {
                if (csts.bits.rdy == 1)
                        return (0);

                return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
        } else {
                /* EN == 0 already; wait for RDY == 0 or fail */
                err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
                if (err != 0)
                        return (err);
        }

        nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
        DELAY(5000);
        nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
        DELAY(5000);

        aqa.raw = 0;
        /* acqs and asqs are 0-based. */
        aqa.bits.acqs = ctrlr->adminq.num_entries - 1;
        aqa.bits.asqs = ctrlr->adminq.num_entries - 1;
        nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
        DELAY(5000);

        cc.bits.en = 1;
        cc.bits.css = 0;
        cc.bits.ams = 0;
        cc.bits.shn = 0;
        cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
        cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

        /* This evaluates to 0, which is according to spec. */
        cc.bits.mps = (PAGE_SIZE >> 13);

        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

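/*
 * Note on the MPS math above: the spec defines the memory page size as
 * 2^(12 + MPS), so (PAGE_SIZE >> 13) maps a 4KB PAGE_SIZE to MPS = 0,
 * i.e. 4KB pages. For example, 4096 >> 13 == 0, while an 8KB PAGE_SIZE
 * would yield MPS = 1 (2^13 bytes).
 */
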
int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
        int i, err;

        nvme_admin_qpair_disable(&ctrlr->adminq);
        /*
         * I/O queues are not allocated before the initial HW
         * reset, so do not try to disable them. Use is_initialized
         * to determine if this is the initial HW reset.
         */
        if (ctrlr->is_initialized) {
                for (i = 0; i < ctrlr->num_io_queues; i++)
                        nvme_io_qpair_disable(&ctrlr->ioq[i]);
        }

        DELAY(100*1000);

        err = nvme_ctrlr_disable(ctrlr);
        if (err != 0)
                return (err);
        return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
        int cmpset;

        cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

        if (cmpset == 0 || ctrlr->is_failed)
                /*
                 * Controller is already resetting or has failed. Return
                 * immediately since there is no need to kick off another
                 * reset in these cases.
                 */
                return;

        taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status status;

        status.done = 0;
        nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
            nvme_completion_poll_cb, &status);
        while (!atomic_load_acq_int(&status.done))
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
                return (ENXIO);
        }

        /*
         * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
         * controller supports.
         */
        if (ctrlr->cdata.mdts > 0)
                ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
                    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

        return (0);
}

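/*
 * Example of the MDTS clamp above (illustrative values): MDTS is a power
 * of two in units of the minimum page size, so mdts = 5 with a 4KB
 * min_page_size caps transfers at 4KB * (1 << 5) = 128KB, and
 * max_xfer_size is lowered to that if it was larger.
 */
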
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status status;
        int cq_allocated, sq_allocated;

        status.done = 0;
        nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
            nvme_completion_poll_cb, &status);
        while (!atomic_load_acq_int(&status.done))
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
                return (ENXIO);
        }

        /*
         * Data in cdw0 is 0-based.
         * Lower 16-bits indicate number of submission queues allocated.
         * Upper 16-bits indicate number of completion queues allocated.
         */
        sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
        cq_allocated = (status.cpl.cdw0 >> 16) + 1;

        /*
         * Controller may allocate more queues than we requested,
         * so use the minimum of the number requested and what was
         * actually allocated.
         */
        ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
        ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

        return (0);
}

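/*
 * Example of the cdw0 decoding above (illustrative value): a completion
 * with cdw0 == 0x000F000F means the controller allocated 0x000F + 1 = 16
 * submission queues and 16 completion queues.
 */
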
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status status;
        struct nvme_qpair *qpair;
        int i;

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                status.done = 0;
                nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
                    nvme_completion_poll_cb, &status);
                while (!atomic_load_acq_int(&status.done))
                        pause("nvme", 1);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
                        return (ENXIO);
                }

                status.done = 0;
                nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
                    nvme_completion_poll_cb, &status);
                while (!atomic_load_acq_int(&status.done))
                        pause("nvme", 1);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
                        return (ENXIO);
                }
        }

        return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
        struct nvme_namespace *ns;
        uint32_t i;

        for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
                ns = &ctrlr->ns[i];
                nvme_ns_construct(ns, i+1, ctrlr);
        }

        return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

        switch (page_id) {
        case NVME_LOG_ERROR:
        case NVME_LOG_HEALTH_INFORMATION:
        case NVME_LOG_FIRMWARE_SLOT:
                return (TRUE);
        }

        return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
        uint32_t log_page_size;

        switch (page_id) {
        case NVME_LOG_ERROR:
                log_page_size = min(
                    sizeof(struct nvme_error_information_entry) *
                    (ctrlr->cdata.elpe + 1),
                    NVME_MAX_AER_LOG_SIZE);
                break;
        case NVME_LOG_HEALTH_INFORMATION:
                log_page_size = sizeof(struct nvme_health_information_page);
                break;
        case NVME_LOG_FIRMWARE_SLOT:
                log_page_size = sizeof(struct nvme_firmware_page);
                break;
        default:
                log_page_size = 0;
                break;
        }

        return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

        if (state.bits.available_spare == 1)
                nvme_printf(ctrlr, "available spare space below threshold\n");

        if (state.bits.temperature == 1)
                nvme_printf(ctrlr, "temperature above threshold\n");

        if (state.bits.device_reliability == 1)
                nvme_printf(ctrlr, "device reliability degraded\n");

        if (state.bits.read_only == 1)
                nvme_printf(ctrlr, "media placed in read only mode\n");

        if (state.bits.volatile_memory_backup == 1)
                nvme_printf(ctrlr, "volatile memory backup device failed\n");

        if (state.bits.reserved != 0)
                nvme_printf(ctrlr,
                    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_async_event_request *aer = arg;
        struct nvme_health_information_page *health_info;

        /*
         * If the log page fetch for some reason completed with an error,
         * don't pass log page data to the consumers. In practice, this case
         * should never happen.
         */
        if (nvme_completion_is_error(cpl))
                nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
                    aer->log_page_id, NULL, 0);
        else {
                if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
                        health_info = (struct nvme_health_information_page *)
                            aer->log_page_buffer;
                        nvme_ctrlr_log_critical_warnings(aer->ctrlr,
                            health_info->critical_warning);
                        /*
                         * Critical warnings reported through the
                         * SMART/health log page are persistent, so
                         * clear the associated bits in the async event
                         * config so that we do not receive repeated
                         * notifications for the same event.
                         */
                        aer->ctrlr->async_event_config.raw &=
                            ~health_info->critical_warning.raw;
                        nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
                            aer->ctrlr->async_event_config, NULL, NULL);
                }

                /*
                 * Pass the cpl data from the original async event completion,
                 * not the log page fetch.
                 */
                nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
                    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
        }

        /*
         * Repost another asynchronous event request to replace the one
         * that just completed.
         */
        nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_async_event_request *aer = arg;

        if (nvme_completion_is_error(cpl)) {
                /*
                 * Do not retry failed async event requests. This avoids
                 * infinite loops where a new async event request is submitted
                 * to replace the one just failed, only to fail again and
                 * perpetuate the loop.
                 */
                return;
        }

        /* Associated log page is in bits 23:16 of completion entry dw0. */
        aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

        nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
            aer->log_page_id);

        if (is_log_page_id_valid(aer->log_page_id)) {
                aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
                    aer->log_page_id);
                memcpy(&aer->cpl, cpl, sizeof(*cpl));
                nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
                    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
                    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
                    aer);
                /* Wait to notify consumers until after log page is fetched. */
        } else {
                nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
                    NULL, 0);

                /*
                 * Repost another asynchronous event request to replace the one
                 * that just completed.
                 */
                nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
        }
}

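/*
 * Example of the dw0 decoding above (illustrative value): a SMART/health
 * async event completion might carry cdw0 == 0x00020101, from which bits
 * 23:16 yield log page id 0x02, the health information page.
 */
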
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
        struct nvme_request *req;

        aer->ctrlr = ctrlr;
        req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
        aer->req = req;

        /*
         * Disable timeout here, since asynchronous event requests should by
         * nature never be timed out.
         */
        req->timeout = FALSE;
        req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
        nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status status;
        struct nvme_async_event_request *aer;
        uint32_t i;

        ctrlr->async_event_config.raw = 0xFF;
        ctrlr->async_event_config.bits.reserved = 0;

        status.done = 0;
        nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
            0, NULL, 0, nvme_completion_poll_cb, &status);
        while (!atomic_load_acq_int(&status.done))
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl) ||
            (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
            (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
                nvme_printf(ctrlr, "temperature threshold not supported\n");
                ctrlr->async_event_config.bits.temperature = 0;
        }

        nvme_ctrlr_cmd_set_async_event_config(ctrlr,
            ctrlr->async_event_config, NULL, NULL);

        /* aerl is a zero-based value, so we need to add 1 here. */
        ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

        for (i = 0; i < ctrlr->num_aers; i++) {
                aer = &ctrlr->aer[i];
                nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
        }
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

        ctrlr->int_coal_time = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
            &ctrlr->int_coal_time);

        ctrlr->int_coal_threshold = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
            &ctrlr->int_coal_threshold);

        nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
            ctrlr->int_coal_threshold, NULL, NULL);
}

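/*
 * Example (an illustrative sketch; the values shown are not
 * recommendations): coalescing may be enabled from loader.conf(5), and
 * the values are handed to the controller's interrupt coalescing
 * feature as the aggregation time and threshold:
 *
 *	hw.nvme.int_coal_time="100"
 *	hw.nvme.int_coal_threshold="8"
 */
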
static void
nvme_ctrlr_start(void *ctrlr_arg)
{
        struct nvme_controller *ctrlr = ctrlr_arg;
        uint32_t old_num_io_queues;
        int i;

        /*
         * Only reset adminq here when we are restarting the
         * controller after a reset. During initialization,
         * we have already submitted admin commands to get
         * the number of I/O queues supported, so cannot reset
         * the adminq again here.
         */
        if (ctrlr->is_resetting) {
                nvme_qpair_reset(&ctrlr->adminq);
        }

        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_qpair_reset(&ctrlr->ioq[i]);

        nvme_admin_qpair_enable(&ctrlr->adminq);

        if (nvme_ctrlr_identify(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        /*
         * The number of qpairs is determined during controller initialization,
         * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
         * HW limit. We call SET_FEATURES again here so that it gets called
         * after any reset for controllers that depend on the driver to
         * explicitly specify how many queues it will use. This value should
         * never change between resets, so panic if somehow that does happen.
         */
        if (ctrlr->is_resetting) {
                old_num_io_queues = ctrlr->num_io_queues;
                if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
                        nvme_ctrlr_fail(ctrlr);
                        return;
                }

                if (old_num_io_queues != ctrlr->num_io_queues) {
                        panic("num_io_queues changed from %u to %u",
                            old_num_io_queues, ctrlr->num_io_queues);
                }
        }

        if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        nvme_ctrlr_configure_aer(ctrlr);
        nvme_ctrlr_configure_int_coalescing(ctrlr);

        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_qpair_reset(&ctrlr->adminq);
        nvme_admin_qpair_enable(&ctrlr->adminq);

        if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
            nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
                nvme_ctrlr_start(ctrlr);
        else
                nvme_ctrlr_fail(ctrlr);

        nvme_sysctl_initialize_ctrlr(ctrlr);
        config_intrhook_disestablish(&ctrlr->config_hook);

        ctrlr->is_initialized = 1;
        nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
        struct nvme_controller *ctrlr = arg;
        int status;

        nvme_printf(ctrlr, "resetting controller\n");
        status = nvme_ctrlr_hw_reset(ctrlr);
        /*
         * Use pause instead of DELAY, so that we yield to any nvme interrupt
         * handlers on this CPU that were blocked on a qpair lock. We want
         * all nvme interrupts completed before proceeding with restarting the
         * controller.
         *
         * XXX - any way to guarantee the interrupt handlers have quiesced?
         */
        pause("nvmereset", hz / 10);
        if (status == 0)
                nvme_ctrlr_start(ctrlr);
        else
                nvme_ctrlr_fail(ctrlr);

        atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
nvme_ctrlr_poll(struct nvme_controller *ctrlr)
{
        int i;

        nvme_qpair_process_completions(&ctrlr->adminq);

        for (i = 0; i < ctrlr->num_io_queues; i++)
                if (ctrlr->ioq && ctrlr->ioq[i].cpl)
                        nvme_qpair_process_completions(&ctrlr->ioq[i]);
}

/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector. While we're polling, we mask further
 * interrupts in the controller.
 */
static void
nvme_ctrlr_intx_handler(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_mmio_write_4(ctrlr, intms, 1);
        nvme_ctrlr_poll(ctrlr);
        nvme_mmio_write_4(ctrlr, intmc, 1);
}

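/*
 * INTMS/INTMC are the spec's write-1-to-set and write-1-to-clear
 * interrupt mask registers, one bit per vector; the writes above mask
 * and then unmask bit 0, the only vector in use in INTx mode.
 */
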
static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

        ctrlr->msix_enabled = 0;
        ctrlr->num_io_queues = 1;
        ctrlr->num_cpus_per_ioq = mp_ncpus;
        ctrlr->rid = 0;
        ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
            &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

        if (ctrlr->res == NULL) {
                nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
                return (ENOMEM);
        }

        bus_setup_intr(ctrlr->dev, ctrlr->res,
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
            ctrlr, &ctrlr->tag);

        if (ctrlr->tag == NULL) {
                nvme_printf(ctrlr, "unable to setup intx handler\n");
                return (ENOMEM);
        }

        return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_pt_command *pt = arg;

        bzero(&pt->cpl, sizeof(pt->cpl));
        pt->cpl.cdw0 = cpl->cdw0;
        pt->cpl.status = cpl->status;
        pt->cpl.status.p = 0;

        mtx_lock(pt->driver_lock);
        wakeup(pt);
        mtx_unlock(pt->driver_lock);
}

int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
        struct nvme_request *req;
        struct mtx *mtx;
        struct buf *buf = NULL;
        int ret = 0;
        vm_offset_t addr, end;

        if (pt->len > 0) {
                /*
                 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
                 * pages. Ensure this request has fewer than MAXPHYS bytes when
                 * extended to full pages.
                 */
                addr = (vm_offset_t)pt->buf;
                end = round_page(addr + pt->len);
                addr = trunc_page(addr);
                if (end - addr > MAXPHYS)
                        return (EIO);

                if (pt->len > ctrlr->max_xfer_size) {
                        nvme_printf(ctrlr, "pt->len (%d) "
                            "exceeds max_xfer_size (%d)\n", pt->len,
                            ctrlr->max_xfer_size);
                        return (EIO);
                }
                if (is_user_buffer) {
                        /*
                         * Ensure the user buffer is wired for the duration of
                         * this passthrough command.
                         */
                        PHOLD(curproc);
                        buf = getpbuf(NULL);
                        buf->b_data = pt->buf;
                        buf->b_bufsize = pt->len;
                        buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
                        if (vmapbuf(buf, 1) < 0) {
#else
                        if (vmapbuf(buf) < 0) {
#endif
                                ret = EFAULT;
                                goto err;
                        }
                        req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
                            nvme_pt_done, pt);
                } else
                        req = nvme_allocate_request_vaddr(pt->buf, pt->len,
                            nvme_pt_done, pt);
        } else
                req = nvme_allocate_request_null(nvme_pt_done, pt);

        req->cmd.opc = pt->cmd.opc;
        req->cmd.cdw10 = pt->cmd.cdw10;
        req->cmd.cdw11 = pt->cmd.cdw11;
        req->cmd.cdw12 = pt->cmd.cdw12;
        req->cmd.cdw13 = pt->cmd.cdw13;
        req->cmd.cdw14 = pt->cmd.cdw14;
        req->cmd.cdw15 = pt->cmd.cdw15;

        req->cmd.nsid = nsid;

        if (is_admin_cmd)
                mtx = &ctrlr->lock;
        else
                mtx = &ctrlr->ns[nsid-1].lock;

        mtx_lock(mtx);
        pt->driver_lock = mtx;

        if (is_admin_cmd)
                nvme_ctrlr_submit_admin_request(ctrlr, req);
        else
                nvme_ctrlr_submit_io_request(ctrlr, req);

        mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
        mtx_unlock(mtx);

        pt->driver_lock = NULL;

err:
        if (buf != NULL) {
                relpbuf(buf, NULL);
                PRELE(curproc);
        }

        return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_controller *ctrlr;
        struct nvme_pt_command *pt;

        ctrlr = cdev->si_drv1;

        switch (cmd) {
        case NVME_RESET_CONTROLLER:
                nvme_ctrlr_reset(ctrlr);
                break;
        case NVME_PASSTHROUGH_CMD:
                pt = (struct nvme_pt_command *)arg;
                return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
                    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
        default:
                return (ENOTTY);
        }

        return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_ioctl =      nvme_ctrlr_ioctl
};

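/*
 * Example userland use of NVME_PASSTHROUGH_CMD (an illustrative sketch
 * only, not part of the driver; assumes <dev/nvme/nvme.h> and omits
 * error handling): fetch the health log page through the admin queue.
 *
 *	struct nvme_pt_command pt;
 *	struct nvme_health_information_page health;
 *	int fd = open("/dev/nvme0", O_RDWR);
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_GET_LOG_PAGE;
 *	pt.cmd.nsid = NVME_GLOBAL_NAMESPACE_TAG;
 *	pt.cmd.cdw10 = NVME_LOG_HEALTH_INFORMATION |
 *	    ((sizeof(health) / sizeof(uint32_t) - 1) << 16);
 *	pt.buf = &health;
 *	pt.len = sizeof(health);
 *	pt.is_read = 1;
 *	ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
 */
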
static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
        device_t dev;
        int per_cpu_io_queues;
        int min_cpus_per_ioq;
        int num_vectors_requested, num_vectors_allocated;
        int num_vectors_available;

        dev = ctrlr->dev;
        min_cpus_per_ioq = 1;
        TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

        if (min_cpus_per_ioq < 1) {
                min_cpus_per_ioq = 1;
        } else if (min_cpus_per_ioq > mp_ncpus) {
                min_cpus_per_ioq = mp_ncpus;
        }

        per_cpu_io_queues = 1;
        TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

        if (per_cpu_io_queues == 0) {
                min_cpus_per_ioq = mp_ncpus;
        }

        ctrlr->force_intx = 0;
        TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

        /*
         * FreeBSD currently cannot allocate more than about 190 vectors at
         * boot, meaning that systems with high core count and many devices
         * requesting per-CPU interrupt vectors will not get their full
         * allotment. So first, try to allocate as many as we may need to
         * understand what is available, then immediately release them.
         * Then figure out how many of those we will actually use, based on
         * assigning an equal number of cores to each I/O queue.
         */

        /* One vector per core for I/O queues, plus one vector for the admin queue. */
        num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
        if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
                num_vectors_available = 0;
        }
        pci_release_msi(dev);

        if (ctrlr->force_intx || num_vectors_available < 2) {
                nvme_ctrlr_configure_intx(ctrlr);
                return;
        }

        /*
         * Do not use all vectors for I/O queues - one must be saved for the
         * admin queue.
         */
        ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
            howmany(mp_ncpus, num_vectors_available - 1));

        ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
        num_vectors_requested = ctrlr->num_io_queues + 1;
        num_vectors_allocated = num_vectors_requested;

        /*
         * Now just allocate the number of vectors we need. This should
         * succeed, since we previously called pci_alloc_msix()
         * successfully returning at least this many vectors, but just to
         * be safe, if something goes wrong just revert to INTx.
         */
        if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
                nvme_ctrlr_configure_intx(ctrlr);
                return;
        }

        if (num_vectors_allocated < num_vectors_requested) {
                pci_release_msi(dev);
                nvme_ctrlr_configure_intx(ctrlr);
                return;
        }

        ctrlr->msix_enabled = 1;
}

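/*
 * Worked example of the vector accounting above (illustrative values):
 * with mp_ncpus = 16 and 9 vectors available, num_cpus_per_ioq =
 * max(1, howmany(16, 9 - 1)) = 2, so num_io_queues = howmany(16, 2) = 8
 * and 8 + 1 = 9 vectors are requested: one per I/O queue plus one for
 * the admin queue.
 */
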
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
        union cap_lo_register cap_lo;
        union cap_hi_register cap_hi;
        int status, timeout_period;

        ctrlr->dev = dev;

        mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

        status = nvme_ctrlr_allocate_bar(ctrlr);
        if (status != 0)
                return (status);

        /*
         * Software emulators may set the doorbell stride to something
         * other than zero, but this driver is not set up to handle that.
         */
        cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
        if (cap_hi.bits.dstrd != 0)
                return (ENXIO);

        ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

        /* Get ready timeout value from controller, in units of 500ms. */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

        timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
        TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
        timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
        timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
        ctrlr->timeout_period = timeout_period;

        nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
        TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

        ctrlr->enable_aborts = 0;
        TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

        nvme_ctrlr_setup_interrupts(ctrlr);

        ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
        if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
                return (ENXIO);

        ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
            UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

        if (ctrlr->cdev == NULL)
                return (ENXIO);

        ctrlr->cdev->si_drv1 = (void *)ctrlr;

        ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &ctrlr->taskqueue);
        taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

        ctrlr->is_resetting = 0;
        ctrlr->is_initialized = 0;
        ctrlr->notification_sent = 0;
        TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

        TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
        STAILQ_INIT(&ctrlr->fail_req);
        ctrlr->is_failed = FALSE;

        return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
        int i;

        /*
         * Notify the controller of a shutdown, even though this is due to
         * a driver unload, not a system shutdown (this path is not invoked
         * during shutdown). This ensures the controller receives a
         * shutdown notification in case the system is shutdown before
         * reloading the driver.
         */
        nvme_ctrlr_shutdown(ctrlr);

        nvme_ctrlr_disable(ctrlr);
        taskqueue_free(ctrlr->taskqueue);

        for (i = 0; i < NVME_MAX_NAMESPACES; i++)
                nvme_ns_destruct(&ctrlr->ns[i]);

        if (ctrlr->cdev)
                destroy_dev(ctrlr->cdev);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                nvme_io_qpair_destroy(&ctrlr->ioq[i]);
        }

        free(ctrlr->ioq, M_NVME);

        nvme_admin_qpair_destroy(&ctrlr->adminq);

        if (ctrlr->resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->resource_id, ctrlr->resource);
        }

        if (ctrlr->bar4_resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
        }

        if (ctrlr->tag)
                bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

        if (ctrlr->res)
                bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
                    rman_get_rid(ctrlr->res), ctrlr->res);

        if (ctrlr->msix_enabled)
                pci_release_msi(dev);
}

void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;
        int ticks = 0;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        cc.bits.shn = NVME_SHN_NORMAL;
        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);
        while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
                pause("nvme shn", 1);
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
        }
        if (csts.bits.shst != NVME_SHST_COMPLETE)
                nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
                    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
        struct nvme_qpair *qpair;

        qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
        nvme_qpair_submit_request(qpair, req);
}

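/*
 * Example of the queue selection above (illustrative): with
 * num_cpus_per_ioq = 2, CPUs 0 and 1 submit to ioq[0], CPUs 2 and 3 to
 * ioq[1], and so on, so submissions from a given CPU always land on the
 * same queue.
 */
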
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

        return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

        return (&ctrlr->cdata);
}