/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioccom.h>
#include <sys/endian.h>

#include "nvme_private.h"

#define B4_CHK_RDY_DELAY_MS	2300	/* work around controller bug */

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)

	struct nvme_qpair	*qpair;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 * back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 * max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	error = nvme_qpair_construct(qpair,
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)

	struct nvme_qpair	*qpair;
	int			i, error, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 * devices may specify a smaller limit, so we need to check
	 * the MQES field in the capabilities register.
	 */
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	mqes = NVME_CAP_LO_MQES(cap_lo);
	num_entries = min(num_entries, mqes + 1);
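	/*
	 * Worked example with illustrative values: MQES is 0-based, so a
	 * controller reporting MQES == 1023 supports 1024-entry queues,
	 * and a hw.nvme.io_entries request of 2048 is capped to 1024 here.
	 */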
	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 * Note also that for a queue size of N, we can only have (N-1)
	 * commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries-1));
	/*
	 * Our best estimate for the maximum number of I/Os that we should
	 * normally have in flight at one time.  This should be viewed as a
	 * hint, not a hard limit, and will need to be revisited when the
	 * upper layers of the storage system grow multi-queue support.
	 */
	ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
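	/*
	 * Worked example with hypothetical values: 128 trackers and 4 I/O
	 * queues give max_hw_pend_io = 128 * 4 * 3 / 4 = 384, i.e. the
	 * hint sits at three quarters of the 512 total tracker slots.
	 */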
	/*
	 * This was calculated previously when setting up interrupts, but
	 * a controller could theoretically support fewer I/O queues than
	 * MSI-X vectors.  So calculate again here just to be safe.
	 */
	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
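	/*
	 * howmany() rounds up, so e.g. 8 CPUs and 3 I/O queues give
	 * howmany(8, 3) == 3 CPUs per queue; every CPU then maps to a
	 * valid queue index in nvme_ctrlr_submit_io_request() below.
	 */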
	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0. IO queues start at ID=1 -
		 * hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 * calculated in nvme_attach().
		 */
		error = nvme_qpair_construct(qpair,
		    i+1, /* qpair ID */
		    ctrlr->msix_enabled ? i+1 : 0, /* vector */

		/*
		 * Do not bother binding interrupts if we only have one I/O
		 * interrupt thread for this controller.
		 */
		if (ctrlr->num_io_queues > 1)
			bus_bind_intr(ctrlr->dev, qpair->res,
			    i * ctrlr->num_cpus_per_ioq);
	}
nvme_ctrlr_fail(struct nvme_controller *ctrlr)

	ctrlr->is_failed = TRUE;
	nvme_admin_qpair_disable(&ctrlr->adminq);
	nvme_qpair_fail(&ctrlr->adminq);
	if (ctrlr->ioq != NULL) {
		for (i = 0; i < ctrlr->num_io_queues; i++) {
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
			nvme_qpair_fail(&ctrlr->ioq[i]);
		}
	}
	nvme_notify_fail_consumers(ctrlr);
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);

nvme_ctrlr_fail_req_task(void *arg, int pending)

	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		mtx_unlock(&ctrlr->lock);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&ctrlr->lock);
	}
	mtx_unlock(&ctrlr->lock);
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)

		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == 0xffffffff)		/* Hot unplug. */
			return (ENXIO);
		if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
		    == desired_val)
			return (0);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
nvme_ctrlr_disable(struct nvme_controller *ctrlr)

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1
	 * when CSTS.RDY is 1 or transitioning CC.EN from 1 to 0 when
	 * CSTS.RDY is 0 "has undefined results".  So make sure that CSTS.RDY
	 * isn't the desired value.  Short circuit if we're already disabled.
	 */

		/* EN == 1, wait for RDY == 1 or fail */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 1);

		/* EN == 0 already; wait for RDY == 0 */
		return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
	cc &= ~NVME_CC_REG_EN_MASK;
	nvme_mmio_write_4(ctrlr, cc, cc);
	/*
	 * Some drives have issues with accessing the mmio after we
	 * disable, so delay for a bit after we write the bit to
	 * cope with these issues.
	 */
	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
		pause("nvmeR", B4_CHK_RDY_DELAY_MS * hz / 1000);
	return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
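/*
 * Sketch of the (EN, RDY) cases the disable path above and the enable path
 * below handle, derived from the code and NVMe 1.3 section 3.1.5:
 *
 *	EN == 1, RDY == 0: wait for RDY == 1 before changing EN
 *	EN == 1, RDY == 1: safe to toggle EN, then wait for RDY to follow
 *	EN == 0          : already disabled (or disabling); just wait for
 *	                   RDY to reach 0
 */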
nvme_ctrlr_enable(struct nvme_controller *ctrlr)

	cc = nvme_mmio_read_4(ctrlr, cc);
	csts = nvme_mmio_read_4(ctrlr, csts);

	en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
	rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;

	/*
	 * See note in nvme_ctrlr_disable.  Short circuit if we're already
	 * enabled.
	 */
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));

		/* EN == 0 already; wait for RDY == 0 or fail */
		err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);

	/* acqs and asqs are 0-based. */
	qsize = ctrlr->adminq.num_entries - 1;

	aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
	aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
	nvme_mmio_write_4(ctrlr, aqa, aqa);
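	/*
	 * Worked example: a 256-entry admin queue gives qsize == 255, so
	 * AQA == (255 << 16) | 255 == 0x00ff00ff, i.e. ACQS in bits 27:16
	 * and ASQS in bits 11:0 per the NVMe register layout.
	 */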
	/* Initialization values for CC */
	cc = 0;
	cc |= 1 << NVME_CC_REG_EN_SHIFT;
	cc |= 0 << NVME_CC_REG_CSS_SHIFT;
	cc |= 0 << NVME_CC_REG_AMS_SHIFT;
	cc |= 0 << NVME_CC_REG_SHN_SHIFT;
	cc |= 6 << NVME_CC_REG_IOSQES_SHIFT;	/* SQ entry size == 64 == 2^6 */
	cc |= 4 << NVME_CC_REG_IOCQES_SHIFT;	/* CQ entry size == 16 == 2^4 */

	/* This evaluates to 0, which is according to spec. */
	cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
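	/*
	 * CC.MPS encodes the host memory page size as 2^(12 + MPS), so
	 * PAGE_SIZE == 4096 yields 4096 >> 13 == 0, i.e. MPS == 0 selects
	 * the 4KB pages this driver assumes.
	 */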
	nvme_mmio_write_4(ctrlr, cc, cc);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)

	nvme_admin_qpair_disable(&ctrlr->adminq);
	/*
	 * I/O queues are not allocated before the initial HW
	 * reset, so do not try to disable them.  Use is_initialized
	 * to determine if this is the initial HW reset.
	 */
	if (ctrlr->is_initialized) {
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_disable(&ctrlr->ioq[i]);
	}

	err = nvme_ctrlr_disable(ctrlr);
	if (err != 0)
		return (err);

	return (nvme_ctrlr_enable(ctrlr));
nvme_ctrlr_reset(struct nvme_controller *ctrlr)

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 * immediately since there is no need to kick off another
		 * reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
nvme_ctrlr_identify(struct nvme_controller *ctrlr)

	struct nvme_completion_poll_status	status;

	status.done = 0;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/* Convert data to host endian */
	nvme_controller_data_swapbytes(&ctrlr->cdata);

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 * controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)

	struct nvme_completion_poll_status	status;
	int					cq_allocated, sq_allocated;

	status.done = 0;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;
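	/*
	 * Worked example: a completion with cdw0 == 0x000f000f reports 16
	 * submission and 16 completion queues (both fields are 0-based).
	 */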
	/*
	 * Controller may allocate more queues than we requested,
	 * so use the minimum of the number requested and what was
	 * actually allocated.
	 */
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
	ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)

	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}
nvme_ctrlr_destroy_qpairs(struct nvme_controller *ctrlr)

	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;

	for (int i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
			return (ENXIO);
		}

		status.done = 0;
		nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (!atomic_load_acq_int(&status.done))
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
			return (ENXIO);
		}
	}
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)

	struct nvme_namespace	*ns;

	for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
		ns = &ctrlr->ns[i];
		nvme_ns_construct(ns, i+1, ctrlr);
	}

is_log_page_id_valid(uint8_t page_id)

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
	case NVME_LOG_CHANGED_NAMESPACE:
	case NVME_LOG_COMMAND_EFFECT:
	case NVME_LOG_RES_NOTIFICATION:
	case NVME_LOG_SANITIZE_STATUS:
		return (TRUE);
	default:
		return (FALSE);
	}
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)

	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		log_page_size = sizeof(struct nvme_ns_list);
		break;
	case NVME_LOG_COMMAND_EFFECT:
		log_page_size = sizeof(struct nvme_command_effects_page);
		break;
	case NVME_LOG_RES_NOTIFICATION:
		log_page_size = sizeof(struct nvme_res_notification_page);
		break;
	case NVME_LOG_SANITIZE_STATUS:
		log_page_size = sizeof(struct nvme_sanitize_status_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    uint8_t state)

	if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state & NVME_CRIT_WARN_ST_READ_ONLY)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state);
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)

	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;
	struct nvme_ns_list			*nsl;
	struct nvme_error_information_entry	*err;

	/*
	 * If the log page fetch for some reason completed with an error,
	 * don't pass log page data to the consumers.  In practice, this case
	 * should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);

	/* Convert data to host endian */
	switch (aer->log_page_id) {
	case NVME_LOG_ERROR:
		err = (struct nvme_error_information_entry *)aer->log_page_buffer;
		for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
			nvme_error_information_entry_swapbytes(err++);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		nvme_health_information_page_swapbytes(
		    (struct nvme_health_information_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		nvme_firmware_page_swapbytes(
		    (struct nvme_firmware_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		nvme_ns_list_swapbytes(
		    (struct nvme_ns_list *)aer->log_page_buffer);
		break;
	case NVME_LOG_COMMAND_EFFECT:
		nvme_command_effects_page_swapbytes(
		    (struct nvme_command_effects_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_RES_NOTIFICATION:
		nvme_res_notification_page_swapbytes(
		    (struct nvme_res_notification_page *)aer->log_page_buffer);
		break;
	case NVME_LOG_SANITIZE_STATUS:
		nvme_sanitize_status_page_swapbytes(
		    (struct nvme_sanitize_status_page *)aer->log_page_buffer);
		break;
	case INTEL_LOG_TEMP_STATS:
		intel_log_temp_stats_swapbytes(
		    (struct intel_log_temp_stats *)aer->log_page_buffer);
		break;
	default:
		break;
	}

	if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
		health_info = (struct nvme_health_information_page *)
		    aer->log_page_buffer;
		nvme_ctrlr_log_critical_warnings(aer->ctrlr,
		    health_info->critical_warning);
		/*
		 * Critical warnings reported through the
		 * SMART/health log page are persistent, so
		 * clear the associated bits in the async event
		 * config so that we do not receive repeated
		 * notifications for the same event.
		 */
		aer->ctrlr->async_event_config &=
		    ~health_info->critical_warning;
		nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
		    aer->ctrlr->async_event_config, NULL, NULL);
	} else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
		nsl = (struct nvme_ns_list *)aer->log_page_buffer;
		for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
			if (nsl->ns[i] > NVME_MAX_NAMESPACES)
				break;
			nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
		}
	}

	/*
	 * Pass the cpl data from the original async event completion,
	 * not the log page fetch.
	 */
	nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
	    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);

	/*
	 * Repost another asynchronous event request to replace the one
	 * that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)

	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 * infinite loops where a new async event request is submitted
		 * to replace the one just failed, only to fail again and
		 * perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
	    " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 * that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)

	struct nvme_request	*req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 * nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)

	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;

	ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
	    NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
	    NVME_CRIT_WARN_ST_READ_ONLY |
	    NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
	if (ctrlr->cdata.ver >= NVME_REV(1, 2))
		ctrlr->async_event_config |= 0x300;
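	/*
	 * The 0x300 above sets bits 8 and 9 of the async event config:
	 * Namespace Attribute Notices and Firmware Activation Notices,
	 * which were added in NVMe 1.2 (hence the version check).
	 */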
	status.done = 0;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (!atomic_load_acq_int(&status.done))
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
	} else
		ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
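	/*
	 * Example: a controller reporting aerl == 3 supports 4 outstanding
	 * AERs, so the loop below posts min(4, NVME_MAX_ASYNC_EVENTS)
	 * requests.
	 */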
	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
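/*
 * Both knobs default to 0 and can be overridden from loader.conf, e.g.:
 *
 *	hw.nvme.int_coal_time="100"
 *	hw.nvme.int_coal_threshold="8"
 *
 * The NVMe spec defines the feature's aggregation time in 100-microsecond
 * increments and its threshold in completion queue entries; the exact
 * encoding is handled in nvme_ctrlr_cmd_set_interrupt_coalescing().
 */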
nvme_ctrlr_start(void *ctrlr_arg)

	struct nvme_controller *ctrlr = ctrlr_arg;
	uint32_t old_num_io_queues;

	/*
	 * Only reset adminq here when we are restarting the
	 * controller after a reset.  During initialization,
	 * we have already submitted admin commands to get
	 * the number of I/O queues supported, so cannot reset
	 * the adminq again here.
	 */
	if (ctrlr->is_resetting)
		nvme_qpair_reset(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}
	/*
	 * The number of qpairs is determined during controller initialization,
	 * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
	 * HW limit.  We call SET_FEATURES again here so that it gets called
	 * after any reset for controllers that depend on the driver to
	 * explicitly specify how many queues it will use.  This value should
	 * never change between resets, so panic if somehow that does happen.
	 */
	if (ctrlr->is_resetting) {
		old_num_io_queues = ctrlr->num_io_queues;
		if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
			nvme_ctrlr_fail(ctrlr);
			return;
		}

		if (old_num_io_queues != ctrlr->num_io_queues) {
			panic("num_io_queues changed from %u to %u",
			    old_num_io_queues, ctrlr->num_io_queues);
		}
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
nvme_ctrlr_start_config_hook(void *arg)

	struct nvme_controller *ctrlr = arg;

	nvme_qpair_reset(&ctrlr->adminq);
	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
	    nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	nvme_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
nvme_ctrlr_reset_task(void *arg, int pending)

	struct nvme_controller	*ctrlr = arg;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 * handlers on this CPU that were blocked on a qpair lock.  We want
	 * all nvme interrupts completed before proceeding with restarting the
	 * controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
/*
 * Poll all the queues enabled on the device for completion.
 */
nvme_ctrlr_poll(struct nvme_controller *ctrlr)

	nvme_qpair_process_completions(&ctrlr->adminq);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		if (ctrlr->ioq && ctrlr->ioq[i].cpl)
			nvme_qpair_process_completions(&ctrlr->ioq[i]);
/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector.  While we're polling, we mask further
 * interrupts in the controller.
 */
nvme_ctrlr_intx_handler(void *arg)

	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);
	nvme_ctrlr_poll(ctrlr);
	nvme_mmio_write_4(ctrlr, intmc, 1);
nvme_pt_done(void *arg, const struct nvme_completion *cpl)

	struct nvme_pt_command	*pt = arg;
	struct mtx		*mtx = pt->driver_lock;
	uint16_t		status;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;

	status = cpl->status;
	status &= ~NVME_STATUS_P_MASK;
	pt->cpl.status = status;

	mtx_lock(mtx);
	pt->driver_lock = NULL;
	wakeup(pt);
	mtx_unlock(mtx);
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)

	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	vm_offset_t		addr, end;

	/*
	 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
	 * pages.  Ensure this request has fewer than MAXPHYS bytes when
	 * extended to full pages.
	 */
	addr = (vm_offset_t)pt->buf;
	end = round_page(addr + pt->len);
	addr = trunc_page(addr);
	if (end - addr > MAXPHYS)

	if (pt->len > ctrlr->max_xfer_size) {
		nvme_printf(ctrlr, "pt->len (%d) "
		    "exceeds max_xfer_size (%d)\n", pt->len,
		    ctrlr->max_xfer_size);
		return (EIO);
	}

	if (is_user_buffer) {
		/*
		 * Ensure the user buffer is wired for the duration of
		 * this passthrough command.
		 */
		buf = getpbuf(NULL);
		buf->b_data = pt->buf;
		buf->b_bufsize = pt->len;
		buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
		if (vmapbuf(buf, 1) < 0) {

		req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
		    nvme_pt_done, pt);

		req = nvme_allocate_request_vaddr(pt->buf, pt->len,
		    nvme_pt_done, pt);

	req = nvme_allocate_request_null(nvme_pt_done, pt);

	/* Assume userspace already converted to little-endian */
	req->cmd.opc = pt->cmd.opc;
	req->cmd.fuse = pt->cmd.fuse;
	req->cmd.rsvd2 = pt->cmd.rsvd2;
	req->cmd.rsvd3 = pt->cmd.rsvd3;
	req->cmd.cdw10 = pt->cmd.cdw10;
	req->cmd.cdw11 = pt->cmd.cdw11;
	req->cmd.cdw12 = pt->cmd.cdw12;
	req->cmd.cdw13 = pt->cmd.cdw13;
	req->cmd.cdw14 = pt->cmd.cdw14;
	req->cmd.cdw15 = pt->cmd.cdw15;

	req->cmd.nsid = htole32(nsid);

	mtx = mtx_pool_find(mtxpool_sleep, pt);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_lock(mtx);
	while (pt->driver_lock != NULL)
		mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
	mtx_unlock(mtx);
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)

	struct nvme_controller			*ctrlr;
	struct nvme_pt_command			*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	case NVME_GET_NSID:
	{
		struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
		strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
		    sizeof(gnsid->cdev));

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	nvme_ctrlr_ioctl
};
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)

	struct make_dev_args	md_args;
	int			status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	/*
	 * Software emulators may set the doorbell stride to something
	 * other than zero, but this driver is not set up to handle that.
	 */
	cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
	dstrd = NVME_CAP_HI_DSTRD(cap_hi);
	if (dstrd != 0)
		return (ENXIO);

	mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
	ctrlr->min_page_size = 1 << (12 + mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
	to = NVME_CAP_LO_TO(cap_lo) + 1;
	ctrlr->ready_timeout_in_ms = to * 500;
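	/*
	 * Example: CAP.TO == 3 yields to == 4 and ready_timeout_in_ms ==
	 * 2000, the worst case the controller advertises for CSTS.RDY to
	 * change after CC.EN is toggled.
	 */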
	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
		return (ENXIO);

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	make_dev_args_init(&md_args);
	md_args.mda_devsw = &nvme_ctrlr_cdevsw;
	md_args.mda_uid = UID_ROOT;
	md_args.mda_gid = GID_WHEEL;
	md_args.mda_mode = 0600;
	md_args.mda_unit = device_get_unit(dev);
	md_args.mda_si_drv1 = (void *)ctrlr;
	status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
	    device_get_unit(dev));
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)

	if (ctrlr->resource == NULL)

	/*
	 * Check whether it is a hot unplug or a clean driver detach.
	 * If device is not there any more, skip any shutdown commands.
	 */
	gone = (nvme_mmio_read_4(ctrlr, csts) == 0xffffffff);
	if (gone)
		nvme_ctrlr_fail(ctrlr);
	else
		nvme_notify_fail_consumers(ctrlr);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

		nvme_ctrlr_destroy_qpairs(ctrlr);
		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_destroy(&ctrlr->ioq[i]);
		free(ctrlr->ioq, M_NVME);
		nvme_admin_qpair_destroy(&ctrlr->adminq);

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 * a driver unload, not a system shutdown (this path is not invoked
	 * during shutdown).  This ensures the controller receives a
	 * shutdown notification in case the system is shutdown before
	 * reloading the driver.
	 */
	if (!gone)
		nvme_ctrlr_shutdown(ctrlr);

		nvme_ctrlr_disable(ctrlr);

	if (ctrlr->taskqueue)
		taskqueue_free(ctrlr->taskqueue);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	bus_release_resource(dev, SYS_RES_MEMORY,
	    ctrlr->resource_id, ctrlr->resource);

	mtx_destroy(&ctrlr->lock);
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)

	cc = nvme_mmio_read_4(ctrlr, cc);
	cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
	cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
	nvme_mmio_write_4(ctrlr, cc, cc);

		csts = nvme_mmio_read_4(ctrlr, csts);
		if (csts == 0xffffffff)		/* Hot unplug. */
			break;
		if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
			break;
		if (ticks++ > 5*hz) {
			nvme_printf(ctrlr, "did not complete shutdown within"
			    " 5 seconds of notification\n");
			break;
		}
		pause("nvme shn", 1);
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)

	nvme_qpair_submit_request(&ctrlr->adminq, req);

nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)

	struct nvme_qpair	*qpair;

	qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
	nvme_qpair_submit_request(qpair, req);
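/*
 * Example of the queue selection above with hypothetical values: with
 * num_cpus_per_ioq == 2, a submission from curcpu == 5 lands on
 * ioq[5 / 2] == ioq[2], a static CPU-to-queue mapping.
 */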
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)

	return (ctrlr->dev);

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)

	return (&ctrlr->cdata);