/*-
 * Copyright (C) 2012-2015 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);

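/*
 * Map the controller's register file.  Per the NVMe spec, BAR 0/1 holds
 *  the memory-mapped register set, with the queue doorbells starting at
 *  offset 0x1000.  A doorbell stride of zero is assumed here; this is
 *  verified in nvme_ctrlr_construct() below.
 */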
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

	ctrlr->resource_id = PCIR_BAR(0);

	ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

	if (ctrlr->resource == NULL) {
		nvme_printf(ctrlr, "unable to allocate pci resource\n");
		return (ENOMEM);
	}

	ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
	ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
	ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed behind
	 *  BAR 4/5, separate from the control/doorbell registers.  Always
	 *  try to map this bar, because it must be mapped prior to calling
	 *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
	 *  bus_alloc_resource() will just return NULL which is OK.
	 */
	ctrlr->bar4_resource_id = PCIR_BAR(4);
	ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
	    &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

	return (0);
}

static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	int			num_entries;

	qpair = &ctrlr->adminq;

	num_entries = NVME_ADMIN_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
	/*
	 * If admin_entries was overridden to an invalid value, revert it
	 *  back to our default value.
	 */
	if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
	    num_entries > NVME_MAX_ADMIN_ENTRIES) {
		nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
		    "specified\n", num_entries);
		num_entries = NVME_ADMIN_ENTRIES;
	}

	/*
	 * The admin queue's max xfer size is treated differently than the
	 *  max I/O xfer size.  16KB is sufficient here - maybe even less?
	 */
	nvme_qpair_construct(qpair,
	    0, /* qpair ID */
	    0, /* vector */
	    num_entries,
	    NVME_ADMIN_TRACKERS,
	    ctrlr);
}

static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_qpair	*qpair;
	union cap_lo_register	cap_lo;
	int			i, num_entries, num_trackers;

	num_entries = NVME_IO_ENTRIES;
	TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

	/*
	 * NVMe spec sets a hard limit of 64K max entries, but
	 *  devices may specify a smaller limit, so we need to check
	 *  the MQES field in the capabilities register.  MQES is
	 *  0-based, hence the "+1" below.
	 */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	num_entries = min(num_entries, cap_lo.bits.mqes + 1);

	num_trackers = NVME_IO_TRACKERS;
	TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

	num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
	num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
	/*
	 * No need to have more trackers than entries in the submit queue.
	 *  Note also that for a queue size of N, we can only have (N-1)
	 *  commands outstanding, hence the "-1" here.
	 */
	num_trackers = min(num_trackers, (num_entries - 1));

	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
	    M_NVME, M_ZERO | M_WAITOK);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		/*
		 * Admin queue has ID=0.  IO queues start at ID=1 -
		 *  hence the 'i+1' here.
		 *
		 * For I/O queues, use the controller-wide max_xfer_size
		 *  calculated in nvme_attach().
		 */
		nvme_qpair_construct(qpair,
		    i + 1, /* qpair ID */
		    ctrlr->msix_enabled ? i + 1 : 0, /* vector */
		    num_entries,
		    num_trackers,
		    ctrlr);

		if (ctrlr->per_cpu_io_queues)
			bus_bind_intr(ctrlr->dev, qpair->res, i);
	}

	return (0);
}

static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
	int i;

	ctrlr->is_failed = TRUE;
	nvme_qpair_fail(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_fail(&ctrlr->ioq[i]);
	nvme_notify_fail_consumers(ctrlr);
}

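/*
 * A request that cannot be submitted because the controller has failed is
 *  queued here and completed later from task context, so that the caller's
 *  completion handler does not run deep in the submission path with qpair
 *  locks held.
 */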
void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	mtx_lock(&ctrlr->lock);
	STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
	mtx_unlock(&ctrlr->lock);
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	struct nvme_request	*req;

	mtx_lock(&ctrlr->lock);
	while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
		req = STAILQ_FIRST(&ctrlr->fail_req);
		STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
		nvme_qpair_manual_complete_request(req->qpair, req,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
	}
	mtx_unlock(&ctrlr->lock);
}

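/*
 * Poll CSTS.RDY until it reaches the desired value.  The timeout is
 *  derived from CAP.TO (units of 500ms) in nvme_ctrlr_construct(), and
 *  the register is polled roughly once per millisecond.
 */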
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
	int ms_waited;
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en != desired_val) {
		nvme_printf(ctrlr, "%s called with desired_val = %d "
		    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
		return (ENXIO);
	}

	ms_waited = 0;

	while (csts.bits.rdy != desired_val) {
		DELAY(1000);
		if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
			nvme_printf(ctrlr, "controller ready did not become %d "
			    "within %d ms\n", desired_val,
			    ctrlr->ready_timeout_in_ms);
			return (ENXIO);
		}
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}

	return (0);
}

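/*
 * Per the spec, toggling CC.EN while a previous transition is still
 *  reflected in CSTS.RDY has undefined behavior, so wait for any
 *  in-progress enable to complete before clearing CC.EN.
 */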
static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1 && csts.bits.rdy == 0)
		nvme_ctrlr_wait_for_ready(ctrlr, 1);

	cc.bits.en = 0;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);
	nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

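/*
 * Program the admin queue before setting CC.EN: ASQ/ACQ hold the
 *  submission/completion queue base addresses, and AQA holds their sizes.
 *  The AQA fields are 0-based, so e.g. a 128-entry admin queue is encoded
 *  as asqs = acqs = 127 (0x007F007F in the packed register).
 */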
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
	union cc_register cc;
	union csts_register csts;
	union aqa_register aqa;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);

	if (cc.bits.en == 1) {
		if (csts.bits.rdy == 1)
			return (0);
		else
			return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
	}

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
	DELAY(5000);
	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
	DELAY(5000);

	aqa.raw = 0;
	/* acqs and asqs are 0-based. */
	aqa.bits.acqs = ctrlr->adminq.num_entries - 1;
	aqa.bits.asqs = ctrlr->adminq.num_entries - 1;
	nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
	DELAY(5000);

	cc.bits.en = 1;
	cc.bits.css = 0;
	cc.bits.ams = 0;
	cc.bits.shn = 0;
	cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
	cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

	/*
	 * CC.MPS is log2(memory page size) - 12, so with the 4KB pages
	 *  assumed here this evaluates to 0, which is according to spec.
	 */
	cc.bits.mps = (PAGE_SIZE >> 13);

	DELAY(5000);
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	DELAY(5000);

	return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
	int i;

	nvme_admin_qpair_disable(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_disable(&ctrlr->ioq[i]);

	DELAY(100*1000);

	nvme_ctrlr_disable(ctrlr);
	return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
	int cmpset;

	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

	if (cmpset == 0 || ctrlr->is_failed)
		/*
		 * Controller is already resetting or has failed.  Return
		 *  immediately since there is no need to kick off another
		 *  reset in these cases.
		 */
		return;

	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

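/*
 * Fetch the controller's IDENTIFY data and honor MDTS.  MDTS expresses
 *  the max data transfer size as a power-of-two multiple of the minimum
 *  page size: e.g. mdts == 5 with a 4KB minimum page size caps transfers
 *  at 4KB * 2^5 = 128KB.  A value of 0 means no limit.
 */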
static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
		return (ENXIO);
	}

	/*
	 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
	 *  controller supports.
	 */
	if (ctrlr->cdata.mdts > 0)
		ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
		    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

	return (0);
}

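/*
 * Negotiate queue counts with the Number of Queues feature.  Both the
 *  request (cdw11) and the grant (cdw0) pack 0-based counts: submission
 *  queues in the low 16 bits, completion queues in the high 16 bits.
 *  E.g. a cdw0 of 0x00010001 grants two SQs and two CQs.
 */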
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					cq_allocated, i, sq_allocated;

	status.done = FALSE;
	nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
		return (ENXIO);
	}

	/*
	 * Data in cdw0 is 0-based.
	 * Lower 16-bits indicate number of submission queues allocated.
	 * Upper 16-bits indicate number of completion queues allocated.
	 */
	sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
	cq_allocated = (status.cpl.cdw0 >> 16) + 1;

	/*
	 * Check that the controller was able to allocate the number of
	 *  queues we requested.  If not, revert to one IO queue pair.
	 */
	if (sq_allocated < ctrlr->num_io_queues ||
	    cq_allocated < ctrlr->num_io_queues) {

		/*
		 * Destroy extra IO queue pairs that were created at
		 *  controller construction time but are no longer
		 *  needed.  This will only happen when a controller
		 *  supports fewer queues than MSI-X vectors.  This
		 *  is not the normal case, but does occur with the
		 *  Chatham prototype board.
		 */
		for (i = 1; i < ctrlr->num_io_queues; i++)
			nvme_io_qpair_destroy(&ctrlr->ioq[i]);

		ctrlr->num_io_queues = 1;
		ctrlr->per_cpu_io_queues = 0;
	}

	return (0);
}

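/*
 * Create the I/O queue pairs on the controller.  A completion queue must
 *  exist before any submission queue that posts to it, since CREATE IO SQ
 *  names its target CQ, hence the CQ-then-SQ ordering below.
 */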
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_qpair			*qpair;
	int					i;

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		qpair = &ctrlr->ioq[i];

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
			return (ENXIO);
		}

		status.done = FALSE;
		nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			pause("nvme", 1);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
			return (ENXIO);
		}
	}

	return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
	struct nvme_namespace	*ns;
	int			i, status;

	for (i = 0; i < ctrlr->cdata.nn; i++) {
		ns = &ctrlr->ns[i];
		/* Namespace IDs are 1-based, hence the 'i+1' here. */
		status = nvme_ns_construct(ns, i + 1, ctrlr);
		if (status != 0)
			return (status);
	}

	return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

	switch (page_id) {
	case NVME_LOG_ERROR:
	case NVME_LOG_HEALTH_INFORMATION:
	case NVME_LOG_FIRMWARE_SLOT:
		return (TRUE);
	}

	return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
	uint32_t	log_page_size;

	switch (page_id) {
	case NVME_LOG_ERROR:
		/* ELPE is 0-based, so add 1 to get the entry count. */
		log_page_size = min(
		    sizeof(struct nvme_error_information_entry) *
		    (ctrlr->cdata.elpe + 1),
		    NVME_MAX_AER_LOG_SIZE);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		log_page_size = sizeof(struct nvme_health_information_page);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		log_page_size = sizeof(struct nvme_firmware_page);
		break;
	default:
		log_page_size = 0;
		break;
	}

	return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

	if (state.bits.available_spare == 1)
		nvme_printf(ctrlr, "available spare space below threshold\n");

	if (state.bits.temperature == 1)
		nvme_printf(ctrlr, "temperature above threshold\n");

	if (state.bits.device_reliability == 1)
		nvme_printf(ctrlr, "device reliability degraded\n");

	if (state.bits.read_only == 1)
		nvme_printf(ctrlr, "media placed in read only mode\n");

	if (state.bits.volatile_memory_backup == 1)
		nvme_printf(ctrlr, "volatile memory backup device failed\n");

	if (state.bits.reserved != 0)
		nvme_printf(ctrlr,
		    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request		*aer = arg;
	struct nvme_health_information_page	*health_info;

	/*
	 * If the log page fetch for some reason completed with an error,
	 *  don't pass log page data to the consumers.  In practice, this case
	 *  should never happen.
	 */
	if (nvme_completion_is_error(cpl))
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, NULL, 0);
	else {
		if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
			health_info = (struct nvme_health_information_page *)
			    aer->log_page_buffer;
			nvme_ctrlr_log_critical_warnings(aer->ctrlr,
			    health_info->critical_warning);
			/*
			 * Critical warnings reported through the
			 *  SMART/health log page are persistent, so
			 *  clear the associated bits in the async event
			 *  config so that we do not receive repeated
			 *  notifications for the same event.
			 */
			aer->ctrlr->async_event_config.raw &=
			    ~health_info->critical_warning.raw;
			nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
			    aer->ctrlr->async_event_config, NULL, NULL);
		}

		/*
		 * Pass the cpl data from the original async event completion,
		 *  not the log page fetch.
		 */
		nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
		    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
	}

	/*
	 * Repost another asynchronous event request to replace the one
	 *  that just completed.
	 */
	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_async_event_request	*aer = arg;

	if (nvme_completion_is_error(cpl)) {
		/*
		 * Do not retry failed async event requests.  This avoids
		 *  infinite loops where a new async event request is submitted
		 *  to replace the one just failed, only to fail again and
		 *  perpetuate the loop.
		 */
		return;
	}

	/* Associated log page is in bits 23:16 of completion entry dw0. */
	aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

	nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
	    aer->log_page_id);

	if (is_log_page_id_valid(aer->log_page_id)) {
		aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
		    aer->log_page_id);
		memcpy(&aer->cpl, cpl, sizeof(*cpl));
		nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
		    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
		    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
		    aer);
		/* Wait to notify consumers until after log page is fetched. */
	} else {
		nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
		    NULL, 0);

		/*
		 * Repost another asynchronous event request to replace the one
		 *  that just completed.
		 */
		nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
	}
}

static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	aer->req = req;

	/*
	 * Disable timeout here, since asynchronous event requests should by
	 *  nature never be timed out.
	 */
	req->timeout = FALSE;
	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
	nvme_ctrlr_submit_admin_request(ctrlr, req);
}

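/*
 * Enable all supported async event notifications and post one AER command
 *  per available slot.  The temperature threshold feature is probed first
 *  so that temperature events can be masked on controllers that do not
 *  implement it, rather than firing spuriously.
 */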
static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	struct nvme_async_event_request		*aer;
	uint32_t				i;

	ctrlr->async_event_config.raw = 0xFF;
	ctrlr->async_event_config.bits.reserved = 0;

	status.done = FALSE;
	nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
	    0, NULL, 0, nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		pause("nvme", 1);
	if (nvme_completion_is_error(&status.cpl) ||
	    (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
	    (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
		nvme_printf(ctrlr, "temperature threshold not supported\n");
		ctrlr->async_event_config.bits.temperature = 0;
	}

	nvme_ctrlr_cmd_set_async_event_config(ctrlr,
	    ctrlr->async_event_config, NULL, NULL);

	/* aerl is a zero-based value, so we need to add 1 here. */
	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));

	for (i = 0; i < ctrlr->num_aers; i++) {
		aer = &ctrlr->aer[i];
		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
	}
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

	ctrlr->int_coal_time = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
	    &ctrlr->int_coal_time);

	ctrlr->int_coal_threshold = 0;
	TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
	    &ctrlr->int_coal_threshold);

	nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
	    ctrlr->int_coal_threshold, NULL, NULL);
}

static void
nvme_ctrlr_start(void *ctrlr_arg)
{
	struct nvme_controller *ctrlr = ctrlr_arg;
	int i;

	nvme_qpair_reset(&ctrlr->adminq);
	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_qpair_reset(&ctrlr->ioq[i]);

	nvme_admin_qpair_enable(&ctrlr->adminq);

	if (nvme_ctrlr_identify(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
		nvme_ctrlr_fail(ctrlr);
		return;
	}

	nvme_ctrlr_configure_aer(ctrlr);
	nvme_ctrlr_configure_int_coalescing(ctrlr);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_ctrlr_start(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	ctrlr->is_initialized = 1;
	nvme_notify_new_controller(ctrlr);
}

static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
	struct nvme_controller	*ctrlr = arg;
	int			status;

	nvme_printf(ctrlr, "resetting controller\n");
	status = nvme_ctrlr_hw_reset(ctrlr);
	/*
	 * Use pause instead of DELAY, so that we yield to any nvme interrupt
	 *  handlers on this CPU that were blocked on a qpair lock.  We want
	 *  all nvme interrupts completed before proceeding with restarting the
	 *  controller.
	 *
	 * XXX - any way to guarantee the interrupt handlers have quiesced?
	 */
	pause("nvmereset", hz / 10);
	if (status == 0)
		nvme_ctrlr_start(ctrlr);
	else
		nvme_ctrlr_fail(ctrlr);

	atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

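/*
 * Legacy INTx handler.  INTMS and INTMC mask and unmask interrupt vectors:
 *  writing 1 to bit 0 of INTMS masks vector 0 for the duration of
 *  completion processing, and the matching INTMC write unmasks it.
 */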
static void
nvme_ctrlr_intx_handler(void *arg)
{
	struct nvme_controller *ctrlr = arg;

	nvme_mmio_write_4(ctrlr, intms, 1);

	nvme_qpair_process_completions(&ctrlr->adminq);

	if (ctrlr->ioq[0].cpl)
		nvme_qpair_process_completions(&ctrlr->ioq[0]);

	nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

	ctrlr->num_io_queues = 1;
	ctrlr->per_cpu_io_queues = 0;
	ctrlr->rid = 0;
	ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
	    &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ctrlr->res == NULL) {
		nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
		return (ENOMEM);
	}

	bus_setup_intr(ctrlr->dev, ctrlr->res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
	    ctrlr, &ctrlr->tag);

	if (ctrlr->tag == NULL) {
		nvme_printf(ctrlr, "unable to setup intx handler\n");
		return (ENOMEM);
	}

	return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_pt_command *pt = arg;

	bzero(&pt->cpl, sizeof(pt->cpl));
	pt->cpl.cdw0 = cpl->cdw0;
	pt->cpl.status = cpl->status;
	pt->cpl.status.p = 0;

	mtx_lock(pt->driver_lock);
	wakeup(pt);
	mtx_unlock(pt->driver_lock);
}

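/*
 * Execute a passthrough command on behalf of an ioctl caller.  User
 *  buffers are wired via vmapbuf() for the duration of the command, and
 *  the caller sleeps on the pt structure until nvme_pt_done() posts its
 *  wakeup.
 */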
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
	struct nvme_request	*req;
	struct mtx		*mtx;
	struct buf		*buf = NULL;
	int			ret = 0;

	if (pt->len > 0) {
		if (pt->len > ctrlr->max_xfer_size) {
			nvme_printf(ctrlr, "pt->len (%d) "
			    "exceeds max_xfer_size (%d)\n", pt->len,
			    ctrlr->max_xfer_size);
			return (EIO);
		}
		if (is_user_buffer) {
			/*
			 * Ensure the user buffer is wired for the duration of
			 *  this passthrough command.
			 */
			PHOLD(curproc);
			buf = getpbuf(NULL);
			buf->b_data = pt->buf;
			buf->b_bufsize = pt->len;
			buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
			if (vmapbuf(buf, 1) < 0) {
#else
			if (vmapbuf(buf) < 0) {
#endif
				ret = EFAULT;
				goto err;
			}
			req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
			    nvme_pt_done, pt);
		} else
			req = nvme_allocate_request_vaddr(pt->buf, pt->len,
			    nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);

	req->cmd.opc	= pt->cmd.opc;
	req->cmd.cdw10	= pt->cmd.cdw10;
	req->cmd.cdw11	= pt->cmd.cdw11;
	req->cmd.cdw12	= pt->cmd.cdw12;
	req->cmd.cdw13	= pt->cmd.cdw13;
	req->cmd.cdw14	= pt->cmd.cdw14;
	req->cmd.cdw15	= pt->cmd.cdw15;
	req->cmd.nsid = nsid;

	if (is_admin_cmd)
		mtx = &ctrlr->lock;
	else
		mtx = &ctrlr->ns[nsid-1].lock;

	mtx_lock(mtx);
	pt->driver_lock = mtx;

	if (is_admin_cmd)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	else
		nvme_ctrlr_submit_io_request(ctrlr, req);

	mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);

	pt->driver_lock = NULL;
	mtx_unlock(mtx);

err:
	if (buf != NULL) {
		relpbuf(buf, NULL);
		PRELE(curproc);
	}

	return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_controller			*ctrlr;
	struct nvme_pt_command			*pt;

	ctrlr = cdev->si_drv1;

	switch (cmd) {
	case NVME_RESET_CONTROLLER:
		nvme_ctrlr_reset(ctrlr);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
		    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
	default:
		return (ENOTTY);
	}

	return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_ioctl =	nvme_ctrlr_ioctl
};

int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
	union cap_lo_register	cap_lo;
	union cap_hi_register	cap_hi;
	int			i, per_cpu_io_queues, rid;
	int			num_vectors_requested, num_vectors_allocated;
	int			status, timeout_period;

	ctrlr->dev = dev;

	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

	status = nvme_ctrlr_allocate_bar(ctrlr);

	if (status != 0)
		return (status);

	/*
	 * Software emulators may set the doorbell stride to something
	 *  other than zero, but this driver is not set up to handle that.
	 */
	cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
	if (cap_hi.bits.dstrd != 0)
		return (ENXIO);

	ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

	/* Get ready timeout value from controller, in units of 500ms. */
	cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
	ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

	timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

	per_cpu_io_queues = 1;
	TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
	ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;

	if (ctrlr->per_cpu_io_queues)
		ctrlr->num_io_queues = mp_ncpus;
	else
		ctrlr->num_io_queues = 1;

	ctrlr->force_intx = 0;
	TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

	ctrlr->enable_aborts = 0;
	TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

	ctrlr->msix_enabled = 1;

	if (ctrlr->force_intx) {
		ctrlr->msix_enabled = 0;
		goto intx;
	}

	/* One vector per IO queue, plus one vector for admin queue. */
	num_vectors_requested = ctrlr->num_io_queues + 1;

	/*
	 * If we cannot even allocate 2 vectors (one for admin, one for
	 *  I/O), then revert to INTx.
	 */
	if (pci_msix_count(dev) < 2) {
		ctrlr->msix_enabled = 0;
		goto intx;
	} else if (pci_msix_count(dev) < num_vectors_requested) {
		ctrlr->per_cpu_io_queues = FALSE;
		ctrlr->num_io_queues = 1;
		num_vectors_requested = 2; /* one for admin, one for I/O */
	}

	num_vectors_allocated = num_vectors_requested;
	if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
		ctrlr->msix_enabled = 0;
		goto intx;
	} else if (num_vectors_allocated < num_vectors_requested) {
		if (num_vectors_allocated < 2) {
			pci_release_msi(dev);
			ctrlr->msix_enabled = 0;
			goto intx;
		}

		ctrlr->per_cpu_io_queues = FALSE;
		ctrlr->num_io_queues = 1;
		/*
		 * Release whatever vectors were allocated, and just
		 *  reallocate the two needed for the admin and single
		 *  I/O queue.
		 */
		num_vectors_allocated = 2;
		pci_release_msi(dev);
		if (pci_alloc_msix(dev, &num_vectors_allocated) != 0)
			panic("could not reallocate any vectors\n");
		if (num_vectors_allocated != 2)
			panic("could not reallocate 2 vectors\n");
	}

	/*
	 * On earlier FreeBSD releases, there are reports that
	 *  pci_alloc_msix() can return successfully with all vectors
	 *  requested, but a subsequent bus_alloc_resource_any()
	 *  for one of those vectors fails.  This issue occurs more
	 *  readily with multiple devices using per-CPU vectors.
	 * To workaround this issue, try to allocate the resources now,
	 *  and fall back to INTx if we cannot allocate all of them.
	 *  This issue cannot be reproduced on more recent versions of
	 *  FreeBSD which have increased the maximum number of MSI-X
	 *  vectors, but adding the workaround makes it easier for
	 *  vendors wishing to import this driver into kernels based on
	 *  older versions of FreeBSD.
	 */
	for (i = 0; i < num_vectors_allocated; i++) {
		rid = i + 1;
		ctrlr->msi_res[i] = bus_alloc_resource_any(ctrlr->dev,
		    SYS_RES_IRQ, &rid, RF_ACTIVE);

		if (ctrlr->msi_res[i] == NULL) {
			ctrlr->msix_enabled = 0;
			while (i > 0) {
				i--;
				bus_release_resource(ctrlr->dev,
				    SYS_RES_IRQ,
				    rman_get_rid(ctrlr->msi_res[i]),
				    ctrlr->msi_res[i]);
			}
			pci_release_msi(dev);
			nvme_printf(ctrlr, "could not obtain all MSI-X "
			    "resources, reverting to intx\n");
			break;
		}
	}

intx:

	if (!ctrlr->msix_enabled)
		nvme_ctrlr_configure_intx(ctrlr);

	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
	nvme_ctrlr_construct_admin_qpair(ctrlr);
	status = nvme_ctrlr_construct_io_qpairs(ctrlr);

	if (status != 0)
		return (status);

	ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

	if (ctrlr->cdev == NULL)
		return (ENXIO);

	ctrlr->cdev->si_drv1 = (void *)ctrlr;

	ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

	ctrlr->is_resetting = 0;
	ctrlr->is_initialized = 0;
	ctrlr->notification_sent = 0;
	TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

	TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
	STAILQ_INIT(&ctrlr->fail_req);
	ctrlr->is_failed = FALSE;

	return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
	int	i;

	/*
	 * Notify the controller of a shutdown, even though this is due to
	 *  a driver unload, not a system shutdown (this path is not invoked
	 *  during shutdown).  This ensures the controller receives a
	 *  shutdown notification in case the system is shutdown before
	 *  reloading the driver.
	 */
	nvme_ctrlr_shutdown(ctrlr);

	nvme_ctrlr_disable(ctrlr);
	taskqueue_free(ctrlr->taskqueue);

	for (i = 0; i < NVME_MAX_NAMESPACES; i++)
		nvme_ns_destruct(&ctrlr->ns[i]);

	if (ctrlr->cdev)
		destroy_dev(ctrlr->cdev);

	for (i = 0; i < ctrlr->num_io_queues; i++)
		nvme_io_qpair_destroy(&ctrlr->ioq[i]);

	free(ctrlr->ioq, M_NVME);

	nvme_admin_qpair_destroy(&ctrlr->adminq);

	if (ctrlr->resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->resource_id, ctrlr->resource);
	}

	if (ctrlr->bar4_resource != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
	}

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	if (ctrlr->msix_enabled)
		pci_release_msi(dev);
}

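/*
 * Ask the controller for a clean shutdown by writing CC.SHN, then poll
 *  CSTS.SHST until the controller reports shutdown processing complete,
 *  waiting at most 5 seconds.
 */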
void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
	union cc_register	cc;
	union csts_register	csts;
	int			ticks = 0;

	cc.raw = nvme_mmio_read_4(ctrlr, cc);
	cc.bits.shn = NVME_SHN_NORMAL;
	nvme_mmio_write_4(ctrlr, cc, cc.raw);
	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
		pause("nvme shn", 1);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
	if (csts.bits.shst != NVME_SHST_COMPLETE)
		nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
		    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

	nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
	struct nvme_qpair	*qpair;

	if (ctrlr->per_cpu_io_queues)
		qpair = &ctrlr->ioq[curcpu];
	else
		qpair = &ctrlr->ioq[0];

	nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

	return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

	return (&ctrlr->cdata);
}