/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"
static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer);
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

        /* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                ctrlr->resource_id = PCIR_BAR(2);
        else
                ctrlr->resource_id = PCIR_BAR(0);

        ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

        if (ctrlr->resource == NULL) {
                nvme_printf(ctrlr, "unable to allocate pci resource\n");
                return (ENOMEM);
        }

        ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
        ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
        ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

        /*
         * The NVMe spec allows for the MSI-X table to be placed behind
         *  BAR 4/5, separate from the control/doorbell registers.  Always
         *  try to map this bar, because it must be mapped prior to calling
         *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
         *  bus_alloc_resource() will just return NULL which is OK.
         */
        ctrlr->bar4_resource_id = PCIR_BAR(4);
        ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

        return (0);
}
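
/*
 * Chatham is an Intel NVMe prototype board that does not fully follow the
 *  standard register layout, so the driver carries a few Chatham-specific
 *  code paths:  its NVMe registers live behind BAR 2/3, a separate control
 *  BAR must be mapped and programmed with timing parameters, and several
 *  identify fields must be fixed up by hand (see
 *  nvme_chatham_populate_cdata()).
 */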
#ifdef CHATHAM2
static int
nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
{

        ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
        ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
            SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
            RF_ACTIVE);

        if (ctrlr->chatham_resource == NULL) {
                nvme_printf(ctrlr, "unable to alloc pci resource\n");
                return (ENOMEM);
        }

        ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
        ctrlr->chatham_bus_handle =
            rman_get_bushandle(ctrlr->chatham_resource);

        return (0);
}
static void
nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
{
        uint64_t reg1, reg2, reg3;
        uint64_t temp1, temp2;
        uint32_t temp3;
        uint32_t use_flash_timings = 0;

        DELAY(10000);

        temp3 = chatham_read_4(ctrlr, 0x8080);

        device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);

        ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
        ctrlr->chatham_size = ctrlr->chatham_lbas * 512;

        device_printf(ctrlr->dev, "Chatham size: %jd\n",
            (intmax_t)ctrlr->chatham_size);

        reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;

        TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
        if (use_flash_timings) {
                device_printf(ctrlr->dev, "Chatham: using flash timings\n");
                temp1 = 0x00001b58000007d0LL;
                temp2 = 0x000000cb00000131LL;
        } else {
                device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
                temp1 = temp2 = 0x0LL;
        }

        chatham_write_8(ctrlr, 0x8000, reg1);
        chatham_write_8(ctrlr, 0x8008, reg2);
        chatham_write_8(ctrlr, 0x8010, reg3);

        chatham_write_8(ctrlr, 0x8020, temp1);
        temp3 = chatham_read_4(ctrlr, 0x8020);

        chatham_write_8(ctrlr, 0x8028, temp2);
        temp3 = chatham_read_4(ctrlr, 0x8028);

        chatham_write_8(ctrlr, 0x8030, temp1);
        chatham_write_8(ctrlr, 0x8038, temp2);
        chatham_write_8(ctrlr, 0x8040, temp1);
        chatham_write_8(ctrlr, 0x8048, temp2);
        chatham_write_8(ctrlr, 0x8050, temp1);
        chatham_write_8(ctrlr, 0x8058, temp2);

        DELAY(10000);
}
static void
nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
{
        struct nvme_controller_data *cdata;

        cdata = &ctrlr->cdata;

        cdata->vid = 0x8086;
        cdata->ssvid = 0x2011;

        /*
         * Chatham2 puts garbage data in these fields when we
         *  invoke IDENTIFY_CONTROLLER, so we need to re-zero
         *  the fields before calling bcopy().
         */
        memset(cdata->sn, 0, sizeof(cdata->sn));
        memcpy(cdata->sn, "2012", strlen("2012"));
        memset(cdata->mn, 0, sizeof(cdata->mn));
        memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
        memset(cdata->fr, 0, sizeof(cdata->fr));
        memcpy(cdata->fr, "0", strlen("0"));
        cdata->rab = 8;
        cdata->aerl = 3;
        cdata->lpa.ns_smart = 1;
        cdata->sqes.min = 6;
        cdata->sqes.max = 6;
        cdata->cqes.min = 4;
        cdata->cqes.max = 4;
        cdata->nn = 1;

        /* Chatham2 doesn't support DSM command */
        cdata->oncs.dsm = 0;

        cdata->vwc.present = 1;
}
#endif /* CHATHAM2 */
static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
        struct nvme_qpair       *qpair;
        uint32_t                num_entries;

        qpair = &ctrlr->adminq;

        num_entries = NVME_ADMIN_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
        /*
         * If admin_entries was overridden to an invalid value, revert it
         *  back to our default value.
         */
        if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
            num_entries > NVME_MAX_ADMIN_ENTRIES) {
                nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
                    "specified\n", num_entries);
                num_entries = NVME_ADMIN_ENTRIES;
        }

        /*
         * The admin queue's max xfer size is treated differently than the
         *  max I/O xfer size.  16KB is sufficient here - maybe even less?
         */
        nvme_qpair_construct(qpair,
                             0, /* qpair ID */
                             0, /* vector */
                             num_entries,
                             NVME_ADMIN_TRACKERS,
                             16*1024, /* max xfer size */
                             ctrlr);
}
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_qpair       *qpair;
        union cap_lo_register   cap_lo;
        int                     i, num_entries, num_trackers;

        num_entries = NVME_IO_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

        /*
         * NVMe spec sets a hard limit of 64K max entries, but
         *  devices may specify a smaller limit, so we need to check
         *  the MQES field in the capabilities register.
         */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        num_entries = min(num_entries, cap_lo.bits.mqes+1);

        num_trackers = NVME_IO_TRACKERS;
        TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

        num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
        num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
        /*
         * No need to have more trackers than entries in the submit queue.
         *  Note also that for a queue size of N, we can only have (N-1)
         *  commands outstanding, hence the "-1" here.
         */
        num_trackers = min(num_trackers, (num_entries-1));

        ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
        TUNABLE_INT_FETCH("hw.nvme.max_xfer_size", &ctrlr->max_xfer_size);
        /*
         * Check that tunable doesn't specify a size greater than what our
         *  driver supports, and is an even PAGE_SIZE multiple.
         */
        if (ctrlr->max_xfer_size > NVME_MAX_XFER_SIZE ||
            ctrlr->max_xfer_size % PAGE_SIZE)
                ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;

        ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
            M_NVME, M_ZERO | M_WAITOK);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                /*
                 * Admin queue has ID=0. IO queues start at ID=1 -
                 *  hence the 'i+1' here.
                 *
                 * For I/O queues, use the controller-wide max_xfer_size
                 *  calculated in nvme_attach().
                 */
                nvme_qpair_construct(qpair,
                                     i+1, /* qpair ID */
                                     ctrlr->msix_enabled ? i+1 : 0, /* vector */
                                     num_entries,
                                     num_trackers,
                                     ctrlr->max_xfer_size,
                                     ctrlr);

                if (ctrlr->per_cpu_io_queues)
                        bus_bind_intr(ctrlr->dev, qpair->res, i);
        }

        return (0);
}
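
/*
 * Mark the controller failed:  fail the admin and I/O qpairs so that
 *  outstanding and future requests complete in error, then notify
 *  registered consumers (such as the nvd disk driver) so they can detach.
 */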
static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
        int i;

        ctrlr->is_failed = TRUE;
        nvme_qpair_fail(&ctrlr->adminq);
        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_qpair_fail(&ctrlr->ioq[i]);
        nvme_notify_fail_consumers(ctrlr);
}
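
/*
 * Requests that cannot be completed in the current context (for example,
 *  when submitted to an already-failed controller with a qpair lock held)
 *  are queued here and completed later from the fail_req task, which runs
 *  in taskqueue context.
 */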
void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        mtx_lock(&ctrlr->lock);
        STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
        mtx_unlock(&ctrlr->lock);
        taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}
static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
        struct nvme_controller  *ctrlr = arg;
        struct nvme_request     *req;

        mtx_lock(&ctrlr->lock);
        while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
                req = STAILQ_FIRST(&ctrlr->fail_req);
                STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
                nvme_qpair_manual_complete_request(req->qpair, req,
                    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
        }
        mtx_unlock(&ctrlr->lock);
}
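
/*
 * Poll CSTS.RDY until the controller reports ready, or until the timeout
 *  derived from CAP.TO (stored in ready_timeout_in_ms at attach time)
 *  expires.  Only meaningful once CC.EN has been set to 1.
 */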
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
{
        int ms_waited;
        union cc_register cc;
        union csts_register csts;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (!cc.bits.en) {
                nvme_printf(ctrlr, "%s called with cc.en = 0\n", __func__);
                return (ENXIO);
        }

        ms_waited = 0;

        while (!csts.bits.rdy) {
                DELAY(1000);
                if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
                        nvme_printf(ctrlr, "controller did not become ready "
                            "within %d ms\n", ctrlr->ready_timeout_in_ms);
                        return (ENXIO);
                }
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
        }

        return (0);
}
static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en == 1 && csts.bits.rdy == 0)
                nvme_ctrlr_wait_for_ready(ctrlr);

        cc.bits.en = 0;
        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        DELAY(5000);
}
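
/*
 * Enable the controller per the NVMe spec:  the admin submission and
 *  completion queue base addresses (ASQ/ACQ) and sizes (AQA) must be
 *  programmed before CC.EN is set to 1, after which we wait for CSTS.RDY.
 */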
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
        union cc_register       cc;
        union csts_register     csts;
        union aqa_register      aqa;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en == 1) {
                if (csts.bits.rdy == 1)
                        return (0);
                else
                        return (nvme_ctrlr_wait_for_ready(ctrlr));
        }

        nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
        DELAY(5000);
        nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
        DELAY(5000);

        aqa.raw = 0;
        /* acqs and asqs are 0-based. */
        aqa.bits.acqs = ctrlr->adminq.num_entries-1;
        aqa.bits.asqs = ctrlr->adminq.num_entries-1;
        nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
        DELAY(5000);

        cc.bits.en = 1;
        cc.bits.css = 0;
        cc.bits.ams = 0;
        cc.bits.shn = 0;
        cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
        cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

        /* This evaluates to 0, which is according to spec. */
        cc.bits.mps = (PAGE_SIZE >> 13);

        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        DELAY(5000);

        return (nvme_ctrlr_wait_for_ready(ctrlr));
}
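
/*
 * Perform a controller-level reset by quiescing all qpairs, clearing
 *  CC.EN, and then re-running the enable sequence, so that outstanding
 *  commands can be retried once the controller is restarted.
 */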
int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
        int i;

        nvme_admin_qpair_disable(&ctrlr->adminq);
        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_io_qpair_disable(&ctrlr->ioq[i]);

        DELAY(100*1000);

        nvme_ctrlr_disable(ctrlr);
        return (nvme_ctrlr_enable(ctrlr));
}
void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
        int cmpset;

        cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

        if (cmpset == 0 || ctrlr->is_failed)
                /*
                 * Controller is already resetting or has failed.  Return
                 *  immediately since there is no need to kick off another
                 *  reset in these cases.
                 */
                return;

        taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;

        status.done = FALSE;
        nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
            nvme_completion_poll_cb, &status);
        while (status.done == FALSE)
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
                return (ENXIO);
        }

#ifdef CHATHAM2
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                nvme_chatham_populate_cdata(ctrlr);
#endif

        /*
         * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
         *  controller supports.
         */
        if (ctrlr->cdata.mdts > 0)
                ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
                    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

        return (0);
}
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;
        int                                     cq_allocated, i, sq_allocated;

        status.done = FALSE;
        nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
            nvme_completion_poll_cb, &status);
        while (status.done == FALSE)
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
                return (ENXIO);
        }

        /*
         * Data in cdw0 is 0-based.
         * Lower 16-bits indicate number of submission queues allocated.
         * Upper 16-bits indicate number of completion queues allocated.
         */
        sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
        cq_allocated = (status.cpl.cdw0 >> 16) + 1;

        /*
         * Check that the controller was able to allocate the number of
         *  queues we requested.  If not, revert to one IO queue pair.
         */
        if (sq_allocated < ctrlr->num_io_queues ||
            cq_allocated < ctrlr->num_io_queues) {

                /*
                 * Destroy extra IO queue pairs that were created at
                 *  controller construction time but are no longer
                 *  needed.  This will only happen when a controller
                 *  supports fewer queues than MSI-X vectors.  This
                 *  is not the normal case, but does occur with the
                 *  Chatham prototype board.
                 */
                for (i = 1; i < ctrlr->num_io_queues; i++)
                        nvme_io_qpair_destroy(&ctrlr->ioq[i]);

                ctrlr->num_io_queues = 1;
                ctrlr->per_cpu_io_queues = 0;
        }

        return (0);
}
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;
        struct nvme_qpair                       *qpair;
        int                                     i;

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                status.done = FALSE;
                nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        pause("nvme", 1);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
                        return (ENXIO);
                }

                status.done = FALSE;
                nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        pause("nvme", 1);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
                        return (ENXIO);
                }
        }

        return (0);
}
static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
        struct nvme_namespace   *ns;
        int                     i, status;

        for (i = 0; i < ctrlr->cdata.nn; i++) {
                ns = &ctrlr->ns[i];
                status = nvme_ns_construct(ns, i+1, ctrlr);
                if (status != 0)
                        return (status);
        }

        return (0);
}
static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

        switch (page_id) {
        case NVME_LOG_ERROR:
        case NVME_LOG_HEALTH_INFORMATION:
        case NVME_LOG_FIRMWARE_SLOT:
                return (TRUE);
        }

        return (FALSE);
}
static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
        uint32_t        log_page_size;

        switch (page_id) {
        case NVME_LOG_ERROR:
                /* ELPE is 0-based, so add 1 to get the entry count. */
                log_page_size = min(
                    sizeof(struct nvme_error_information_entry) *
                    (ctrlr->cdata.elpe + 1),
                    NVME_MAX_AER_LOG_SIZE);
                break;
        case NVME_LOG_HEALTH_INFORMATION:
                log_page_size = sizeof(struct nvme_health_information_page);
                break;
        case NVME_LOG_FIRMWARE_SLOT:
                log_page_size = sizeof(struct nvme_firmware_page);
                break;
        default:
                log_page_size = 0;
                break;
        }

        return (log_page_size);
}
static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_async_event_request *aer = arg;

        /*
         * If the log page fetch for some reason completed with an error,
         *  don't pass log page data to the consumers.  In practice, this case
         *  should never happen.
         */
        if (nvme_completion_is_error(cpl))
                nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
                    aer->log_page_id, NULL, 0);
        else
                /*
                 * Pass the cpl data from the original async event completion,
                 *  not the log page fetch.
                 */
                nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
                    aer->log_page_id, aer->log_page_buffer,
                    aer->log_page_size);

        /*
         * Repost another asynchronous event request to replace the one
         *  that just completed.
         */
        nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}
static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_async_event_request *aer = arg;

        if (cpl->status.sc == NVME_SC_ABORTED_SQ_DELETION) {
                /*
                 *  This is simulated when controller is being shut down, to
                 *  effectively abort outstanding asynchronous event requests
                 *  and make sure all memory is freed.  Do not repost the
                 *  request in this case.
                 */
                return;
        }

        /* Associated log page is in bits 23:16 of completion entry dw0. */
        aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

        nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
            aer->log_page_id);

        if (is_log_page_id_valid(aer->log_page_id)) {
                aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
                    aer->log_page_id);
                memcpy(&aer->cpl, cpl, sizeof(*cpl));
                nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
                    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
                    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
                    aer);
                /* Wait to notify consumers until after log page is fetched. */
        } else {
                nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
                    NULL, 0);

                /*
                 * Repost another asynchronous event request to replace the one
                 *  that just completed.
                 */
                nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
        }
}
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
        struct nvme_request *req;

        aer->ctrlr = ctrlr;
        req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
        aer->req = req;

        /*
         * Disable timeout here, since asynchronous event requests should by
         *  nature never be timed out.
         */
        req->timeout = FALSE;
        req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
        nvme_ctrlr_submit_admin_request(ctrlr, req);
}
static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
        union nvme_critical_warning_state       state;
        struct nvme_async_event_request         *aer;
        uint32_t                                i;

        state.raw = 0xFF;
        state.bits.reserved = 0;
        nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, NULL, NULL);

        /* aerl is a zero-based value, so we need to add 1 here. */
        ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

        /* Chatham doesn't support AERs. */
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                ctrlr->num_aers = 0;

        for (i = 0; i < ctrlr->num_aers; i++) {
                aer = &ctrlr->aer[i];
                nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
        }
}
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

        ctrlr->int_coal_time = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
            &ctrlr->int_coal_time);

        ctrlr->int_coal_threshold = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
            &ctrlr->int_coal_threshold);

        nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
            ctrlr->int_coal_threshold, NULL, NULL);
}
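
/*
 * Bring the controller to a fully operational state:  reset and enable the
 *  qpairs, identify the controller, negotiate the queue count, create the
 *  I/O queues, attach namespaces, then configure asynchronous events and
 *  interrupt coalescing.  Any failure along the way marks the controller
 *  failed.  This runs both at attach time (via the config intrhook) and
 *  after a controller reset.
 */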
static void
nvme_ctrlr_start(void *ctrlr_arg)
{
        struct nvme_controller *ctrlr = ctrlr_arg;
        int i;

        nvme_qpair_reset(&ctrlr->adminq);
        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_qpair_reset(&ctrlr->ioq[i]);

        nvme_admin_qpair_enable(&ctrlr->adminq);

        if (nvme_ctrlr_identify(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        nvme_ctrlr_configure_aer(ctrlr);
        nvme_ctrlr_configure_int_coalescing(ctrlr);

        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_io_qpair_enable(&ctrlr->ioq[i]);

        /*
         * Clear software progress marker to 0, to indicate to pre-boot
         *  software that OS driver load was successful.
         *
         * Chatham does not support this feature.
         */
        if (pci_get_devid(ctrlr->dev) != CHATHAM_PCI_ID)
                nvme_ctrlr_cmd_set_feature(ctrlr,
                    NVME_FEAT_SOFTWARE_PROGRESS_MARKER, 0, NULL, 0, NULL,
                    NULL);
}
void
nvme_ctrlr_start_config_hook(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_ctrlr_start(ctrlr);
        config_intrhook_disestablish(&ctrlr->config_hook);
}
static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
        struct nvme_controller  *ctrlr = arg;
        int                     status;

        nvme_printf(ctrlr, "resetting controller\n");
        status = nvme_ctrlr_hw_reset(ctrlr);
        /*
         * Use pause instead of DELAY, so that we yield to any nvme interrupt
         *  handlers on this CPU that were blocked on a qpair lock.  We want
         *  all nvme interrupts completed before proceeding with restarting the
         *  controller.
         *
         * XXX - any way to guarantee the interrupt handlers have quiesced?
         */
        pause("nvmereset", hz / 10);
        if (status == 0)
                nvme_ctrlr_start(ctrlr);
        else
                nvme_ctrlr_fail(ctrlr);

        atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}
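
/*
 * Legacy INTx interrupt handler.  INTx is level-triggered and may be
 *  shared, so mask further controller interrupts via INTMS while
 *  processing completions, then unmask via INTMC when done.
 */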
static void
nvme_ctrlr_intx_handler(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_mmio_write_4(ctrlr, intms, 1);

        nvme_qpair_process_completions(&ctrlr->adminq);

        if (ctrlr->ioq[0].cpl)
                nvme_qpair_process_completions(&ctrlr->ioq[0]);

        nvme_mmio_write_4(ctrlr, intmc, 1);
}
static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

        ctrlr->num_io_queues = 1;
        ctrlr->per_cpu_io_queues = 0;
        ctrlr->rid = 0;
        ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
            &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

        if (ctrlr->res == NULL) {
                nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
                return (ENOMEM);
        }

        bus_setup_intr(ctrlr->dev, ctrlr->res,
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
            ctrlr, &ctrlr->tag);

        if (ctrlr->tag == NULL) {
                nvme_printf(ctrlr, "unable to setup intx handler\n");
                return (ENOMEM);
        }

        return (0);
}
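
/*
 * Completion callback for passthrough commands:  copy the completion
 *  status back into the user-visible nvme_pt_command (clearing the phase
 *  bit, which is meaningless to the caller) and wake the thread sleeping
 *  in nvme_ctrlr_passthrough_cmd().
 */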
static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_pt_command *pt = arg;

        bzero(&pt->cpl, sizeof(pt->cpl));
        pt->cpl.cdw0 = cpl->cdw0;
        pt->cpl.status = cpl->status;
        pt->cpl.status.p = 0;

        mtx_lock(pt->driver_lock);
        wakeup(pt);
        mtx_unlock(pt->driver_lock);
}
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
        struct nvme_request     *req;
        struct mtx              *mtx;
        struct buf              *buf = NULL;
        int                     ret = 0;

        if (pt->len > 0) {
                if (is_user_buffer) {
                        /*
                         * Ensure the user buffer is wired for the duration of
                         *  this passthrough command.
                         */
                        PHOLD(curproc);
                        buf = getpbuf(NULL);
                        buf->b_saveaddr = buf->b_data;
                        buf->b_data = pt->buf;
                        buf->b_bufsize = pt->len;
                        buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
                        if (vmapbuf(buf, 1) < 0) {
#else
                        if (vmapbuf(buf) < 0) {
#endif
                                ret = EFAULT;
                                goto err;
                        }
                        req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
                            nvme_pt_done, pt);
                } else
                        req = nvme_allocate_request_vaddr(pt->buf, pt->len,
                            nvme_pt_done, pt);
        } else
                req = nvme_allocate_request_null(nvme_pt_done, pt);

        req->cmd.opc    = pt->cmd.opc;
        req->cmd.cdw10  = pt->cmd.cdw10;
        req->cmd.cdw11  = pt->cmd.cdw11;
        req->cmd.cdw12  = pt->cmd.cdw12;
        req->cmd.cdw13  = pt->cmd.cdw13;
        req->cmd.cdw14  = pt->cmd.cdw14;
        req->cmd.cdw15  = pt->cmd.cdw15;

        req->cmd.nsid = nsid;

        if (is_admin_cmd)
                mtx = &ctrlr->lock;
        else
                mtx = &ctrlr->ns[nsid-1].lock;

        mtx_lock(mtx);
        pt->driver_lock = mtx;

        if (is_admin_cmd)
                nvme_ctrlr_submit_admin_request(ctrlr, req);
        else
                nvme_ctrlr_submit_io_request(ctrlr, req);

        mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
        mtx_unlock(mtx);

        pt->driver_lock = NULL;

err:
        if (buf != NULL) {
                relpbuf(buf, NULL);
                PRELE(curproc);
        }

        return (ret);
}
static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_controller  *ctrlr;
        struct nvme_pt_command  *pt;

        ctrlr = cdev->si_drv1;

        switch (cmd) {
        case NVME_RESET_CONTROLLER:
                nvme_ctrlr_reset(ctrlr);
                break;
        case NVME_PASSTHROUGH_CMD:
                pt = (struct nvme_pt_command *)arg;
                return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
                    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
        default:
                return (ENOTTY);
        }

        return (0);
}
static struct cdevsw nvme_ctrlr_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_ioctl =      nvme_ctrlr_ioctl
};
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
        union cap_lo_register   cap_lo;
        union cap_hi_register   cap_hi;
        int                     num_vectors, per_cpu_io_queues, status = 0;
        int                     timeout_period;

        ctrlr->dev = dev;

        mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

        status = nvme_ctrlr_allocate_bar(ctrlr);

        if (status != 0)
                return (status);

#ifdef CHATHAM2
        if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
                status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
                if (status != 0)
                        return (status);
                nvme_ctrlr_setup_chatham(ctrlr);
        }
#endif

        /*
         * Software emulators may set the doorbell stride to something
         *  other than zero, but this driver is not set up to handle that.
         */
        cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
        if (cap_hi.bits.dstrd != 0)
                return (ENXIO);

        ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
        /* Get ready timeout value from controller, in units of 500ms. */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

        timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
        TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
        timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
        timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
        ctrlr->timeout_period = timeout_period;

        nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
        TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

        per_cpu_io_queues = 1;
        TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
        ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;

        if (ctrlr->per_cpu_io_queues)
                ctrlr->num_io_queues = mp_ncpus;
        else
                ctrlr->num_io_queues = 1;

        ctrlr->force_intx = 0;
        TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

        ctrlr->enable_aborts = 0;
        TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

        ctrlr->msix_enabled = 1;
        if (ctrlr->force_intx) {
                ctrlr->msix_enabled = 0;
                goto intx;
        }

        /* One vector per IO queue, plus one vector for admin queue. */
        num_vectors = ctrlr->num_io_queues + 1;

        if (pci_msix_count(dev) < num_vectors) {
                ctrlr->msix_enabled = 0;
                goto intx;
        }

        if (pci_alloc_msix(dev, &num_vectors) != 0)
                ctrlr->msix_enabled = 0;

intx:

        if (!ctrlr->msix_enabled)
                nvme_ctrlr_configure_intx(ctrlr);

        nvme_ctrlr_construct_admin_qpair(ctrlr);

        status = nvme_ctrlr_construct_io_qpairs(ctrlr);

        if (status != 0)
                return (status);
        ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL,
            0600, "nvme%d", device_get_unit(dev));

        if (ctrlr->cdev == NULL)
                return (ENXIO);

        ctrlr->cdev->si_drv1 = (void *)ctrlr;

        ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &ctrlr->taskqueue);
        taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

        ctrlr->is_resetting = 0;
        TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

        TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
        STAILQ_INIT(&ctrlr->fail_req);
        ctrlr->is_failed = FALSE;

        return (0);
}
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
        int     i;

        nvme_ctrlr_disable(ctrlr);
        taskqueue_free(ctrlr->taskqueue);

        for (i = 0; i < NVME_MAX_NAMESPACES; i++)
                nvme_ns_destruct(&ctrlr->ns[i]);

        if (ctrlr->cdev)
                destroy_dev(ctrlr->cdev);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                nvme_io_qpair_destroy(&ctrlr->ioq[i]);
        }

        free(ctrlr->ioq, M_NVME);

        nvme_admin_qpair_destroy(&ctrlr->adminq);

        if (ctrlr->resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->resource_id, ctrlr->resource);
        }

        if (ctrlr->bar4_resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
        }

#ifdef CHATHAM2
        if (ctrlr->chatham_resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->chatham_resource_id, ctrlr->chatham_resource);
        }
#endif

        if (ctrlr->tag)
                bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

        if (ctrlr->res)
                bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
                    rman_get_rid(ctrlr->res), ctrlr->res);

        if (ctrlr->msix_enabled)
                pci_release_msi(dev);
}
void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        nvme_qpair_submit_request(&ctrlr->adminq, req);
}
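
/*
 * With per-CPU I/O queues, index the qpair array by curcpu so that
 *  submissions stay local to the current CPU and its bound MSI-X vector;
 *  otherwise all I/O is funneled through the single qpair at index 0.
 */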
void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
        struct nvme_qpair       *qpair;

        if (ctrlr->per_cpu_io_queues)
                qpair = &ctrlr->ioq[curcpu];
        else
                qpair = &ctrlr->ioq[0];

        nvme_qpair_submit_request(qpair, req);
}
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

        return (ctrlr->dev);
}
const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

        return (&ctrlr->cdata);
}