2 * Copyright (C) 2012 Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
33 #include <sys/ioccom.h>
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
39 #include "nvme_private.h"
/*
 * Completion callback shared by the synchronous admin commands in this
 * file: the caller allocates an nvme_completion on its stack, msleep()s
 * on its address, and this callback copies the controller's completion
 * status into it so the caller can inspect sf_sc/sf_sct after waking.
 * NOTE(review): the mtx declaration, mtx_lock/wakeup/mtx_unlock and the
 * function braces are elided from this excerpt — confirm in full source.
 */
42 nvme_ctrlr_cb(void *arg, const struct nvme_completion *status)
44 struct nvme_completion *cpl = arg;
48 * Copy status into the argument passed by the caller, so that
49 * the caller can check the status to determine if the
50 * request passed or failed.
52 memcpy(cpl, status, sizeof(*cpl));
/* Same pool mutex the sleeping caller looked up for this cpl address. */
53 mtx = mtx_pool_find(mtxpool_sleep, cpl);
/*
 * Map the controller's memory-mapped register BAR and cache the bus
 * tag/handle used by the nvme_mmio_* accessors.
 * NOTE(review): the else keyword, error-path return and closing brace
 * are elided from this excerpt.
 */
60 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
63 /* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
64 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
65 ctrlr->resource_id = PCIR_BAR(2);
67 ctrlr->resource_id = PCIR_BAR(0);
/* 0..~0 range lets the bus pick wherever the BAR was already programmed. */
69 ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
70 &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);
72 if(ctrlr->resource == NULL) {
73 device_printf(ctrlr->dev, "unable to allocate pci resource\n");
77 ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
78 ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
/* regs is only used as a layout template over the handle, not dereferenced
 * directly as a pointer on all architectures — see nvme_mmio_* macros. */
79 ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
/*
 * Map the separate Chatham prototype-board control BAR used by the
 * chatham_read_4/chatham_write_8 accessors (distinct from the NVMe
 * register BAR mapped in nvme_ctrlr_allocate_bar()).
 * NOTE(review): error-path return and closing brace elided in excerpt.
 */
86 nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
89 ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
90 ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
91 SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
94 if(ctrlr->chatham_resource == NULL) {
95 device_printf(ctrlr->dev, "unable to alloc pci resource\n");
99 ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
100 ctrlr->chatham_bus_handle =
101 rman_get_bushandle(ctrlr->chatham_resource);
/*
 * One-time initialization of the Chatham prototype board: read its
 * version and size registers, then program its size and timing control
 * registers.  The 0x80xx offsets are Chatham-specific control registers;
 * their individual meanings are not documented here — presumably board
 * errata/spec material (NOTE(review): confirm against Chatham docs).
 */
107 nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
109 uint64_t reg1, reg2, reg3;
110 uint64_t temp1, temp2;
112 uint32_t use_flash_timings = 0;
116 temp3 = chatham_read_4(ctrlr, 0x8080);
118 device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);
/* Reported LBA count is biased by 0x110; 512-byte sectors. */
120 ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
121 ctrlr->chatham_size = ctrlr->chatham_lbas * 512;
123 device_printf(ctrlr->dev, "Chatham size: %lld\n",
124 (long long)ctrlr->chatham_size);
/* All three size registers get the same last-valid-byte value. */
126 reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;
/* Tunable selects slower flash-emulation timings vs. DDR (zero) timings. */
128 TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
129 if (use_flash_timings) {
130 device_printf(ctrlr->dev, "Chatham: using flash timings\n");
131 temp1 = 0x00001b58000007d0LL;
132 temp2 = 0x000000cb00000131LL;
134 device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
135 temp1 = temp2 = 0x0LL;
138 chatham_write_8(ctrlr, 0x8000, reg1);
139 chatham_write_8(ctrlr, 0x8008, reg2);
140 chatham_write_8(ctrlr, 0x8010, reg3);
/* Read-backs after the first two timing writes — presumably to flush
 * posted writes; results are discarded.  NOTE(review): confirm intent. */
142 chatham_write_8(ctrlr, 0x8020, temp1);
143 temp3 = chatham_read_4(ctrlr, 0x8020);
145 chatham_write_8(ctrlr, 0x8028, temp2);
146 temp3 = chatham_read_4(ctrlr, 0x8028);
148 chatham_write_8(ctrlr, 0x8030, temp1);
149 chatham_write_8(ctrlr, 0x8038, temp2);
150 chatham_write_8(ctrlr, 0x8040, temp1);
151 chatham_write_8(ctrlr, 0x8048, temp2);
152 chatham_write_8(ctrlr, 0x8050, temp1);
153 chatham_write_8(ctrlr, 0x8058, temp2);
/*
 * Overwrite the IDENTIFY CONTROLLER data with synthetic values for the
 * Chatham prototype, which returns garbage for several fields.
 * NOTE(review): some field assignments and the enclosing #ifdef CHATHAM2
 * opening are elided from this excerpt (the matching #endif is visible).
 */
159 nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
161 struct nvme_controller_data *cdata;
163 cdata = &ctrlr->cdata;
166 cdata->ssvid = 0x2011;
169 * Chatham2 puts garbage data in these fields when we
170 * invoke IDENTIFY_CONTROLLER, so we need to re-zero
171 * the fields before calling bcopy().
/* Serial number, model number and firmware revision are fixed-width,
 * space/NUL-padded byte fields — hence memset then partial memcpy. */
173 memset(cdata->sn, 0, sizeof(cdata->sn));
174 memcpy(cdata->sn, "2012", strlen("2012"));
175 memset(cdata->mn, 0, sizeof(cdata->mn));
176 memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
177 memset(cdata->fr, 0, sizeof(cdata->fr));
178 memcpy(cdata->fr, "0", strlen("0"));
/* Advertise per-namespace SMART log support. */
181 cdata->lpa.ns_smart = 1;
188 /* Chatham2 doesn't support DSM command */
/* Volatile write cache present. */
191 cdata->vwc.present = 1;
193 #endif /* CHATHAM2 */
/*
 * Construct the admin queue pair, honoring the hw.nvme.admin_entries
 * tunable but clamping it back to the default when out of range.
 * NOTE(review): several nvme_qpair_construct() arguments (vector, id,
 * trackers, controller pointer) are elided from this excerpt.
 */
196 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
198 struct nvme_qpair *qpair;
199 uint32_t num_entries;
201 qpair = &ctrlr->adminq;
203 num_entries = NVME_ADMIN_ENTRIES;
204 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
206 * If admin_entries was overridden to an invalid value, revert it
207 * back to our default value.
209 if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
210 num_entries > NVME_MAX_ADMIN_ENTRIES) {
211 printf("nvme: invalid hw.nvme.admin_entries=%d specified\n",
213 num_entries = NVME_ADMIN_ENTRIES;
217 * The admin queue's max xfer size is treated differently than the
218 * max I/O xfer size. 16KB is sufficient here - maybe even less?
220 nvme_qpair_construct(qpair,
225 16*1024, /* max xfer size */
/*
 * Allocate and construct all I/O queue pairs.  Queue depth and tracker
 * count come from tunables, clamped to driver limits and to the
 * device's MQES capability; max transfer size is likewise validated.
 * Returns non-zero on allocation failure (return statements elided
 * from this excerpt).
 */
230 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
232 struct nvme_qpair *qpair;
233 union cap_lo_register cap_lo;
234 int i, num_entries, num_trackers;
236 num_entries = NVME_IO_ENTRIES;
237 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
240 * NVMe spec sets a hard limit of 64K max entries, but
241 * devices may specify a smaller limit, so we need to check
242 * the MQES field in the capabilities register.
/* MQES is 0-based, hence the +1. */
244 cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
245 num_entries = min(num_entries, cap_lo.bits.mqes+1);
247 num_trackers = NVME_IO_TRACKERS;
248 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
250 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
251 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
253 * No need to have more trackers than entries in the submit queue.
254 * Note also that for a queue size of N, we can only have (N-1)
255 * commands outstanding, hence the "-1" here.
257 num_trackers = min(num_trackers, (num_entries-1));
259 ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
260 TUNABLE_INT_FETCH("hw.nvme.max_xfer_size", &ctrlr->max_xfer_size);
262 * Check that tunable doesn't specify a size greater than what our
263 * driver supports, and is an even PAGE_SIZE multiple.
265 if (ctrlr->max_xfer_size > NVME_MAX_XFER_SIZE ||
266 ctrlr->max_xfer_size % PAGE_SIZE)
267 ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
/* M_NOWAIT: called from attach path; failure handled by caller. */
269 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
270 M_NVME, M_ZERO | M_NOWAIT);
272 if (ctrlr->ioq == NULL)
275 for (i = 0; i < ctrlr->num_io_queues; i++) {
276 qpair = &ctrlr->ioq[i];
279 * Admin queue has ID=0. IO queues start at ID=1 -
280 * hence the 'i+1' here.
282 * For I/O queues, use the controller-wide max_xfer_size
283 * calculated in nvme_attach().
285 nvme_qpair_construct(qpair,
287 ctrlr->msix_enabled ? i+1 : 0, /* vector */
290 ctrlr->max_xfer_size,
/* With per-CPU queues, pin each queue's interrupt to its CPU. */
293 if (ctrlr->per_cpu_io_queues)
294 bus_bind_intr(ctrlr->dev, qpair->res, i);
/*
 * Poll CSTS.RDY until it is set, giving up after the controller's
 * advertised ready timeout.  Expects CC.EN to already be 1.
 * NOTE(review): ms_waited declaration/reset, the per-iteration DELAY,
 * and the return statements are elided from this excerpt.
 */
301 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
304 union cc_register cc;
305 union csts_register csts;
307 cc.raw = nvme_mmio_read_4(ctrlr, cc);
308 csts.raw = nvme_mmio_read_4(ctrlr, csts);
/* Being called with EN clear is a driver bug worth logging. */
311 device_printf(ctrlr->dev, "%s called with cc.en = 0\n",
318 while (!csts.bits.rdy) {
320 if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
321 device_printf(ctrlr->dev, "controller did not become "
322 "ready within %d ms\n", ctrlr->ready_timeout_in_ms);
325 csts.raw = nvme_mmio_read_4(ctrlr, csts);
/*
 * Disable the controller by clearing CC.EN.  If an enable is still in
 * flight (EN=1 but RDY=0), wait for RDY first — the spec requires RDY
 * to be 1 before EN may be cleared.
 * NOTE(review): the line clearing cc.bits.en before the final write is
 * elided from this excerpt.
 */
332 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
334 union cc_register cc;
335 union csts_register csts;
337 cc.raw = nvme_mmio_read_4(ctrlr, cc);
338 csts.raw = nvme_mmio_read_4(ctrlr, csts);
340 if (cc.bits.en == 1 && csts.bits.rdy == 0)
341 nvme_ctrlr_wait_for_ready(ctrlr);
344 nvme_mmio_write_4(ctrlr, cc, cc.raw);
/*
 * Enable the controller per the NVMe init sequence: program the admin
 * submission/completion queue base addresses (ASQ/ACQ) and sizes (AQA),
 * set queue entry sizes and memory page size in CC, then set CC.EN and
 * wait for CSTS.RDY.  Returns early if already enabled.
 * NOTE(review): the line setting cc.bits.en is elided from this excerpt.
 */
349 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
351 union cc_register cc;
352 union csts_register csts;
353 union aqa_register aqa;
355 cc.raw = nvme_mmio_read_4(ctrlr, cc);
356 csts.raw = nvme_mmio_read_4(ctrlr, csts);
358 if (cc.bits.en == 1) {
359 if (csts.bits.rdy == 1)
/* Enabled but not yet ready: just wait it out. */
362 return (nvme_ctrlr_wait_for_ready(ctrlr));
365 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
367 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
371 /* acqs and asqs are 0-based. */
372 aqa.bits.acqs = ctrlr->adminq.num_entries-1;
373 aqa.bits.asqs = ctrlr->adminq.num_entries-1;
374 nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
381 cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
382 cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
384 /* This evaluates to 0, which is according to spec. */
385 cc.bits.mps = (PAGE_SIZE >> 13);
387 nvme_mmio_write_4(ctrlr, cc, cc.raw);
390 return (nvme_ctrlr_wait_for_ready(ctrlr));
/*
 * Full controller reset: disable, then re-enable and wait for ready.
 */
394 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
397 nvme_ctrlr_disable(ctrlr);
398 return (nvme_ctrlr_enable(ctrlr));
402 * Disable this code for now, since Chatham doesn't support
403 * AERs so I have no good way to test them.
/*
 * Completion callback for Asynchronous Event Requests: logs the event
 * and immediately reposts a new AER so the controller always has one
 * outstanding.  NOTE(review): per the comment above, this function is
 * presumably compiled out by an #ifdef not visible in this excerpt.
 */
407 nvme_async_event_cb(void *arg, const struct nvme_completion *status)
409 struct nvme_controller *ctrlr = arg;
411 printf("Asynchronous event occurred.\n");
413 /* TODO: decode async event type based on status */
414 /* TODO: check status for any error bits */
417 * Repost an asynchronous event request so that it can be
418 * used again by the controller.
420 nvme_ctrlr_cmd_asynchronous_event_request(ctrlr, nvme_async_event_cb,
/*
 * Synchronously issue IDENTIFY CONTROLLER and cache the result in
 * ctrlr->cdata, sleeping on the stack cpl until nvme_ctrlr_cb wakes us
 * (5 second timeout).  On Chatham, the garbage identify data is then
 * replaced with synthetic values.  Returns non-zero on failure
 * (return statements and mtx lock/unlock elided from this excerpt).
 */
426 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
429 struct nvme_completion cpl;
/* Pool mutex keyed on the cpl address pairs with nvme_ctrlr_cb's wakeup. */
432 mtx = mtx_pool_find(mtxpool_sleep, &cpl);
435 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
436 nvme_ctrlr_cb, &cpl);
437 status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
/* Fail on msleep timeout/error or any non-zero NVMe status code. */
439 if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
440 printf("nvme_identify_controller failed!\n");
445 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
446 nvme_chatham_populate_cdata(ctrlr);
/*
 * Synchronously issue SET FEATURES (Number of Queues) requesting
 * ctrlr->num_io_queues, then check how many SQ/CQ pairs the controller
 * actually granted; fall back to a single I/O queue if the request was
 * not fully satisfied.  Returns non-zero on command failure (return
 * statements and mtx lock/unlock elided from this excerpt).
 */
453 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
456 struct nvme_completion cpl;
457 int cq_allocated, sq_allocated, status;
459 mtx = mtx_pool_find(mtxpool_sleep, &cpl);
462 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
463 nvme_ctrlr_cb, &cpl);
464 status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
466 if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
467 printf("nvme_set_num_queues failed!\n");
472 * Data in cdw0 is 0-based.
473 * Lower 16-bits indicate number of submission queues allocated.
474 * Upper 16-bits indicate number of completion queues allocated.
476 sq_allocated = (cpl.cdw0 & 0xFFFF) + 1;
477 cq_allocated = (cpl.cdw0 >> 16) + 1;
480 * Check that the controller was able to allocate the number of
481 * queues we requested. If not, revert to one IO queue.
483 if (sq_allocated < ctrlr->num_io_queues ||
484 cq_allocated < ctrlr->num_io_queues) {
485 ctrlr->num_io_queues = 1;
486 ctrlr->per_cpu_io_queues = 0;
488 /* TODO: destroy extra queues that were created
489 * previously but now found to be not needed.
/*
 * For each constructed I/O queue pair, synchronously issue CREATE I/O
 * COMPLETION QUEUE followed by CREATE I/O SUBMISSION QUEUE (the CQ must
 * exist before its SQ references it).  Returns non-zero if any creation
 * fails (loop variable decls, return statements and mtx lock/unlock
 * elided from this excerpt).
 */
497 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
500 struct nvme_qpair *qpair;
501 struct nvme_completion cpl;
504 mtx = mtx_pool_find(mtxpool_sleep, &cpl);
506 for (i = 0; i < ctrlr->num_io_queues; i++) {
507 qpair = &ctrlr->ioq[i];
510 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
511 nvme_ctrlr_cb, &cpl);
512 status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
514 if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
515 printf("nvme_create_io_cq failed!\n");
520 nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
521 nvme_ctrlr_cb, &cpl);
522 status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
524 if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
525 printf("nvme_create_io_sq failed!\n");
/*
 * Construct one namespace object per namespace reported by the
 * controller (cdata.nn).  Namespace IDs are 1-based, hence i+1.
 * NOTE(review): the ns assignment, error handling of status, and
 * return are elided from this excerpt.
 */
534 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
536 struct nvme_namespace *ns;
539 for (i = 0; i < ctrlr->cdata.nn; i++) {
541 status = nvme_ns_construct(ns, i+1, ctrlr);
/*
 * Configure asynchronous event reporting: enable the critical-warning
 * event mask, then post as many AERs as the controller supports
 * (cdata.aerl), capped at the driver maximum.
 * NOTE(review): the line setting state.raw/critical warning bits before
 * clearing reserved is elided from this excerpt.
 */
550 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
552 union nvme_critical_warning_state state;
553 uint8_t num_async_events;
556 state.bits.reserved = 0;
557 nvme_ctrlr_cmd_set_asynchronous_event_config(ctrlr, state, NULL, NULL);
559 /* aerl is a zero-based value, so we need to add 1 here. */
560 num_async_events = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
563 * Disable this code for now, since Chatham doesn't support
564 * AERs so I have no good way to test them.
567 for (int i = 0; i < num_async_events; i++)
568 nvme_ctrlr_cmd_asynchronous_event_request(ctrlr,
569 nvme_async_event_cb, ctrlr);
/*
 * Program interrupt coalescing from the hw.nvme.int_coal_time /
 * hw.nvme.int_coal_threshold tunables (both default 0 = no coalescing).
 * The command is fire-and-forget: NULL callback, completion not waited on.
 */
574 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
577 ctrlr->int_coal_time = 0;
578 TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
579 &ctrlr->int_coal_time);
581 ctrlr->int_coal_threshold = 0;
582 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
583 &ctrlr->int_coal_threshold);
585 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
586 ctrlr->int_coal_threshold, NULL, NULL);
/*
 * config_intrhook-driven startup sequence, run once interrupts are
 * available: identify the controller, negotiate queue counts, create
 * the I/O queue pairs, build namespaces, then enable AER and interrupt
 * coalescing.  Each failing step falls through to the cleanup at the
 * bottom (goto targets elided from this excerpt) so sysctls are still
 * registered and the intrhook is always disestablished.
 */
590 nvme_ctrlr_start(void *ctrlr_arg)
592 struct nvme_controller *ctrlr = ctrlr_arg;
594 if (nvme_ctrlr_identify(ctrlr) != 0)
597 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0)
600 if (nvme_ctrlr_create_qpairs(ctrlr) != 0)
603 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0)
606 nvme_ctrlr_configure_aer(ctrlr);
607 nvme_ctrlr_configure_int_coalescing(ctrlr);
609 ctrlr->is_started = TRUE;
614 * Initialize sysctls, even if controller failed to start, to
615 * assist with debugging admin queue pair.
617 nvme_sysctl_initialize_ctrlr(ctrlr);
618 config_intrhook_disestablish(&ctrlr->config_hook);
/*
 * Taskqueue half of the legacy-INTx interrupt path: drain the admin
 * queue and the single I/O queue (if constructed), then clear the
 * interrupt mask (INTMC) that the hard handler set.
 */
622 nvme_ctrlr_intx_task(void *arg, int pending)
624 struct nvme_controller *ctrlr = arg;
626 nvme_qpair_process_completions(&ctrlr->adminq);
/* ioq[0].cpl is only non-NULL once the I/O queue has been constructed. */
628 if (ctrlr->ioq[0].cpl)
629 nvme_qpair_process_completions(&ctrlr->ioq[0]);
631 nvme_mmio_write_4(ctrlr, intmc, 1);
/*
 * Hard interrupt handler for legacy INTx: mask further controller
 * interrupts (INTMS) and defer completion processing to the fast
 * taskqueue, which unmasks when done.
 */
635 nvme_ctrlr_intx_handler(void *arg)
637 struct nvme_controller *ctrlr = arg;
639 nvme_mmio_write_4(ctrlr, intms, 1);
640 taskqueue_enqueue_fast(ctrlr->taskqueue, &ctrlr->task);
/*
 * Fall-back interrupt setup when MSI-X is unavailable or disabled:
 * force a single shared I/O queue, allocate a shareable legacy IRQ,
 * and create the fast taskqueue that does deferred completion work.
 * NOTE(review): the bus_setup_intr tag-out argument lines and the
 * error-path returns are elided from this excerpt.
 */
644 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
647 ctrlr->num_io_queues = 1;
648 ctrlr->per_cpu_io_queues = 0;
650 ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
651 &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
653 if (ctrlr->res == NULL) {
654 device_printf(ctrlr->dev, "unable to allocate shared IRQ\n");
658 bus_setup_intr(ctrlr->dev, ctrlr->res,
659 INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
662 if (ctrlr->tag == NULL) {
663 device_printf(ctrlr->dev,
664 "unable to setup legacy interrupt handler\n");
668 TASK_INIT(&ctrlr->task, 0, nvme_ctrlr_intx_task, ctrlr);
669 ctrlr->taskqueue = taskqueue_create_fast("nvme_taskq", M_NOWAIT,
670 taskqueue_thread_enqueue, &ctrlr->taskqueue);
671 taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_NET,
672 "%s intx taskq", device_get_nameunit(ctrlr->dev));
/*
 * Character-device ioctl handler for /dev/nvmeN.  Only
 * NVME_IDENTIFY_CONTROLLER is visible here: refresh cdata from the
 * device (synchronously, via nvme_ctrlr_cb + msleep) and copy it to the
 * caller — except on Chatham, whose IDENTIFY data is garbage, where the
 * cached synthetic cdata is returned instead.
 * NOTE(review): switch statement, mtx lock/unlock, break/return and
 * default case are elided from this excerpt.
 */
678 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
681 struct nvme_controller *ctrlr;
682 struct nvme_completion cpl;
685 ctrlr = cdev->si_drv1;
688 case NVME_IDENTIFY_CONTROLLER:
691 * Don't refresh data on Chatham, since Chatham returns
692 * garbage on IDENTIFY anyways.
694 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID) {
695 memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
699 /* Refresh data before returning to user. */
700 mtx = mtx_pool_find(mtxpool_sleep, &cpl);
702 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
703 nvme_ctrlr_cb, &cpl);
/* No timeout here (0), unlike the 5s used during startup. */
704 msleep(&cpl, mtx, PRIBIO, "nvme_ioctl", 0);
706 if (cpl.sf_sc || cpl.sf_sct)
708 memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
/* cdevsw for the per-controller /dev/nvmeN node; ioctl-only interface. */
717 static struct cdevsw nvme_ctrlr_cdevsw = {
718 .d_version = D_VERSION,
720 .d_ioctl = nvme_ctrlr_ioctl
/*
 * Attach-time construction of the controller software state: map BARs
 * (plus Chatham extras), validate doorbell stride, read the ready
 * timeout, decide per-CPU vs. single I/O queue, set up MSI-X or fall
 * back to INTx, construct the admin and I/O qpairs, and create the
 * /dev/nvmeN node.  Returns 0 on success.
 * NOTE(review): several error-path returns, goto labels and the intx
 * fallback gotos are elided from this excerpt.
 */
724 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
726 union cap_lo_register cap_lo;
727 union cap_hi_register cap_hi;
728 int num_vectors, per_cpu_io_queues, status = 0;
731 ctrlr->is_started = FALSE;
733 status = nvme_ctrlr_allocate_bar(ctrlr);
/* Chatham needs its extra control BAR mapped and board init run. */
739 if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
740 status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
743 nvme_ctrlr_setup_chatham(ctrlr);
748 * Software emulators may set the doorbell stride to something
749 * other than zero, but this driver is not set up to handle that.
751 cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
752 if (cap_hi.bits.dstrd != 0)
755 /* Get ready timeout value from controller, in units of 500ms. */
756 cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
757 ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;
759 per_cpu_io_queues = 1;
760 TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
761 ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;
763 if (ctrlr->per_cpu_io_queues)
764 ctrlr->num_io_queues = mp_ncpus;
766 ctrlr->num_io_queues = 1;
/* hw.nvme.force_intx tunable forces legacy interrupts even with MSI-X. */
768 ctrlr->force_intx = 0;
769 TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
771 ctrlr->msix_enabled = 1;
773 if (ctrlr->force_intx) {
774 ctrlr->msix_enabled = 0;
778 /* One vector per IO queue, plus one vector for admin queue. */
779 num_vectors = ctrlr->num_io_queues + 1;
781 if (pci_msix_count(dev) < num_vectors) {
782 ctrlr->msix_enabled = 0;
786 if (pci_alloc_msix(dev, &num_vectors) != 0)
787 ctrlr->msix_enabled = 0;
791 if (!ctrlr->msix_enabled)
792 nvme_ctrlr_configure_intx(ctrlr);
794 nvme_ctrlr_construct_admin_qpair(ctrlr);
796 status = nvme_ctrlr_construct_io_qpairs(ctrlr);
801 ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
802 "nvme%d", device_get_unit(dev));
804 if (ctrlr->cdev == NULL)
/* Back-pointer so the ioctl handler can recover the controller. */
807 ctrlr->cdev->si_drv1 = (void *)ctrlr;
/*
 * Submit a request on the admin queue pair.
 */
813 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
814 struct nvme_request *req)
817 nvme_qpair_submit_request(&ctrlr->adminq, req);
/*
 * Submit an I/O request, selecting the current CPU's queue pair when
 * per-CPU queues are enabled, otherwise the single shared queue.
 * NOTE(review): with per-CPU queues this relies on num_io_queues ==
 * mp_ncpus so curcpu is always a valid index — confirm in full source.
 */
821 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
822 struct nvme_request *req)
824 struct nvme_qpair *qpair;
826 if (ctrlr->per_cpu_io_queues)
827 qpair = &ctrlr->ioq[curcpu];
829 qpair = &ctrlr->ioq[0];
831 nvme_qpair_submit_request(qpair, req);