/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/smp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void
nvme_ctrlr_cb(void *arg, const struct nvme_completion *status)
{
        struct nvme_completion  *cpl = arg;
        struct mtx              *mtx;

        /*
         * Copy status into the argument passed by the caller, so that
         * the caller can check the status to determine whether the
         * request passed or failed.
         */
        memcpy(cpl, status, sizeof(*cpl));
        mtx = mtx_pool_find(mtxpool_sleep, cpl);
        mtx_lock(mtx);
        wakeup(cpl);
        mtx_unlock(mtx);
}
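
/*
 * Map the controller's memory registers.  The NVMe control and doorbell
 * registers normally live behind BAR 0/1; the Chatham prototype is the
 * exception, as noted below.
 */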
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

        /* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                ctrlr->resource_id = PCIR_BAR(2);
        else
                ctrlr->resource_id = PCIR_BAR(0);

        ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);

        if (ctrlr->resource == NULL) {
                device_printf(ctrlr->dev, "unable to allocate pci resource\n");
                return (ENXIO);
        }

        ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
        ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
        ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

        /*
         * The NVMe spec allows for the MSI-X table to be placed behind
         * BAR 4/5, separate from the control/doorbell registers.  Always
         * try to map this BAR, because it must be mapped prior to calling
         * pci_alloc_msix().  If the table isn't behind BAR 4/5,
         * bus_alloc_resource() will just return NULL, which is OK.
         */
        ctrlr->bar4_resource_id = PCIR_BAR(4);
        ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);

        return (0);
}

#ifdef CHATHAM2
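
/*
 * Chatham exposes a separate control BAR in addition to the standard
 * NVMe registers; map it so the setup code below can program the board.
 */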
static int
nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
{

        ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
        ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
            SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
            RF_ACTIVE);

        if (ctrlr->chatham_resource == NULL) {
                device_printf(ctrlr->dev, "unable to allocate pci resource\n");
                return (ENXIO);
        }

        ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
        ctrlr->chatham_bus_handle =
            rman_get_bushandle(ctrlr->chatham_resource);

        return (0);
}
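
/*
 * Program the Chatham board's control registers.  The 0x80xx offsets and
 * the timing constants used below appear to be specific to this prototype
 * hardware; they are not described by the NVMe specification.
 */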
static void
nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
{
        uint64_t reg1, reg2, reg3;
        uint64_t temp1, temp2;
        uint32_t temp3;
        uint32_t use_flash_timings = 0;

        temp3 = chatham_read_4(ctrlr, 0x8080);

        device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);

        ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
        ctrlr->chatham_size = ctrlr->chatham_lbas * 512;

        device_printf(ctrlr->dev, "Chatham size: %lld\n",
            (long long)ctrlr->chatham_size);

        reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;

        TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
        if (use_flash_timings) {
                device_printf(ctrlr->dev, "Chatham: using flash timings\n");
                temp1 = 0x00001b58000007d0LL;
                temp2 = 0x000000cb00000131LL;
        } else {
                device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
                temp1 = temp2 = 0x0LL;
        }

        chatham_write_8(ctrlr, 0x8000, reg1);
        chatham_write_8(ctrlr, 0x8008, reg2);
        chatham_write_8(ctrlr, 0x8010, reg3);

        chatham_write_8(ctrlr, 0x8020, temp1);
        temp3 = chatham_read_4(ctrlr, 0x8020);

        chatham_write_8(ctrlr, 0x8028, temp2);
        temp3 = chatham_read_4(ctrlr, 0x8028);

        chatham_write_8(ctrlr, 0x8030, temp1);
        chatham_write_8(ctrlr, 0x8038, temp2);
        chatham_write_8(ctrlr, 0x8040, temp1);
        chatham_write_8(ctrlr, 0x8048, temp2);
        chatham_write_8(ctrlr, 0x8050, temp1);
        chatham_write_8(ctrlr, 0x8058, temp2);
}
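
/*
 * Chatham returns garbage for IDENTIFY CONTROLLER, so overwrite the
 * identify data with sane values before anything else consumes it.
 */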
static void
nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
{
        struct nvme_controller_data *cdata;

        cdata = &ctrlr->cdata;

        cdata->vid = 0x8086;
        cdata->ssvid = 0x2011;

        /*
         * Chatham2 puts garbage data in these fields when we
         * invoke IDENTIFY_CONTROLLER, so we need to re-zero
         * the fields before copying in the strings below.
         */
        memset(cdata->sn, 0, sizeof(cdata->sn));
        memcpy(cdata->sn, "2012", strlen("2012"));
        memset(cdata->mn, 0, sizeof(cdata->mn));
        memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
        memset(cdata->fr, 0, sizeof(cdata->fr));
        memcpy(cdata->fr, "0", strlen("0"));

        cdata->lpa.ns_smart = 1;

        /* Chatham2 doesn't support the DSM command. */
        cdata->oncs.dsm = 0;

        cdata->vwc.present = 1;
}
#endif /* CHATHAM2 */
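
/*
 * Set up the admin queue pair, honoring the hw.nvme.admin_entries
 * tunable when it specifies a valid queue depth.
 */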
static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
        struct nvme_qpair       *qpair;
        uint32_t                num_entries;

        qpair = &ctrlr->adminq;

        num_entries = NVME_ADMIN_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
        /*
         * If admin_entries was overridden to an invalid value, revert it
         * back to our default value.
         */
        if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
            num_entries > NVME_MAX_ADMIN_ENTRIES) {
                printf("nvme: invalid hw.nvme.admin_entries=%d specified\n",
                    num_entries);
                num_entries = NVME_ADMIN_ENTRIES;
        }

        /*
         * The admin queue's max xfer size is treated differently from the
         * max I/O xfer size.  16KB is sufficient here.
         */
        nvme_qpair_construct(qpair,
                             0, /* qpair ID */
                             0, /* vector */
                             num_entries,
                             NVME_ADMIN_TRACKERS,
                             16*1024, /* max xfer size */
                             ctrlr);
}
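
/*
 * Size and allocate the I/O queue pairs.  Queue depth is bounded by the
 * controller's MQES capability field as well as the hw.nvme.io_entries
 * tunable, and tracker counts are clamped to sane limits.
 */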
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_qpair       *qpair;
        union cap_lo_register   cap_lo;
        int                     i, num_entries, num_trackers;

        num_entries = NVME_IO_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

        /*
         * The NVMe spec sets a hard limit of 64K max entries, but
         * devices may specify a smaller limit, so we need to check
         * the MQES field in the capabilities register.
         */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        num_entries = min(num_entries, cap_lo.bits.mqes+1);

        num_trackers = NVME_IO_TRACKERS;
        TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

        num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
        num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
        /*
         * No need to have more trackers than entries in the submit queue.
         * Note also that for a queue size of N, we can only have (N-1)
         * commands outstanding, hence the "-1" here.
         */
        num_trackers = min(num_trackers, (num_entries-1));

        ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
        TUNABLE_INT_FETCH("hw.nvme.max_xfer_size", &ctrlr->max_xfer_size);
        /*
         * Check that the tunable doesn't specify a size greater than what
         * our driver supports, and that it is an even multiple of
         * PAGE_SIZE.
         */
        if (ctrlr->max_xfer_size > NVME_MAX_XFER_SIZE ||
            ctrlr->max_xfer_size % PAGE_SIZE)
                ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;

        ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
            M_NVME, M_ZERO | M_NOWAIT);

        if (ctrlr->ioq == NULL)
                return (ENOMEM);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                /*
                 * The admin queue has ID=0.  I/O queues start at ID=1,
                 * hence the 'i+1' here.
                 *
                 * For I/O queues, use the controller-wide max_xfer_size
                 * calculated above.
                 */
                nvme_qpair_construct(qpair,
                                     i+1, /* qpair ID */
                                     ctrlr->msix_enabled ? i+1 : 0, /* vector */
                                     num_entries,
                                     num_trackers,
                                     ctrlr->max_xfer_size,
                                     ctrlr);

                if (ctrlr->per_cpu_io_queues)
                        bus_bind_intr(ctrlr->dev, qpair->res, i);
        }

        return (0);
}
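
/*
 * Poll CSTS.RDY until the controller reports ready, giving up after the
 * timeout advertised in the controller's CAP.TO field.
 */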
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
{
        int ms_waited;
        union cc_register cc;
        union csts_register csts;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (!cc.bits.en) {
                device_printf(ctrlr->dev, "%s called with cc.en = 0\n",
                    __func__);
                return (ENXIO);
        }

        ms_waited = 0;

        while (!csts.bits.rdy) {
                DELAY(1000);
                if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
                        device_printf(ctrlr->dev, "controller did not become "
                            "ready within %d ms\n", ctrlr->ready_timeout_in_ms);
                        return (ENXIO);
                }
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
        }

        return (0);
}
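
/*
 * Clear CC.EN to disable the controller.  If an enable is still in
 * progress, wait for CSTS.RDY first so the disable is well-defined.
 */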
static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en == 1 && csts.bits.rdy == 0)
                nvme_ctrlr_wait_for_ready(ctrlr);

        cc.bits.en = 0;
        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        DELAY(5000);
}
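
/*
 * Enable the controller: register the admin queue's base addresses and
 * size with the controller, program the queue entry sizes and memory
 * page size into CC, then set CC.EN and wait for CSTS.RDY.
 */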
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;
        union aqa_register aqa;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en == 1) {
                if (csts.bits.rdy == 1)
                        return (0);
                else
                        return (nvme_ctrlr_wait_for_ready(ctrlr));
        }

        nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
        DELAY(5000);
        nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
        DELAY(5000);

        aqa.raw = 0;
        /* acqs and asqs are 0-based. */
        aqa.bits.acqs = ctrlr->adminq.num_entries-1;
        aqa.bits.asqs = ctrlr->adminq.num_entries-1;
        nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
        DELAY(5000);

        cc.bits.en = 1;
        cc.bits.css = 0;
        cc.bits.ams = 0;
        cc.bits.shn = 0;
        cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
        cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

        /*
         * CC.MPS encodes the host memory page size as 2^(12+MPS), so for
         * 4KB pages this evaluates to 0, as the spec requires.
         */
        cc.bits.mps = (PAGE_SIZE >> 13);

        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        DELAY(5000);

        return (nvme_ctrlr_wait_for_ready(ctrlr));
}

static int
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{

        nvme_ctrlr_disable(ctrlr);
        return (nvme_ctrlr_enable(ctrlr));
}

/*
 * This code is disabled for now: Chatham doesn't support AERs, so there
 * is no good way to test them.
 */
#if 0
static void
nvme_async_event_cb(void *arg, const struct nvme_completion *status)
{
        struct nvme_controller *ctrlr = arg;

        printf("Asynchronous event occurred.\n");

        /* TODO: decode async event type based on status */
        /* TODO: check status for any error bits */

        /*
         * Repost an asynchronous event request so that it can be
         * used again by the controller.
         */
        nvme_ctrlr_cmd_asynchronous_event_request(ctrlr, nvme_async_event_cb,
            ctrlr);
}
#endif

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
        struct mtx *mtx;
        struct nvme_completion cpl;
        int status;

        mtx = mtx_pool_find(mtxpool_sleep, &cpl);

        mtx_lock(mtx);
        nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
            nvme_ctrlr_cb, &cpl);
        status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
        mtx_unlock(mtx);
        if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
                printf("nvme_identify_controller failed!\n");
                return (ENXIO);
        }

#ifdef CHATHAM2
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                nvme_chatham_populate_cdata(ctrlr);
#endif

        return (0);
}

static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
        struct mtx *mtx;
        struct nvme_completion cpl;
        int cq_allocated, sq_allocated, status;

        mtx = mtx_pool_find(mtxpool_sleep, &cpl);

        mtx_lock(mtx);
        nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
            nvme_ctrlr_cb, &cpl);
        status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
        mtx_unlock(mtx);
        if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
                printf("nvme_set_num_queues failed!\n");
                return (ENXIO);
        }

        /*
         * Data in cdw0 is 0-based.
         * Lower 16-bits indicate number of submission queues allocated.
         * Upper 16-bits indicate number of completion queues allocated.
         */
        sq_allocated = (cpl.cdw0 & 0xFFFF) + 1;
        cq_allocated = (cpl.cdw0 >> 16) + 1;
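
        /*
         * Example with hypothetical values: cdw0 == 0x00030007 would mean
         * the controller allocated 8 submission queues and 4 completion
         * queues.
         */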

        /*
         * Check that the controller was able to allocate the number of
         * queues we requested.  If not, revert to one IO queue.
         */
        if (sq_allocated < ctrlr->num_io_queues ||
            cq_allocated < ctrlr->num_io_queues) {
                ctrlr->num_io_queues = 1;
                ctrlr->per_cpu_io_queues = 0;

                /*
                 * TODO: destroy extra queues that were created
                 * previously but now found to be not needed.
                 */
        }

        return (0);
}
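
/*
 * Issue CREATE IO CQ/SQ admin commands for each I/O queue pair, creating
 * each completion queue before its paired submission queue, since the
 * spec requires the CQ to exist when the SQ is created.
 */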
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
        struct mtx *mtx;
        struct nvme_qpair *qpair;
        struct nvme_completion cpl;
        int i, status;

        mtx = mtx_pool_find(mtxpool_sleep, &cpl);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                mtx_lock(mtx);
                nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
                    nvme_ctrlr_cb, &cpl);
                status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
                mtx_unlock(mtx);
                if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
                        printf("nvme_create_io_cq failed!\n");
                        return (ENXIO);
                }

                mtx_lock(mtx);
                nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
                    nvme_ctrlr_cb, &cpl);
                status = msleep(&cpl, mtx, PRIBIO, "nvme_start", hz*5);
                mtx_unlock(mtx);
                if ((status != 0) || cpl.sf_sc || cpl.sf_sct) {
                        printf("nvme_create_io_sq failed!\n");
                        return (ENXIO);
                }
        }

        return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
        struct nvme_namespace *ns;
        int i, status;

        for (i = 0; i < ctrlr->cdata.nn; i++) {
                ns = &ctrlr->ns[i];
                status = nvme_ns_construct(ns, i+1, ctrlr);
                if (status != 0)
                        return (status);
        }

        return (0);
}

static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
        union nvme_critical_warning_state       state;
        uint8_t                                 num_async_events;

        state.raw = 0xFF;
        state.bits.reserved = 0;
        nvme_ctrlr_cmd_set_asynchronous_event_config(ctrlr, state, NULL, NULL);

        /* aerl is a zero-based value, so we need to add 1 here. */
        num_async_events = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

        /*
         * This code is disabled for now: Chatham doesn't support AERs, so
         * there is no good way to test them.
         */
#if 0
        for (int i = 0; i < num_async_events; i++)
                nvme_ctrlr_cmd_asynchronous_event_request(ctrlr,
                    nvme_async_event_cb, ctrlr);
#endif
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

        ctrlr->int_coal_time = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
            &ctrlr->int_coal_time);

        ctrlr->int_coal_threshold = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
            &ctrlr->int_coal_threshold);

        nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
            ctrlr->int_coal_threshold, NULL, NULL);
}
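
/*
 * Bring the controller to a fully operational state: identify it, size
 * and create the I/O queue pairs, construct namespaces, then enable AERs
 * and interrupt coalescing.  Runs from the config intrhook, so it may
 * sleep.
 */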
void
nvme_ctrlr_start(void *ctrlr_arg)
{
        struct nvme_controller *ctrlr = ctrlr_arg;

        if (nvme_ctrlr_identify(ctrlr) != 0)
                goto err;

        if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0)
                goto err;

        if (nvme_ctrlr_create_qpairs(ctrlr) != 0)
                goto err;

        if (nvme_ctrlr_construct_namespaces(ctrlr) != 0)
                goto err;

        nvme_ctrlr_configure_aer(ctrlr);
        nvme_ctrlr_configure_int_coalescing(ctrlr);

        ctrlr->is_started = TRUE;

err:

        /*
         * Initialize sysctls, even if the controller failed to start, to
         * assist with debugging the admin queue pair.
         */
        nvme_sysctl_initialize_ctrlr(ctrlr);
        config_intrhook_disestablish(&ctrlr->config_hook);
}

static void
nvme_ctrlr_intx_handler(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_mmio_write_4(ctrlr, intms, 1);

        nvme_qpair_process_completions(&ctrlr->adminq);

        if (ctrlr->ioq[0].cpl)
                nvme_qpair_process_completions(&ctrlr->ioq[0]);

        nvme_mmio_write_4(ctrlr, intmc, 1);
}
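
/*
 * Fall back to a single shared INTx interrupt and a single I/O queue
 * when MSI-X is unavailable or disabled via hw.nvme.force_intx.
 */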
static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

        ctrlr->num_io_queues = 1;
        ctrlr->per_cpu_io_queues = 0;
        ctrlr->rid = 0;
        ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
            &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

        if (ctrlr->res == NULL) {
                device_printf(ctrlr->dev, "unable to allocate shared IRQ\n");
                return (ENXIO);
        }

        bus_setup_intr(ctrlr->dev, ctrlr->res,
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
            ctrlr, &ctrlr->tag);

        if (ctrlr->tag == NULL) {
                device_printf(ctrlr->dev,
                    "unable to setup legacy interrupt handler\n");
                return (ENXIO);
        }

        return (0);
}
static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_controller  *ctrlr;
        struct nvme_completion  cpl;
        struct mtx              *mtx;

        ctrlr = cdev->si_drv1;

        switch (cmd) {
        case NVME_IDENTIFY_CONTROLLER:
#ifdef CHATHAM2
                /*
                 * Don't refresh data on Chatham, since Chatham returns
                 * garbage on IDENTIFY anyway.
                 */
                if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID) {
                        memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
                        break;
                }
#endif
                /* Refresh data before returning to user. */
                mtx = mtx_pool_find(mtxpool_sleep, &cpl);
                mtx_lock(mtx);
                nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
                    nvme_ctrlr_cb, &cpl);
                msleep(&cpl, mtx, PRIBIO, "nvme_ioctl", 0);
                mtx_unlock(mtx);
                if (cpl.sf_sc || cpl.sf_sct)
                        return (ENXIO);
                memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
                break;
        default:
                return (ENOTTY);
        }

        return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
        .d_version =    D_VERSION,
        .d_ioctl =      nvme_ctrlr_ioctl
};
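
/*
 * Attach-time construction: map registers, size the queue configuration
 * from tunables and controller capabilities, set up MSI-X or INTx
 * interrupts, build the queue pairs, and create the /dev/nvme%d node.
 */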
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
        union cap_lo_register   cap_lo;
        union cap_hi_register   cap_hi;
        int                     num_vectors, per_cpu_io_queues, status = 0;

        ctrlr->dev = dev;
        ctrlr->is_started = FALSE;

        status = nvme_ctrlr_allocate_bar(ctrlr);

        if (status != 0)
                return (status);

#ifdef CHATHAM2
        if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
                status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
                if (status != 0)
                        return (status);
                nvme_ctrlr_setup_chatham(ctrlr);
        }
#endif

        /*
         * Software emulators may set the doorbell stride to something
         * other than zero, but this driver is not set up to handle that.
         */
        cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
        if (cap_hi.bits.dstrd != 0)
                return (ENXIO);

        /* Get ready timeout value from controller, in units of 500ms. */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

        per_cpu_io_queues = 1;
        TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
        ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;

        if (ctrlr->per_cpu_io_queues)
                ctrlr->num_io_queues = mp_ncpus;
        else
                ctrlr->num_io_queues = 1;

        ctrlr->force_intx = 0;
        TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

        ctrlr->msix_enabled = 1;

        if (ctrlr->force_intx) {
                ctrlr->msix_enabled = 0;
                goto intx;
        }

        /* One vector per IO queue, plus one vector for admin queue. */
        num_vectors = ctrlr->num_io_queues + 1;

        if (pci_msix_count(dev) < num_vectors) {
                ctrlr->msix_enabled = 0;
                goto intx;
        }

        if (pci_alloc_msix(dev, &num_vectors) != 0)
                ctrlr->msix_enabled = 0;

intx:

        if (!ctrlr->msix_enabled)
                nvme_ctrlr_configure_intx(ctrlr);

        nvme_ctrlr_construct_admin_qpair(ctrlr);

        status = nvme_ctrlr_construct_io_qpairs(ctrlr);

        if (status != 0)
                return (status);

        ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL,
            0600, "nvme%d", device_get_unit(dev));

        if (ctrlr->cdev == NULL)
                return (ENXIO);

        ctrlr->cdev->si_drv1 = (void *)ctrlr;

        return (0);
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        nvme_qpair_submit_request(&ctrlr->adminq, req);
}
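
/*
 * With per-CPU queues enabled, map the submitting CPU directly to its
 * queue pair (e.g., curcpu == 2 submits on ioq[2]); otherwise all I/O
 * funnels through ioq[0].
 */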
void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
        struct nvme_qpair       *qpair;

        if (ctrlr->per_cpu_io_queues)
                qpair = &ctrlr->ioq[curcpu];
        else
                qpair = &ctrlr->ioq[0];

        nvme_qpair_submit_request(qpair, req);
}