/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cam.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/uio.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
                                                struct nvme_async_event_request *aer);
static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);

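/*
 * Map BAR 0/1 (the control/doorbell registers) and, if present, BAR 4/5
 *  (where some devices place the MSI-X table).  Called once at attach,
 *  before any register access.
 */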
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{

        ctrlr->resource_id = PCIR_BAR(0);

        ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->resource_id, RF_ACTIVE);

        if (ctrlr->resource == NULL) {
                nvme_printf(ctrlr, "unable to allocate pci resource\n");
                return (ENOMEM);
        }

        ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
        ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
        ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;

        /*
         * The NVMe spec allows for the MSI-X table to be placed behind
         *  BAR 4/5, separate from the control/doorbell registers.  Always
         *  try to map this bar, because it must be mapped prior to calling
         *  pci_alloc_msix().  If the table isn't behind BAR 4/5,
         *  bus_alloc_resource() will just return NULL which is OK.
         */
        ctrlr->bar4_resource_id = PCIR_BAR(4);
        ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->bar4_resource_id, RF_ACTIVE);

        return (0);
}

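/*
 * Construct the admin queue pair, sizing it from the hw.nvme.admin_entries
 *  tunable (clamped back to the default if set out of range).
 */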
static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
        struct nvme_qpair       *qpair;
        uint32_t                num_entries;
        int                     error;

        qpair = &ctrlr->adminq;

        num_entries = NVME_ADMIN_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
        /*
         * If admin_entries was overridden to an invalid value, revert it
         *  back to our default value.
         */
        if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
            num_entries > NVME_MAX_ADMIN_ENTRIES) {
                nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
                    "specified\n", num_entries);
                num_entries = NVME_ADMIN_ENTRIES;
        }

        /*
         * The admin queue's max xfer size is treated differently than the
         *  max I/O xfer size.  16KB is sufficient here - maybe even less?
         */
        error = nvme_qpair_construct(qpair,
                                     0, /* qpair ID */
                                     0, /* vector */
                                     num_entries,
                                     NVME_ADMIN_TRACKERS,
                                     ctrlr);
        return (error);
}

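/*
 * Construct all I/O queue pairs.  Queue depth comes from the
 *  hw.nvme.io_entries tunable, capped by the device's MQES limit; tracker
 *  (outstanding command) counts come from hw.nvme.io_trackers.
 */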
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_qpair       *qpair;
        union cap_lo_register   cap_lo;
        int                     i, error, num_entries, num_trackers;

        num_entries = NVME_IO_ENTRIES;
        TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);

        /*
         * NVMe spec sets a hard limit of 64K max entries, but
         *  devices may specify a smaller limit, so we need to check
         *  the MQES field in the capabilities register.
         */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        num_entries = min(num_entries, cap_lo.bits.mqes+1);

        num_trackers = NVME_IO_TRACKERS;
        TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);

        num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
        num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
        /*
         * No need to have more trackers than entries in the submit queue.
         *  Note also that for a queue size of N, we can only have (N-1)
         *  commands outstanding, hence the "-1" here.
         */
        num_trackers = min(num_trackers, (num_entries-1));

        /*
         * Our best estimate for the maximum number of I/Os that we should
         * normally have in flight at one time.  This should be viewed as a
         * hint, not a hard limit, and will need to be revisited when the
         * upper layers of the storage system grow multi-queue support.
         */
        ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues / 4;

        /*
         * This was calculated previously when setting up interrupts, but
         *  a controller could theoretically support fewer I/O queues than
         *  MSI-X vectors.  So calculate again here just to be safe.
         */
        ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);

        ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
            M_NVME, M_ZERO | M_WAITOK);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                /*
                 * Admin queue has ID=0. IO queues start at ID=1 -
                 *  hence the 'i+1' here.
                 *
                 * For I/O queues, use the controller-wide max_xfer_size
                 *  calculated in nvme_attach().
                 */
                error = nvme_qpair_construct(qpair,
                                     i+1, /* qpair ID */
                                     ctrlr->msix_enabled ? i+1 : 0, /* vector */
                                     num_entries,
                                     num_trackers,
                                     ctrlr);
                if (error)
                        return (error);

                /*
                 * Do not bother binding interrupts if we only have one I/O
                 *  interrupt thread for this controller.
                 */
                if (ctrlr->num_io_queues > 1)
                        bus_bind_intr(ctrlr->dev, qpair->res,
                            i * ctrlr->num_cpus_per_ioq);
        }

        return (0);
}

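/*
 * Mark the controller failed: fail the admin queue and all I/O queues so
 *  that outstanding and future requests complete with an error, then
 *  notify registered consumers that the controller is gone.
 */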
static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
{
        int i;

        ctrlr->is_failed = TRUE;
        nvme_qpair_fail(&ctrlr->adminq);
        if (ctrlr->ioq != NULL) {
                for (i = 0; i < ctrlr->num_io_queues; i++)
                        nvme_qpair_fail(&ctrlr->ioq[i]);
        }
        nvme_notify_fail_consumers(ctrlr);
}

void
nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        mtx_lock(&ctrlr->lock);
        STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
        mtx_unlock(&ctrlr->lock);
        taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
}

static void
nvme_ctrlr_fail_req_task(void *arg, int pending)
{
        struct nvme_controller  *ctrlr = arg;
        struct nvme_request     *req;

        mtx_lock(&ctrlr->lock);
        while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
                req = STAILQ_FIRST(&ctrlr->fail_req);
                STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
                nvme_qpair_manual_complete_request(req->qpair, req,
                    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
        }
        mtx_unlock(&ctrlr->lock);
}

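/*
 * After verifying that CC.EN already matches desired_val, poll CSTS.RDY
 *  until it matches as well, or give up after the controller-advertised
 *  ready timeout (CAP.TO) and return ENXIO.
 */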
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
        int ms_waited;
        union cc_register cc;
        union csts_register csts;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en != desired_val) {
                nvme_printf(ctrlr, "%s called with desired_val = %d "
                    "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
                return (ENXIO);
        }

        ms_waited = 0;

        while (csts.bits.rdy != desired_val) {
                DELAY(1000);
                if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
                        nvme_printf(ctrlr, "controller ready did not become %d "
                            "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
                        return (ENXIO);
                }
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
        }

        return (0);
}

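/*
 * Disable the controller by clearing CC.EN, first waiting for any
 *  in-progress enable to finish so the transition is well-defined.
 */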
static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
{
        union cc_register cc;
        union csts_register csts;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en == 1 && csts.bits.rdy == 0)
                nvme_ctrlr_wait_for_ready(ctrlr, 1);

        cc.bits.en = 0;
        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        DELAY(5000);
        nvme_ctrlr_wait_for_ready(ctrlr, 0);
}

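/*
 * Enable the controller: program the admin queue base addresses (ASQ/ACQ)
 *  and sizes (AQA), then configure CC (queue entry sizes, page size, EN=1)
 *  and wait for CSTS.RDY.
 */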
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
{
        union cc_register       cc;
        union csts_register     csts;
        union aqa_register      aqa;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);

        if (cc.bits.en == 1) {
                if (csts.bits.rdy == 1)
                        return (0);
                else
                        return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
        }

        nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
        DELAY(5000);
        nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
        DELAY(5000);

        aqa.raw = 0;
        /* acqs and asqs are 0-based. */
        aqa.bits.acqs = ctrlr->adminq.num_entries-1;
        aqa.bits.asqs = ctrlr->adminq.num_entries-1;
        nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
        DELAY(5000);

        cc.bits.en = 1;
        cc.bits.css = 0;
        cc.bits.ams = 0;
        cc.bits.shn = 0;
        cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
        cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */

        /*
         * CC.MPS selects a memory page size of 2^(12+MPS) bytes; for a 4KB
         *  PAGE_SIZE this evaluates to 0, as the spec requires.
         */
        cc.bits.mps = (PAGE_SIZE >> 13);

        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        DELAY(5000);

        return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}

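/*
 * Perform a full controller reset: quiesce the admin and (if already
 *  allocated) I/O queues, disable the controller, then re-enable it.
 *  Returns 0 on success or an errno if the controller did not come ready.
 */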
int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
{
        int i;

        nvme_admin_qpair_disable(&ctrlr->adminq);
        /*
         * I/O queues are not allocated before the initial HW
         *  reset, so do not try to disable them.  Use is_initialized
         *  to determine if this is the initial HW reset.
         */
        if (ctrlr->is_initialized) {
                for (i = 0; i < ctrlr->num_io_queues; i++)
                        nvme_io_qpair_disable(&ctrlr->ioq[i]);
        }

        DELAY(100*1000);

        nvme_ctrlr_disable(ctrlr);
        return (nvme_ctrlr_enable(ctrlr));
}

void
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
{
        int cmpset;

        cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);

        if (cmpset == 0 || ctrlr->is_failed)
                /*
                 * Controller is already resetting or has failed.  Return
                 *  immediately since there is no need to kick off another
                 *  reset in these cases.
                 */
                return;

        taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;

        status.done = FALSE;
        nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
            nvme_completion_poll_cb, &status);
        while (status.done == FALSE)
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
                return (ENXIO);
        }

        /*
         * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
         *  controller supports.
         */
        if (ctrlr->cdata.mdts > 0)
                ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
                    ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));

        return (0);
}

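/*
 * Negotiate the number of I/O queue pairs via SET_FEATURES/NUMBER_OF_QUEUES
 *  and clamp num_io_queues to what the controller actually granted.
 */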
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;
        int                                     cq_allocated, sq_allocated;

        status.done = FALSE;
        nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
            nvme_completion_poll_cb, &status);
        while (status.done == FALSE)
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl)) {
                nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
                return (ENXIO);
        }

        /*
         * Data in cdw0 is 0-based.
         * Lower 16-bits indicate number of submission queues allocated.
         * Upper 16-bits indicate number of completion queues allocated.
         */
        sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
        cq_allocated = (status.cpl.cdw0 >> 16) + 1;

        /*
         * Controller may allocate more queues than we requested,
         *  so use the minimum of the number requested and what was
         *  actually allocated.
         */
        ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
        ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);

        return (0);
}

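/*
 * Create the I/O queue pairs on the controller side.  A completion queue
 *  must exist before the submission queue that posts to it, hence the
 *  create-CQ-then-SQ ordering below.
 */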
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;
        struct nvme_qpair                       *qpair;
        int                                     i;

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                qpair = &ctrlr->ioq[i];

                status.done = FALSE;
                nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        pause("nvme", 1);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
                        return (ENXIO);
                }

                status.done = FALSE;
                nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        pause("nvme", 1);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
                        return (ENXIO);
                }
        }

        return (0);
}

static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
{
        struct nvme_namespace   *ns;
        uint32_t                i;

        for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
                ns = &ctrlr->ns[i];
                nvme_ns_construct(ns, i+1, ctrlr);
        }

        return (0);
}

static boolean_t
is_log_page_id_valid(uint8_t page_id)
{

        switch (page_id) {
        case NVME_LOG_ERROR:
        case NVME_LOG_HEALTH_INFORMATION:
        case NVME_LOG_FIRMWARE_SLOT:
                return (TRUE);
        }

        return (FALSE);
}

static uint32_t
nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
{
        uint32_t        log_page_size;

        switch (page_id) {
        case NVME_LOG_ERROR:
                log_page_size = min(
                    sizeof(struct nvme_error_information_entry) *
                    ctrlr->cdata.elpe,
                    NVME_MAX_AER_LOG_SIZE);
                break;
        case NVME_LOG_HEALTH_INFORMATION:
                log_page_size = sizeof(struct nvme_health_information_page);
                break;
        case NVME_LOG_FIRMWARE_SLOT:
                log_page_size = sizeof(struct nvme_firmware_page);
                break;
        default:
                log_page_size = 0;
                break;
        }

        return (log_page_size);
}

static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
    union nvme_critical_warning_state state)
{

        if (state.bits.available_spare == 1)
                nvme_printf(ctrlr, "available spare space below threshold\n");

        if (state.bits.temperature == 1)
                nvme_printf(ctrlr, "temperature above threshold\n");

        if (state.bits.device_reliability == 1)
                nvme_printf(ctrlr, "device reliability degraded\n");

        if (state.bits.read_only == 1)
                nvme_printf(ctrlr, "media placed in read only mode\n");

        if (state.bits.volatile_memory_backup == 1)
                nvme_printf(ctrlr, "volatile memory backup device failed\n");

        if (state.bits.reserved != 0)
                nvme_printf(ctrlr,
                    "unknown critical warning(s): state = 0x%02x\n", state.raw);
}

static void
nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_async_event_request         *aer = arg;
        struct nvme_health_information_page     *health_info;

        /*
         * If the log page fetch for some reason completed with an error,
         *  don't pass log page data to the consumers.  In practice, this case
         *  should never happen.
         */
        if (nvme_completion_is_error(cpl))
                nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
                    aer->log_page_id, NULL, 0);
        else {
                if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
                        health_info = (struct nvme_health_information_page *)
                            aer->log_page_buffer;
                        nvme_ctrlr_log_critical_warnings(aer->ctrlr,
                            health_info->critical_warning);
                        /*
                         * Critical warnings reported through the
                         *  SMART/health log page are persistent, so
                         *  clear the associated bits in the async event
                         *  config so that we do not receive repeated
                         *  notifications for the same event.
                         */
                        aer->ctrlr->async_event_config.raw &=
                            ~health_info->critical_warning.raw;
                        nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
                            aer->ctrlr->async_event_config, NULL, NULL);
                }

                /*
                 * Pass the cpl data from the original async event completion,
                 *  not the log page fetch.
                 */
                nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
                    aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
        }

        /*
         * Repost another asynchronous event request to replace the one
         *  that just completed.
         */
        nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
}

static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_async_event_request *aer = arg;

        if (nvme_completion_is_error(cpl)) {
                /*
                 *  Do not retry failed async event requests.  This avoids
                 *  infinite loops where a new async event request is submitted
                 *  to replace the one just failed, only to fail again and
                 *  perpetuate the loop.
                 */
                return;
        }

        /* Associated log page is in bits 23:16 of completion entry dw0. */
        aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;

        nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
            aer->log_page_id);

        if (is_log_page_id_valid(aer->log_page_id)) {
                aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
                    aer->log_page_id);
                memcpy(&aer->cpl, cpl, sizeof(*cpl));
                nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
                    NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
                    aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
                    aer);
                /* Wait to notify consumers until after log page is fetched. */
        } else {
                nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
                    NULL, 0);

                /*
                 * Repost another asynchronous event request to replace the one
                 *  that just completed.
                 */
                nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
        }
}

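/*
 * Build and submit a single Asynchronous Event Request.  These commands
 *  sit in the admin queue until the controller has an event to report, so
 *  the normal request timeout is disabled for them.
 */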
static void
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
    struct nvme_async_event_request *aer)
{
        struct nvme_request *req;

        aer->ctrlr = ctrlr;
        req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
        aer->req = req;

        /*
         * Disable timeout here, since asynchronous event requests should by
         *  nature never be timed out.
         */
        req->timeout = FALSE;
        req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
        nvme_ctrlr_submit_admin_request(ctrlr, req);
}

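/*
 * Enable all async event types the controller supports (minus temperature
 *  events if the temperature threshold feature is absent), then post the
 *  maximum number of AERs the controller allows (AERL, zero-based).
 */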
static void
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;
        struct nvme_async_event_request         *aer;
        uint32_t                                i;

        ctrlr->async_event_config.raw = 0xFF;
        ctrlr->async_event_config.bits.reserved = 0;

        status.done = FALSE;
        nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
            0, NULL, 0, nvme_completion_poll_cb, &status);
        while (status.done == FALSE)
                pause("nvme", 1);
        if (nvme_completion_is_error(&status.cpl) ||
            (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
            (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
                nvme_printf(ctrlr, "temperature threshold not supported\n");
                ctrlr->async_event_config.bits.temperature = 0;
        }

        nvme_ctrlr_cmd_set_async_event_config(ctrlr,
            ctrlr->async_event_config, NULL, NULL);

        /* aerl is a zero-based value, so we need to add 1 here. */
        ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));

        for (i = 0; i < ctrlr->num_aers; i++) {
                aer = &ctrlr->aer[i];
                nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
        }
}

static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{

        ctrlr->int_coal_time = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
            &ctrlr->int_coal_time);

        ctrlr->int_coal_threshold = 0;
        TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
            &ctrlr->int_coal_threshold);

        nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
            ctrlr->int_coal_threshold, NULL, NULL);
}

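/*
 * Bring the controller to a fully operational state.  Called both from the
 *  config intrhook at boot and from the reset task after a controller
 *  reset; on any failure the controller is marked failed.
 */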
static void
nvme_ctrlr_start(void *ctrlr_arg)
{
        struct nvme_controller *ctrlr = ctrlr_arg;
        uint32_t old_num_io_queues;
        int i;

        /*
         * Only reset adminq here when we are restarting the
         *  controller after a reset.  During initialization,
         *  we have already submitted admin commands to get
         *  the number of I/O queues supported, so cannot reset
         *  the adminq again here.
         */
        if (ctrlr->is_resetting) {
                nvme_qpair_reset(&ctrlr->adminq);
        }

        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_qpair_reset(&ctrlr->ioq[i]);

        nvme_admin_qpair_enable(&ctrlr->adminq);

        if (nvme_ctrlr_identify(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        /*
         * The number of qpairs is determined during controller initialization,
         *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
         *  HW limit.  We call SET_FEATURES again here so that it gets called
         *  after any reset for controllers that depend on the driver to
         *  explicitly specify how many queues it will use.  This value should
         *  never change between resets, so panic if somehow that does happen.
         */
        if (ctrlr->is_resetting) {
                old_num_io_queues = ctrlr->num_io_queues;
                if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
                        nvme_ctrlr_fail(ctrlr);
                        return;
                }

                if (old_num_io_queues != ctrlr->num_io_queues) {
                        panic("num_io_queues changed from %u to %u",
                              old_num_io_queues, ctrlr->num_io_queues);
                }
        }

        if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
                nvme_ctrlr_fail(ctrlr);
                return;
        }

        nvme_ctrlr_configure_aer(ctrlr);
        nvme_ctrlr_configure_int_coalescing(ctrlr);

        for (i = 0; i < ctrlr->num_io_queues; i++)
                nvme_io_qpair_enable(&ctrlr->ioq[i]);
}

void
nvme_ctrlr_start_config_hook(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_qpair_reset(&ctrlr->adminq);
        nvme_admin_qpair_enable(&ctrlr->adminq);

        if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
            nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
                nvme_ctrlr_start(ctrlr);
        else
                nvme_ctrlr_fail(ctrlr);

        nvme_sysctl_initialize_ctrlr(ctrlr);
        config_intrhook_disestablish(&ctrlr->config_hook);

        ctrlr->is_initialized = 1;
        nvme_notify_new_controller(ctrlr);
}

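/*
 * Taskqueue handler for nvme_ctrlr_reset(): perform the HW reset, restart
 *  the controller, then clear is_resetting.
 */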
static void
nvme_ctrlr_reset_task(void *arg, int pending)
{
        struct nvme_controller  *ctrlr = arg;
        int                     status;

        nvme_printf(ctrlr, "resetting controller\n");
        status = nvme_ctrlr_hw_reset(ctrlr);
        /*
         * Use pause instead of DELAY, so that we yield to any nvme interrupt
         *  handlers on this CPU that were blocked on a qpair lock. We want
         *  all nvme interrupts completed before proceeding with restarting the
         *  controller.
         *
         * XXX - any way to guarantee the interrupt handlers have quiesced?
         */
        pause("nvmereset", hz / 10);
        if (status == 0)
                nvme_ctrlr_start(ctrlr);
        else
                nvme_ctrlr_fail(ctrlr);

        atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}

void
nvme_ctrlr_intx_handler(void *arg)
{
        struct nvme_controller *ctrlr = arg;

        nvme_mmio_write_4(ctrlr, intms, 1);

        nvme_qpair_process_completions(&ctrlr->adminq);

        if (ctrlr->ioq && ctrlr->ioq[0].cpl)
                nvme_qpair_process_completions(&ctrlr->ioq[0]);

        nvme_mmio_write_4(ctrlr, intmc, 1);
}

static int
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{

        ctrlr->msix_enabled = 0;
        ctrlr->num_io_queues = 1;
        ctrlr->num_cpus_per_ioq = mp_ncpus;
        ctrlr->rid = 0;
        ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
            &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);

        if (ctrlr->res == NULL) {
                nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
                return (ENOMEM);
        }

        bus_setup_intr(ctrlr->dev, ctrlr->res,
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
            ctrlr, &ctrlr->tag);

        if (ctrlr->tag == NULL) {
                nvme_printf(ctrlr, "unable to setup intx handler\n");
                return (ENOMEM);
        }

        return (0);
}

static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_pt_command *pt = arg;

        bzero(&pt->cpl, sizeof(pt->cpl));
        pt->cpl.cdw0 = cpl->cdw0;
        pt->cpl.status = cpl->status;
        pt->cpl.status.p = 0;

        mtx_lock(pt->driver_lock);
        wakeup(pt);
        mtx_unlock(pt->driver_lock);
}

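/*
 * Execute an arbitrary NVMe command on behalf of a caller (typically the
 *  NVME_PASSTHROUGH_CMD ioctl below), wiring the user buffer if needed and
 *  sleeping until nvme_pt_done() signals completion.
 *
 * A minimal userland sketch of driving this path through the ioctl,
 *  assuming the definitions from <dev/nvme/nvme.h>; error handling and
 *  exact field values are illustrative only:
 *
 *	struct nvme_pt_command pt;
 *	struct nvme_controller_data cdata;
 *	int fd = open("/dev/nvme0", O_RDWR);
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.cmd.opc = NVME_OPC_IDENTIFY;
 *	pt.cmd.cdw10 = 1;		// CNS=1: identify controller
 *	pt.buf = &cdata;
 *	pt.len = sizeof(cdata);
 *	pt.is_read = 1;
 *	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
 *		err(1, "passthrough failed");
 */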
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
    struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
    int is_admin_cmd)
{
        struct nvme_request     *req;
        struct mtx              *mtx;
        struct buf              *buf = NULL;
        int                     ret = 0;
        vm_offset_t             addr, end;

        if (pt->len > 0) {
                /*
                 * vmapbuf calls vm_fault_quick_hold_pages which only maps full
                 * pages. Ensure this request has fewer than MAXPHYS bytes when
                 * extended to full pages.
                 */
                addr = (vm_offset_t)pt->buf;
                end = round_page(addr + pt->len);
                addr = trunc_page(addr);
                if (end - addr > MAXPHYS)
                        return EIO;

                if (pt->len > ctrlr->max_xfer_size) {
                        nvme_printf(ctrlr, "pt->len (%d) "
                            "exceeds max_xfer_size (%d)\n", pt->len,
                            ctrlr->max_xfer_size);
                        return EIO;
                }
                if (is_user_buffer) {
                        /*
                         * Ensure the user buffer is wired for the duration of
                         *  this passthrough command.
                         */
                        PHOLD(curproc);
                        buf = getpbuf(NULL);
                        buf->b_data = pt->buf;
                        buf->b_bufsize = pt->len;
                        buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
                        if (vmapbuf(buf, 1) < 0) {
#else
                        if (vmapbuf(buf) < 0) {
#endif
                                ret = EFAULT;
                                goto err;
                        }
                        req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
                            nvme_pt_done, pt);
                } else
                        req = nvme_allocate_request_vaddr(pt->buf, pt->len,
                            nvme_pt_done, pt);
        } else
                req = nvme_allocate_request_null(nvme_pt_done, pt);

        req->cmd.opc    = pt->cmd.opc;
        req->cmd.cdw10  = pt->cmd.cdw10;
        req->cmd.cdw11  = pt->cmd.cdw11;
        req->cmd.cdw12  = pt->cmd.cdw12;
        req->cmd.cdw13  = pt->cmd.cdw13;
        req->cmd.cdw14  = pt->cmd.cdw14;
        req->cmd.cdw15  = pt->cmd.cdw15;

        req->cmd.nsid = nsid;

        if (is_admin_cmd)
                mtx = &ctrlr->lock;
        else
                mtx = &ctrlr->ns[nsid-1].lock;

        mtx_lock(mtx);
        pt->driver_lock = mtx;

        if (is_admin_cmd)
                nvme_ctrlr_submit_admin_request(ctrlr, req);
        else
                nvme_ctrlr_submit_io_request(ctrlr, req);

        mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
        mtx_unlock(mtx);

        pt->driver_lock = NULL;

err:
        if (buf != NULL) {
                relpbuf(buf, NULL);
                PRELE(curproc);
        }

        return (ret);
}

static int
nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_controller                  *ctrlr;
        struct nvme_pt_command                  *pt;

        ctrlr = cdev->si_drv1;

        switch (cmd) {
        case NVME_RESET_CONTROLLER:
                nvme_ctrlr_reset(ctrlr);
                break;
        case NVME_PASSTHROUGH_CMD:
                pt = (struct nvme_pt_command *)arg;
                return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
                    1 /* is_user_buffer */, 1 /* is_admin_cmd */));
        default:
                return (ENOTTY);
        }

        return (0);
}

static struct cdevsw nvme_ctrlr_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      0,
        .d_ioctl =      nvme_ctrlr_ioctl
};

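/*
 * Decide the interrupt mode: probe how many MSI-X vectors are actually
 *  available, size the I/O queue count accordingly (one vector is reserved
 *  for the admin queue), and fall back to INTx if MSI-X cannot be used.
 */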
static void
nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
{
        device_t        dev;
        int             per_cpu_io_queues;
        int             min_cpus_per_ioq;
        int             num_vectors_requested, num_vectors_allocated;
        int             num_vectors_available;

        dev = ctrlr->dev;
        min_cpus_per_ioq = 1;
        TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);

        if (min_cpus_per_ioq < 1) {
                min_cpus_per_ioq = 1;
        } else if (min_cpus_per_ioq > mp_ncpus) {
                min_cpus_per_ioq = mp_ncpus;
        }

        per_cpu_io_queues = 1;
        TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);

        if (per_cpu_io_queues == 0) {
                min_cpus_per_ioq = mp_ncpus;
        }

        ctrlr->force_intx = 0;
        TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);

        /*
         * FreeBSD currently cannot allocate more than about 190 vectors at
         *  boot, meaning that systems with high core count and many devices
         *  requesting per-CPU interrupt vectors will not get their full
         *  allotment.  So first, try to allocate as many as we may need to
         *  understand what is available, then immediately release them.
         *  Then figure out how many of those we will actually use, based on
         *  assigning an equal number of cores to each I/O queue.
         */

        /* One vector per core for I/O queues, plus one vector for the admin queue. */
        num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
        if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
                num_vectors_available = 0;
        }
        pci_release_msi(dev);

        if (ctrlr->force_intx || num_vectors_available < 2) {
                nvme_ctrlr_configure_intx(ctrlr);
                return;
        }

        /*
         * Do not use all vectors for I/O queues - one must be saved for the
         *  admin queue.
         */
        ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
            howmany(mp_ncpus, num_vectors_available - 1));

        ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
        num_vectors_requested = ctrlr->num_io_queues + 1;
        num_vectors_allocated = num_vectors_requested;

        /*
         * Now just allocate the number of vectors we need.  This should
         *  succeed, since we previously called pci_alloc_msix()
         *  successfully returning at least this many vectors, but just to
         *  be safe, if something goes wrong just revert to INTx.
         */
        if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
                nvme_ctrlr_configure_intx(ctrlr);
                return;
        }

        if (num_vectors_allocated < num_vectors_requested) {
                pci_release_msi(dev);
                nvme_ctrlr_configure_intx(ctrlr);
                return;
        }

        ctrlr->msix_enabled = 1;
}

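/*
 * One-time controller construction at device attach: map registers, read
 *  capabilities, apply tunables, set up interrupts, construct the admin
 *  queue, and create the /dev/nvmeX character device and taskqueue.
 */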
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
        union cap_lo_register   cap_lo;
        union cap_hi_register   cap_hi;
        int                     status, timeout_period;

        ctrlr->dev = dev;

        mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);

        status = nvme_ctrlr_allocate_bar(ctrlr);

        if (status != 0)
                return (status);

        /*
         * Software emulators may set the doorbell stride to something
         *  other than zero, but this driver is not set up to handle that.
         */
        cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
        if (cap_hi.bits.dstrd != 0)
                return (ENXIO);

        ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);

        /* Get ready timeout value from controller, in units of 500ms. */
        cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
        ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;

        timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
        TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
        timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
        timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
        ctrlr->timeout_period = timeout_period;

        nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
        TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);

        ctrlr->enable_aborts = 0;
        TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);

        nvme_ctrlr_setup_interrupts(ctrlr);

        ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
        if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
                return (ENXIO);

        ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
            UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));

        if (ctrlr->cdev == NULL)
                return (ENXIO);

        ctrlr->cdev->si_drv1 = (void *)ctrlr;

        ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &ctrlr->taskqueue);
        taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");

        ctrlr->is_resetting = 0;
        ctrlr->is_initialized = 0;
        ctrlr->notification_sent = 0;
        TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);

        TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
        STAILQ_INIT(&ctrlr->fail_req);
        ctrlr->is_failed = FALSE;

        return (0);
}

void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
        int                             i;

        /*
         *  Notify the controller of a shutdown, even though this is due to
         *   a driver unload, not a system shutdown (this path is not invoked
         *   during shutdown).  This ensures the controller receives a
         *   shutdown notification in case the system is shutdown before
         *   reloading the driver.
         */
        nvme_ctrlr_shutdown(ctrlr);

        nvme_ctrlr_disable(ctrlr);
        taskqueue_free(ctrlr->taskqueue);

        for (i = 0; i < NVME_MAX_NAMESPACES; i++)
                nvme_ns_destruct(&ctrlr->ns[i]);

        if (ctrlr->cdev)
                destroy_dev(ctrlr->cdev);

        for (i = 0; i < ctrlr->num_io_queues; i++) {
                nvme_io_qpair_destroy(&ctrlr->ioq[i]);
        }

        free(ctrlr->ioq, M_NVME);

        nvme_admin_qpair_destroy(&ctrlr->adminq);

        if (ctrlr->resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->resource_id, ctrlr->resource);
        }

        if (ctrlr->bar4_resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    ctrlr->bar4_resource_id, ctrlr->bar4_resource);
        }

        if (ctrlr->tag)
                bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

        if (ctrlr->res)
                bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
                    rman_get_rid(ctrlr->res), ctrlr->res);

        if (ctrlr->msix_enabled)
                pci_release_msi(dev);
}

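/*
 * Request a normal shutdown (CC.SHN) and poll CSTS.SHST for up to five
 *  seconds for the controller to report shutdown complete.
 */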
void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
{
        union cc_register       cc;
        union csts_register     csts;
        int                     ticks = 0;

        cc.raw = nvme_mmio_read_4(ctrlr, cc);
        cc.bits.shn = NVME_SHN_NORMAL;
        nvme_mmio_write_4(ctrlr, cc, cc.raw);
        csts.raw = nvme_mmio_read_4(ctrlr, csts);
        while ((csts.bits.shst != NVME_SHST_COMPLETE) && (ticks++ < 5*hz)) {
                pause("nvme shn", 1);
                csts.raw = nvme_mmio_read_4(ctrlr, csts);
        }
        if (csts.bits.shst != NVME_SHST_COMPLETE)
                nvme_printf(ctrlr, "did not complete shutdown within 5 seconds "
                    "of notification\n");
}

void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{

        nvme_qpair_submit_request(&ctrlr->adminq, req);
}

void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
    struct nvme_request *req)
{
        struct nvme_qpair       *qpair;

        qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
        nvme_qpair_submit_request(qpair, req);
}

device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{

        return (ctrlr->dev);
}

const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{

        return (&ctrlr->cdata);
}