1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2012-2016 Intel Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include "opt_cam.h"
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/buf.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/proc.h>
41 #include <sys/smp.h>
42 #include <sys/uio.h>
43 #include <sys/sbuf.h>
44 #include <sys/endian.h>
45 #include <machine/stdarg.h>
46 #include <vm/vm.h>
47
48 #include "nvme_private.h"
49
50 #define B4_CHK_RDY_DELAY_MS     2300            /* work around controller bug */
51
52 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
53                                                 struct nvme_async_event_request *aer);
54
55 static void
56 nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
57 {
58         bus_barrier(ctrlr->resource, 0, rman_get_size(ctrlr->resource), flags);
59 }
60
61 static void
62 nvme_ctrlr_devctl_log(struct nvme_controller *ctrlr, const char *type, const char *msg, ...)
63 {
64         struct sbuf sb;
65         va_list ap;
66         int error;
67
68         if (sbuf_new(&sb, NULL, 0, SBUF_AUTOEXTEND | SBUF_NOWAIT) == NULL)
69                 return;
70         sbuf_printf(&sb, "%s: ", device_get_nameunit(ctrlr->dev));
71         va_start(ap, msg);
72         sbuf_vprintf(&sb, msg, ap);
73         va_end(ap);
74         error = sbuf_finish(&sb);
75         if (error == 0)
76                 printf("%s\n", sbuf_data(&sb));
77
78         sbuf_clear(&sb);
79         sbuf_printf(&sb, "name=\"%s\" reason=\"", device_get_nameunit(ctrlr->dev));
80         va_start(ap, msg);
81         sbuf_vprintf(&sb, msg, ap);
82         va_end(ap);
83         sbuf_printf(&sb, "\"");
84         error = sbuf_finish(&sb);
85         if (error == 0)
86                 devctl_notify("nvme", "controller", type, sbuf_data(&sb));
87         sbuf_delete(&sb);
88 }
89
90 static int
91 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
92 {
93         struct nvme_qpair       *qpair;
94         uint32_t                num_entries;
95         int                     error;
96
97         qpair = &ctrlr->adminq;
98         qpair->id = 0;
99         qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
100         qpair->domain = ctrlr->domain;
101
102         num_entries = NVME_ADMIN_ENTRIES;
103         TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
104         /*
105          * If admin_entries was overridden to an invalid value, revert it
106          *  back to our default value.
107          */
108         if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
109             num_entries > NVME_MAX_ADMIN_ENTRIES) {
110                 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
111                     "specified\n", num_entries);
112                 num_entries = NVME_ADMIN_ENTRIES;
113         }
114
115         /*
116          * The admin queue's max xfer size is treated differently than the
117          *  max I/O xfer size.  16KB is sufficient here - maybe even less?
118          */
119         error = nvme_qpair_construct(qpair, num_entries, NVME_ADMIN_TRACKERS,
120              ctrlr);
121         return (error);
122 }
123
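/*
 * Map a CPU number to the index of the I/O queue that services it.  This is a
 * simple proportional split; with purely illustrative numbers, mp_ncpus == 8
 * and num_io_queues == 2 would map CPUs 0-3 to queue 0 and CPUs 4-7 to queue 1.
 */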
124 #define QP(ctrlr, c)    ((c) * (ctrlr)->num_io_queues / mp_ncpus)
125
126 static int
127 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
128 {
129         struct nvme_qpair       *qpair;
130         uint32_t                cap_lo;
131         uint16_t                mqes;
132         int                     c, error, i, n;
133         int                     num_entries, num_trackers, max_entries;
134
135         /*
136          * NVMe spec sets a hard limit of 64K max entries, but devices may
137          * specify a smaller limit, so we need to check the MQES field in the
138          * capabilities register. We also have to cap the number of entries to
139          * what the current doorbell stride allows for in BAR 0/1; otherwise the
140          * remaining entries are inaccessible. MQES should already reflect this,
141          * so this is just a fail-safe.
142          */
143         max_entries =
144             (rman_get_size(ctrlr->resource) - nvme_mmio_offsetof(doorbell[0])) /
145             (1 << (ctrlr->dstrd + 1));
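        /*
         * Purely illustrative example: with a 16KB BAR, doorbells starting at
         * offset 0x1000, and the minimum stride (ctrlr->dstrd == 2, i.e. one
         * 4-byte SQ tail and one 4-byte CQ head doorbell per queue), this
         * works out to (16384 - 4096) / 8 = 1536.
         */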
146         num_entries = NVME_IO_ENTRIES;
147         TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
148         cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
149         mqes = NVME_CAP_LO_MQES(cap_lo);
150         num_entries = min(num_entries, mqes + 1);
151         num_entries = min(num_entries, max_entries);
152
153         num_trackers = NVME_IO_TRACKERS;
154         TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
155
156         num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
157         num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
158         /*
159          * No need to have more trackers than entries in the submit queue.  Note
160          * also that for a queue size of N, we can only have (N-1) commands
161          * outstanding, hence the "-1" here.
162          */
163         num_trackers = min(num_trackers, (num_entries-1));
164
165         /*
166          * Our best estimate for the maximum number of I/Os that we should
167          * normally have in flight at one time. This should be viewed as a hint,
168          * not a hard limit, and will need to be revisited when the upper layers
169          * of the storage system grow multi-queue support.
170          */
171         ctrlr->max_hw_pend_io = num_trackers * ctrlr->num_io_queues * 3 / 4;
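        /* E.g. (illustrative numbers), 4 queues with 128 trackers each gives a hint of 384. */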
172
173         ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
174             M_NVME, M_ZERO | M_WAITOK);
175
176         for (i = c = n = 0; i < ctrlr->num_io_queues; i++, c += n) {
177                 qpair = &ctrlr->ioq[i];
178
179                 /*
180                  * Admin queue has ID=0. IO queues start at ID=1 -
181                  *  hence the 'i+1' here.
182                  */
183                 qpair->id = i + 1;
184                 if (ctrlr->num_io_queues > 1) {
185                         /* Find number of CPUs served by this queue. */
186                         for (n = 1; QP(ctrlr, c + n) == i; n++)
187                                 ;
188                         /* Shuffle multiple NVMe devices between CPUs. */
189                         qpair->cpu = c + (device_get_unit(ctrlr->dev)+n/2) % n;
190                         qpair->domain = pcpu_find(qpair->cpu)->pc_domain;
191                 } else {
192                         qpair->cpu = CPU_FFS(&cpuset_domain[ctrlr->domain]) - 1;
193                         qpair->domain = ctrlr->domain;
194                 }
195
196                 /*
197                  * For I/O queues, use the controller-wide max_xfer_size
198                  *  calculated in nvme_attach().
199                  */
200                 error = nvme_qpair_construct(qpair, num_entries, num_trackers,
201                     ctrlr);
202                 if (error)
203                         return (error);
204
205                 /*
206                  * Do not bother binding interrupts if we only have one I/O
207                  *  interrupt thread for this controller.
208                  */
209                 if (ctrlr->num_io_queues > 1)
210                         bus_bind_intr(ctrlr->dev, qpair->res, qpair->cpu);
211         }
212
213         return (0);
214 }
215
216 static void
217 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
218 {
219         int i;
220
221         ctrlr->is_failed = true;
222         nvme_admin_qpair_disable(&ctrlr->adminq);
223         nvme_qpair_fail(&ctrlr->adminq);
224         if (ctrlr->ioq != NULL) {
225                 for (i = 0; i < ctrlr->num_io_queues; i++) {
226                         nvme_io_qpair_disable(&ctrlr->ioq[i]);
227                         nvme_qpair_fail(&ctrlr->ioq[i]);
228                 }
229         }
230         nvme_notify_fail_consumers(ctrlr);
231 }
232
233 void
234 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
235     struct nvme_request *req)
236 {
237
238         mtx_lock(&ctrlr->lock);
239         STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
240         mtx_unlock(&ctrlr->lock);
241         if (!ctrlr->is_dying)
242                 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
243 }
244
245 static void
246 nvme_ctrlr_fail_req_task(void *arg, int pending)
247 {
248         struct nvme_controller  *ctrlr = arg;
249         struct nvme_request     *req;
250
251         mtx_lock(&ctrlr->lock);
252         while ((req = STAILQ_FIRST(&ctrlr->fail_req)) != NULL) {
253                 STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
254                 mtx_unlock(&ctrlr->lock);
255                 nvme_qpair_manual_complete_request(req->qpair, req,
256                     NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
257                 mtx_lock(&ctrlr->lock);
258         }
259         mtx_unlock(&ctrlr->lock);
260 }
261
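/*
 * Poll CSTS.RDY (pausing a tick between reads) until it reaches desired_val.
 * Returns ENXIO if the register reads back as NVME_GONE (the device was hot
 * unplugged) or if the CAP.TO-derived ready timeout expires first.
 */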
262 static int
263 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
264 {
265         int timeout = ticks + MSEC_2_TICKS(ctrlr->ready_timeout_in_ms);
266         uint32_t csts;
267
268         while (1) {
269                 csts = nvme_mmio_read_4(ctrlr, csts);
270                 if (csts == NVME_GONE)          /* Hot unplug. */
271                         return (ENXIO);
272                 if (((csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK)
273                     == desired_val)
274                         break;
275                 if (timeout - ticks < 0) {
276                         nvme_printf(ctrlr, "controller ready did not become %d "
277                             "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
278                         return (ENXIO);
279                 }
280                 pause("nvmerdy", 1);
281         }
282
283         return (0);
284 }
285
286 static int
287 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
288 {
289         uint32_t cc;
290         uint32_t csts;
291         uint8_t  en, rdy;
292         int err;
293
294         cc = nvme_mmio_read_4(ctrlr, cc);
295         csts = nvme_mmio_read_4(ctrlr, csts);
296
297         en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
298         rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
299
300         /*
301          * Per 3.1.5 in the NVMe 1.3 spec, transitioning CC.EN from 0 to 1 when
302          * CSTS.RDY is 1, or from 1 to 0 when CSTS.RDY is 0, "has undefined
303          * results."  So wait until CSTS.RDY matches the current CC.EN before
304          * changing CC.EN.  Short-circuit if we're already disabled.
305          */
306         if (en == 0) {
307                 /* Wait for RDY == 0 or timeout & fail */
308                 if (rdy == 0)
309                         return (0);
310                 return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
311         }
312         if (rdy == 0) {
313                 /* EN == 1, wait for RDY == 1 or timeout & fail */
314                 err = nvme_ctrlr_wait_for_ready(ctrlr, 1);
315                 if (err != 0)
316                         return (err);
317         }
318
319         cc &= ~NVME_CC_REG_EN_MASK;
320         nvme_mmio_write_4(ctrlr, cc, cc);
321
322         /*
323          * A few drives have firmware bugs that freeze the drive if we access
324          * the mmio too soon after we disable.
325          */
326         if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
327                 pause("nvmeR", MSEC_2_TICKS(B4_CHK_RDY_DELAY_MS));
328         return (nvme_ctrlr_wait_for_ready(ctrlr, 0));
329 }
330
331 static int
332 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
333 {
334         uint32_t        cc;
335         uint32_t        csts;
336         uint32_t        aqa;
337         uint32_t        qsize;
338         uint8_t         en, rdy;
339         int             err;
340
341         cc = nvme_mmio_read_4(ctrlr, cc);
342         csts = nvme_mmio_read_4(ctrlr, csts);
343
344         en = (cc >> NVME_CC_REG_EN_SHIFT) & NVME_CC_REG_EN_MASK;
345         rdy = (csts >> NVME_CSTS_REG_RDY_SHIFT) & NVME_CSTS_REG_RDY_MASK;
346
347         /*
348          * See the note in nvme_ctrlr_disable().  Short-circuit if we're already enabled.
349          */
350         if (en == 1) {
351                 if (rdy == 1)
352                         return (0);
353                 return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
354         }
355
356         /* EN == 0 already; wait for RDY == 0 or timeout & fail */
357         err = nvme_ctrlr_wait_for_ready(ctrlr, 0);
358         if (err != 0)
359                 return (err);
360
361         nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
362         nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
363
364         /* acqs and asqs are 0-based. */
365         qsize = ctrlr->adminq.num_entries - 1;
366
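        /*
         * AQA packs both admin queue sizes into one register: ASQS occupies the
         * low 12 bits and ACQS bits 27:16 (both 0-based), which is what the
         * shift/mask constants below encode.
         */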
367         aqa = 0;
368         aqa = (qsize & NVME_AQA_REG_ACQS_MASK) << NVME_AQA_REG_ACQS_SHIFT;
369         aqa |= (qsize & NVME_AQA_REG_ASQS_MASK) << NVME_AQA_REG_ASQS_SHIFT;
370         nvme_mmio_write_4(ctrlr, aqa, aqa);
371
372         /* Initialization values for CC */
373         cc = 0;
374         cc |= 1 << NVME_CC_REG_EN_SHIFT;
375         cc |= 0 << NVME_CC_REG_CSS_SHIFT;
376         cc |= 0 << NVME_CC_REG_AMS_SHIFT;
377         cc |= 0 << NVME_CC_REG_SHN_SHIFT;
378         cc |= 6 << NVME_CC_REG_IOSQES_SHIFT; /* SQ entry size == 64 == 2^6 */
379         cc |= 4 << NVME_CC_REG_IOCQES_SHIFT; /* CQ entry size == 16 == 2^4 */
380
381         /* With a 4KB PAGE_SIZE this evaluates to 0; MPS=0 selects 4KB pages per the spec. */
382         cc |= (PAGE_SIZE >> 13) << NVME_CC_REG_MPS_SHIFT;
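        /*
         * E.g., with a 4KB PAGE_SIZE the value written below is typically
         * 0x00460001: EN set, NVM command set, round-robin arbitration, no
         * shutdown notification, 64-byte SQ entries, 16-byte CQ entries.
         */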
383
384         nvme_ctrlr_barrier(ctrlr, BUS_SPACE_BARRIER_WRITE);
385         nvme_mmio_write_4(ctrlr, cc, cc);
386
387         return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
388 }
389
390 static void
391 nvme_ctrlr_disable_qpairs(struct nvme_controller *ctrlr)
392 {
393         int i;
394
395         nvme_admin_qpair_disable(&ctrlr->adminq);
396         /*
397          * I/O queues are not allocated before the initial HW
398          *  reset, so do not try to disable them.  Use is_initialized
399          *  to determine if this is the initial HW reset.
400          */
401         if (ctrlr->is_initialized) {
402                 for (i = 0; i < ctrlr->num_io_queues; i++)
403                         nvme_io_qpair_disable(&ctrlr->ioq[i]);
404         }
405 }
406
407 static int
408 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
409 {
410         int err;
411
412         TSENTER();
413         nvme_ctrlr_disable_qpairs(ctrlr);
414
415         pause("nvmehwreset", hz / 10);
416
417         err = nvme_ctrlr_disable(ctrlr);
418         if (err != 0)
419                 return (err);
420         err = nvme_ctrlr_enable(ctrlr);
421         TSEXIT();
422         return (err);
423 }
424
425 void
426 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
427 {
428         int cmpset;
429
430         cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
431
432         if (cmpset == 0 || ctrlr->is_failed)
433                 /*
434                  * Controller is already resetting or has failed.  Return
435                  *  immediately since there is no need to kick off another
436                  *  reset in these cases.
437                  */
438                 return;
439
440         if (!ctrlr->is_dying)
441                 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
442 }
443
444 static int
445 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
446 {
447         struct nvme_completion_poll_status      status;
448
449         status.done = 0;
450         nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
451             nvme_completion_poll_cb, &status);
452         nvme_completion_poll(&status);
453         if (nvme_completion_is_error(&status.cpl)) {
454                 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
455                 return (ENXIO);
456         }
457
458         /* Convert data to host endian */
459         nvme_controller_data_swapbytes(&ctrlr->cdata);
460
461         /*
462          * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
463          *  controller supports.
464          */
465         if (ctrlr->cdata.mdts > 0)
466                 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
467                     ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
468
469         return (0);
470 }
471
472 static int
473 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
474 {
475         struct nvme_completion_poll_status      status;
476         int                                     cq_allocated, sq_allocated;
477
478         status.done = 0;
479         nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
480             nvme_completion_poll_cb, &status);
481         nvme_completion_poll(&status);
482         if (nvme_completion_is_error(&status.cpl)) {
483                 nvme_printf(ctrlr, "nvme_ctrlr_set_num_qpairs failed!\n");
484                 return (ENXIO);
485         }
486
487         /*
488          * Data in cdw0 is 0-based.
489          * Lower 16-bits indicate number of submission queues allocated.
490          * Upper 16-bits indicate number of completion queues allocated.
491          */
492         sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
493         cq_allocated = (status.cpl.cdw0 >> 16) + 1;
494
495         /*
496          * Controller may allocate more queues than we requested,
497          *  so use the minimum of the number requested and what was
498          *  actually allocated.
499          */
500         ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
501         ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
502         if (ctrlr->num_io_queues > vm_ndomains)
503                 ctrlr->num_io_queues -= ctrlr->num_io_queues % vm_ndomains;
504
505         return (0);
506 }
507
508 static int
509 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
510 {
511         struct nvme_completion_poll_status      status;
512         struct nvme_qpair                       *qpair;
513         int                                     i;
514
515         for (i = 0; i < ctrlr->num_io_queues; i++) {
516                 qpair = &ctrlr->ioq[i];
517
518                 status.done = 0;
519                 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
520                     nvme_completion_poll_cb, &status);
521                 nvme_completion_poll(&status);
522                 if (nvme_completion_is_error(&status.cpl)) {
523                         nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
524                         return (ENXIO);
525                 }
526
527                 status.done = 0;
528                 nvme_ctrlr_cmd_create_io_sq(ctrlr, qpair,
529                     nvme_completion_poll_cb, &status);
530                 nvme_completion_poll(&status);
531                 if (nvme_completion_is_error(&status.cpl)) {
532                         nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
533                         return (ENXIO);
534                 }
535         }
536
537         return (0);
538 }
539
540 static int
541 nvme_ctrlr_delete_qpairs(struct nvme_controller *ctrlr)
542 {
543         struct nvme_completion_poll_status      status;
544         struct nvme_qpair                       *qpair;
545
546         for (int i = 0; i < ctrlr->num_io_queues; i++) {
547                 qpair = &ctrlr->ioq[i];
548
549                 status.done = 0;
550                 nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair,
551                     nvme_completion_poll_cb, &status);
552                 nvme_completion_poll(&status);
553                 if (nvme_completion_is_error(&status.cpl)) {
554                         nvme_printf(ctrlr, "nvme_destroy_io_sq failed!\n");
555                         return (ENXIO);
556                 }
557
558                 status.done = 0;
559                 nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair,
560                     nvme_completion_poll_cb, &status);
561                 nvme_completion_poll(&status);
562                 if (nvme_completion_is_error(&status.cpl)) {
563                         nvme_printf(ctrlr, "nvme_destroy_io_cq failed!\n");
564                         return (ENXIO);
565                 }
566         }
567
568         return (0);
569 }
570
571 static int
572 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
573 {
574         struct nvme_namespace   *ns;
575         uint32_t                i;
576
577         for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
578                 ns = &ctrlr->ns[i];
579                 nvme_ns_construct(ns, i+1, ctrlr);
580         }
581
582         return (0);
583 }
584
585 static bool
586 is_log_page_id_valid(uint8_t page_id)
587 {
588
589         switch (page_id) {
590         case NVME_LOG_ERROR:
591         case NVME_LOG_HEALTH_INFORMATION:
592         case NVME_LOG_FIRMWARE_SLOT:
593         case NVME_LOG_CHANGED_NAMESPACE:
594         case NVME_LOG_COMMAND_EFFECT:
595         case NVME_LOG_RES_NOTIFICATION:
596         case NVME_LOG_SANITIZE_STATUS:
597                 return (true);
598         }
599
600         return (false);
601 }
602
603 static uint32_t
604 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
605 {
606         uint32_t        log_page_size;
607
608         switch (page_id) {
609         case NVME_LOG_ERROR:
610                 log_page_size = min(
611                     sizeof(struct nvme_error_information_entry) *
612                     (ctrlr->cdata.elpe + 1), NVME_MAX_AER_LOG_SIZE);
613                 break;
614         case NVME_LOG_HEALTH_INFORMATION:
615                 log_page_size = sizeof(struct nvme_health_information_page);
616                 break;
617         case NVME_LOG_FIRMWARE_SLOT:
618                 log_page_size = sizeof(struct nvme_firmware_page);
619                 break;
620         case NVME_LOG_CHANGED_NAMESPACE:
621                 log_page_size = sizeof(struct nvme_ns_list);
622                 break;
623         case NVME_LOG_COMMAND_EFFECT:
624                 log_page_size = sizeof(struct nvme_command_effects_page);
625                 break;
626         case NVME_LOG_RES_NOTIFICATION:
627                 log_page_size = sizeof(struct nvme_res_notification_page);
628                 break;
629         case NVME_LOG_SANITIZE_STATUS:
630                 log_page_size = sizeof(struct nvme_sanitize_status_page);
631                 break;
632         default:
633                 log_page_size = 0;
634                 break;
635         }
636
637         return (log_page_size);
638 }
639
640 static void
641 nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
642     uint8_t state)
643 {
644
645         if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
646                 nvme_ctrlr_devctl_log(ctrlr, "critical",
647                     "available spare space below threshold");
648
649         if (state & NVME_CRIT_WARN_ST_TEMPERATURE)
650                 nvme_ctrlr_devctl_log(ctrlr, "critical",
651                     "temperature above threshold");
652
653         if (state & NVME_CRIT_WARN_ST_DEVICE_RELIABILITY)
654                 nvme_ctrlr_devctl_log(ctrlr, "critical",
655                     "device reliability degraded");
656
657         if (state & NVME_CRIT_WARN_ST_READ_ONLY)
658                 nvme_ctrlr_devctl_log(ctrlr, "critical",
659                     "media placed in read only mode");
660
661         if (state & NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP)
662                 nvme_ctrlr_devctl_log(ctrlr, "critical",
663                     "volatile memory backup device failed");
664
665         if (state & NVME_CRIT_WARN_ST_RESERVED_MASK)
666                 nvme_ctrlr_devctl_log(ctrlr, "critical",
667                     "unknown critical warning(s): state = 0x%02x", state);
668 }
669
670 static void
671 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
672 {
673         struct nvme_async_event_request         *aer = arg;
674         struct nvme_health_information_page     *health_info;
675         struct nvme_ns_list                     *nsl;
676         struct nvme_error_information_entry     *err;
677         int i;
678
679         /*
680          * If the log page fetch for some reason completed with an error,
681          *  don't pass log page data to the consumers.  In practice, this case
682          *  should never happen.
683          */
684         if (nvme_completion_is_error(cpl))
685                 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
686                     aer->log_page_id, NULL, 0);
687         else {
688                 /* Convert data to host endian */
689                 switch (aer->log_page_id) {
690                 case NVME_LOG_ERROR:
691                         err = (struct nvme_error_information_entry *)aer->log_page_buffer;
692                         for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
693                                 nvme_error_information_entry_swapbytes(err++);
694                         break;
695                 case NVME_LOG_HEALTH_INFORMATION:
696                         nvme_health_information_page_swapbytes(
697                             (struct nvme_health_information_page *)aer->log_page_buffer);
698                         break;
699                 case NVME_LOG_FIRMWARE_SLOT:
700                         nvme_firmware_page_swapbytes(
701                             (struct nvme_firmware_page *)aer->log_page_buffer);
702                         break;
703                 case NVME_LOG_CHANGED_NAMESPACE:
704                         nvme_ns_list_swapbytes(
705                             (struct nvme_ns_list *)aer->log_page_buffer);
706                         break;
707                 case NVME_LOG_COMMAND_EFFECT:
708                         nvme_command_effects_page_swapbytes(
709                             (struct nvme_command_effects_page *)aer->log_page_buffer);
710                         break;
711                 case NVME_LOG_RES_NOTIFICATION:
712                         nvme_res_notification_page_swapbytes(
713                             (struct nvme_res_notification_page *)aer->log_page_buffer);
714                         break;
715                 case NVME_LOG_SANITIZE_STATUS:
716                         nvme_sanitize_status_page_swapbytes(
717                             (struct nvme_sanitize_status_page *)aer->log_page_buffer);
718                         break;
719                 case INTEL_LOG_TEMP_STATS:
720                         intel_log_temp_stats_swapbytes(
721                             (struct intel_log_temp_stats *)aer->log_page_buffer);
722                         break;
723                 default:
724                         break;
725                 }
726
727                 if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
728                         health_info = (struct nvme_health_information_page *)
729                             aer->log_page_buffer;
730                         nvme_ctrlr_log_critical_warnings(aer->ctrlr,
731                             health_info->critical_warning);
732                         /*
733                          * Critical warnings reported through the
734                          *  SMART/health log page are persistent, so
735                          *  clear the associated bits in the async event
736                          *  config so that we do not receive repeated
737                          *  notifications for the same event.
738                          */
739                         aer->ctrlr->async_event_config &=
740                             ~health_info->critical_warning;
741                         nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
742                             aer->ctrlr->async_event_config, NULL, NULL);
743                 } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
744                     !nvme_use_nvd) {
745                         nsl = (struct nvme_ns_list *)aer->log_page_buffer;
746                         for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
747                                 if (nsl->ns[i] > NVME_MAX_NAMESPACES)
748                                         break;
749                                 nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
750                         }
751                 }
752
753                 /*
754                  * Pass the cpl data from the original async event completion,
755                  *  not the log page fetch.
756                  */
757                 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
758                     aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
759         }
760
761         /*
762          * Repost another asynchronous event request to replace the one
763          *  that just completed.
764          */
765         nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
766 }
767
768 static void
769 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
770 {
771         struct nvme_async_event_request *aer = arg;
772
773         if (nvme_completion_is_error(cpl)) {
774                 /*
775                  *  Do not retry failed async event requests.  This avoids
776                  *  infinite loops where a new async event request is submitted
777                  *  to replace the one just failed, only to fail again and
778                  *  perpetuate the loop.
779                  */
780                 return;
781         }
782
783         /* Associated log page is in bits 23:16 of completion entry dw0. */
784         aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
785
786         nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
787             " page 0x%02x)\n", (cpl->cdw0 & 0x07), (cpl->cdw0 & 0xFF00) >> 8,
788             aer->log_page_id);
789
790         if (is_log_page_id_valid(aer->log_page_id)) {
791                 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
792                     aer->log_page_id);
793                 memcpy(&aer->cpl, cpl, sizeof(*cpl));
794                 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
795                     NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
796                     aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
797                     aer);
798                 /* Wait to notify consumers until after log page is fetched. */
799         } else {
800                 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
801                     NULL, 0);
802
803                 /*
804                  * Repost another asynchronous event request to replace the one
805                  *  that just completed.
806                  */
807                 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
808         }
809 }
810
811 static void
812 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
813     struct nvme_async_event_request *aer)
814 {
815         struct nvme_request *req;
816
817         aer->ctrlr = ctrlr;
818         req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
819         aer->req = req;
820
821         /*
822          * Disable timeout here, since asynchronous event requests should by
823          *  nature never be timed out.
824          */
825         req->timeout = false;
826         req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
827         nvme_ctrlr_submit_admin_request(ctrlr, req);
828 }
829
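/*
 * Arm asynchronous event reporting: choose which critical-warning (and, on
 * NVMe 1.2+ controllers, namespace/firmware) events we want, program them via
 * Set Features (Async Event Configuration), and then post up to
 * NVME_MAX_ASYNC_EVENTS outstanding AER commands (AERL is zero-based).
 */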
830 static void
831 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
832 {
833         struct nvme_completion_poll_status      status;
834         struct nvme_async_event_request         *aer;
835         uint32_t                                i;
836
837         ctrlr->async_event_config = NVME_CRIT_WARN_ST_AVAILABLE_SPARE |
838             NVME_CRIT_WARN_ST_DEVICE_RELIABILITY |
839             NVME_CRIT_WARN_ST_READ_ONLY |
840             NVME_CRIT_WARN_ST_VOLATILE_MEMORY_BACKUP;
841         if (ctrlr->cdata.ver >= NVME_REV(1, 2))
842                 ctrlr->async_event_config |= NVME_ASYNC_EVENT_NS_ATTRIBUTE |
843                     NVME_ASYNC_EVENT_FW_ACTIVATE;
844
845         status.done = 0;
846         nvme_ctrlr_cmd_get_feature(ctrlr, NVME_FEAT_TEMPERATURE_THRESHOLD,
847             0, NULL, 0, nvme_completion_poll_cb, &status);
848         nvme_completion_poll(&status);
849         if (nvme_completion_is_error(&status.cpl) ||
850             (status.cpl.cdw0 & 0xFFFF) == 0xFFFF ||
851             (status.cpl.cdw0 & 0xFFFF) == 0x0000) {
852                 nvme_printf(ctrlr, "temperature threshold not supported\n");
853         } else
854                 ctrlr->async_event_config |= NVME_CRIT_WARN_ST_TEMPERATURE;
855
856         nvme_ctrlr_cmd_set_async_event_config(ctrlr,
857             ctrlr->async_event_config, NULL, NULL);
858
859         /* aerl is a zero-based value, so we need to add 1 here. */
860         ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
861
862         for (i = 0; i < ctrlr->num_aers; i++) {
863                 aer = &ctrlr->aer[i];
864                 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
865         }
866 }
867
868 static void
869 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
870 {
871
872         ctrlr->int_coal_time = 0;
873         TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
874             &ctrlr->int_coal_time);
875
876         ctrlr->int_coal_threshold = 0;
877         TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
878             &ctrlr->int_coal_threshold);
879
880         nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
881             ctrlr->int_coal_threshold, NULL, NULL);
882 }
883
884 static void
885 nvme_ctrlr_hmb_free(struct nvme_controller *ctrlr)
886 {
887         struct nvme_hmb_chunk *hmbc;
888         int i;
889
890         if (ctrlr->hmb_desc_paddr) {
891                 bus_dmamap_unload(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map);
892                 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
893                     ctrlr->hmb_desc_map);
894                 ctrlr->hmb_desc_paddr = 0;
895         }
896         if (ctrlr->hmb_desc_tag) {
897                 bus_dma_tag_destroy(ctrlr->hmb_desc_tag);
898                 ctrlr->hmb_desc_tag = NULL;
899         }
900         for (i = 0; i < ctrlr->hmb_nchunks; i++) {
901                 hmbc = &ctrlr->hmb_chunks[i];
902                 bus_dmamap_unload(ctrlr->hmb_tag, hmbc->hmbc_map);
903                 bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
904                     hmbc->hmbc_map);
905         }
906         ctrlr->hmb_nchunks = 0;
907         if (ctrlr->hmb_tag) {
908                 bus_dma_tag_destroy(ctrlr->hmb_tag);
909                 ctrlr->hmb_tag = NULL;
910         }
911         if (ctrlr->hmb_chunks) {
912                 free(ctrlr->hmb_chunks, M_NVME);
913                 ctrlr->hmb_chunks = NULL;
914         }
915 }
916
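/*
 * Allocate a host memory buffer for controllers that request one (HMPRE > 0).
 * We aim for the controller's preferred size, capped by the hw.nvme.hmb_max
 * tunable (default 5% of RAM); if chunk allocation falls short, the chunk size
 * is halved and the allocation retried, and the whole buffer is given up if we
 * cannot cover the controller's stated minimum (HMMIN).
 */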
917 static void
918 nvme_ctrlr_hmb_alloc(struct nvme_controller *ctrlr)
919 {
920         struct nvme_hmb_chunk *hmbc;
921         size_t pref, min, minc, size;
922         int err, i;
923         uint64_t max;
924
925         /* Limit HMB to 5% of RAM size per device by default. */
926         max = (uint64_t)physmem * PAGE_SIZE / 20;
927         TUNABLE_UINT64_FETCH("hw.nvme.hmb_max", &max);
928
929         min = (long long unsigned)ctrlr->cdata.hmmin * 4096;
930         if (max == 0 || max < min)
931                 return;
932         pref = MIN((long long unsigned)ctrlr->cdata.hmpre * 4096, max);
933         minc = MAX(ctrlr->cdata.hmminds * 4096, PAGE_SIZE);
934         if (min > 0 && ctrlr->cdata.hmmaxd > 0)
935                 minc = MAX(minc, min / ctrlr->cdata.hmmaxd);
936         ctrlr->hmb_chunk = pref;
937
938 again:
939         ctrlr->hmb_chunk = roundup2(ctrlr->hmb_chunk, PAGE_SIZE);
940         ctrlr->hmb_nchunks = howmany(pref, ctrlr->hmb_chunk);
941         if (ctrlr->cdata.hmmaxd > 0 && ctrlr->hmb_nchunks > ctrlr->cdata.hmmaxd)
942                 ctrlr->hmb_nchunks = ctrlr->cdata.hmmaxd;
943         ctrlr->hmb_chunks = malloc(sizeof(struct nvme_hmb_chunk) *
944             ctrlr->hmb_nchunks, M_NVME, M_WAITOK);
945         err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
946             PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
947             ctrlr->hmb_chunk, 1, ctrlr->hmb_chunk, 0, NULL, NULL, &ctrlr->hmb_tag);
948         if (err != 0) {
949                 nvme_printf(ctrlr, "HMB tag create failed %d\n", err);
950                 nvme_ctrlr_hmb_free(ctrlr);
951                 return;
952         }
953
954         for (i = 0; i < ctrlr->hmb_nchunks; i++) {
955                 hmbc = &ctrlr->hmb_chunks[i];
956                 if (bus_dmamem_alloc(ctrlr->hmb_tag,
957                     (void **)&hmbc->hmbc_vaddr, BUS_DMA_NOWAIT,
958                     &hmbc->hmbc_map)) {
959                         nvme_printf(ctrlr, "failed to alloc HMB\n");
960                         break;
961                 }
962                 if (bus_dmamap_load(ctrlr->hmb_tag, hmbc->hmbc_map,
963                     hmbc->hmbc_vaddr, ctrlr->hmb_chunk, nvme_single_map,
964                     &hmbc->hmbc_paddr, BUS_DMA_NOWAIT) != 0) {
965                         bus_dmamem_free(ctrlr->hmb_tag, hmbc->hmbc_vaddr,
966                             hmbc->hmbc_map);
967                         nvme_printf(ctrlr, "failed to load HMB\n");
968                         break;
969                 }
970                 bus_dmamap_sync(ctrlr->hmb_tag, hmbc->hmbc_map,
971                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
972         }
973
974         if (i < ctrlr->hmb_nchunks && i * ctrlr->hmb_chunk < min &&
975             ctrlr->hmb_chunk / 2 >= minc) {
976                 ctrlr->hmb_nchunks = i;
977                 nvme_ctrlr_hmb_free(ctrlr);
978                 ctrlr->hmb_chunk /= 2;
979                 goto again;
980         }
981         ctrlr->hmb_nchunks = i;
982         if (ctrlr->hmb_nchunks * ctrlr->hmb_chunk < min) {
983                 nvme_ctrlr_hmb_free(ctrlr);
984                 return;
985         }
986
987         size = sizeof(struct nvme_hmb_desc) * ctrlr->hmb_nchunks;
988         err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
989             16, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
990             size, 1, size, 0, NULL, NULL, &ctrlr->hmb_desc_tag);
991         if (err != 0) {
992                 nvme_printf(ctrlr, "HMB desc tag create failed %d\n", err);
993                 nvme_ctrlr_hmb_free(ctrlr);
994                 return;
995         }
996         if (bus_dmamem_alloc(ctrlr->hmb_desc_tag,
997             (void **)&ctrlr->hmb_desc_vaddr, BUS_DMA_WAITOK,
998             &ctrlr->hmb_desc_map)) {
999                 nvme_printf(ctrlr, "failed to alloc HMB desc\n");
1000                 nvme_ctrlr_hmb_free(ctrlr);
1001                 return;
1002         }
1003         if (bus_dmamap_load(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1004             ctrlr->hmb_desc_vaddr, size, nvme_single_map,
1005             &ctrlr->hmb_desc_paddr, BUS_DMA_NOWAIT) != 0) {
1006                 bus_dmamem_free(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_vaddr,
1007                     ctrlr->hmb_desc_map);
1008                 nvme_printf(ctrlr, "failed to load HMB desc\n");
1009                 nvme_ctrlr_hmb_free(ctrlr);
1010                 return;
1011         }
1012
1013         for (i = 0; i < ctrlr->hmb_nchunks; i++) {
1014                 ctrlr->hmb_desc_vaddr[i].addr =
1015                     htole64(ctrlr->hmb_chunks[i].hmbc_paddr);
1016                 ctrlr->hmb_desc_vaddr[i].size = htole32(ctrlr->hmb_chunk / 4096);
1017         }
1018         bus_dmamap_sync(ctrlr->hmb_desc_tag, ctrlr->hmb_desc_map,
1019             BUS_DMASYNC_PREWRITE);
1020
1021         nvme_printf(ctrlr, "Allocated %lluMB host memory buffer\n",
1022             (long long unsigned)ctrlr->hmb_nchunks * ctrlr->hmb_chunk
1023             / 1024 / 1024);
1024 }
1025
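/*
 * Hand the host memory buffer to the controller with Set Features (Host
 * Memory Buffer): cdw11 bit 0 enables the buffer and bit 1 asks the controller
 * to reuse previously supplied memory (memory return); the remaining arguments
 * give the total size in 4KB units and the descriptor list address and length.
 */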
1026 static void
1027 nvme_ctrlr_hmb_enable(struct nvme_controller *ctrlr, bool enable, bool memret)
1028 {
1029         struct nvme_completion_poll_status      status;
1030         uint32_t cdw11;
1031
1032         cdw11 = 0;
1033         if (enable)
1034                 cdw11 |= 1;
1035         if (memret)
1036                 cdw11 |= 2;
1037         status.done = 0;
1038         nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_HOST_MEMORY_BUFFER, cdw11,
1039             ctrlr->hmb_nchunks * ctrlr->hmb_chunk / 4096, ctrlr->hmb_desc_paddr,
1040             ctrlr->hmb_desc_paddr >> 32, ctrlr->hmb_nchunks, NULL, 0,
1041             nvme_completion_poll_cb, &status);
1042         nvme_completion_poll(&status);
1043         if (nvme_completion_is_error(&status.cpl))
1044                 nvme_printf(ctrlr, "nvme_ctrlr_hmb_enable failed!\n");
1045 }
1046
1047 static void
1048 nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
1049 {
1050         struct nvme_controller *ctrlr = ctrlr_arg;
1051         uint32_t old_num_io_queues;
1052         int i;
1053
1054         TSENTER();
1055
1056         /*
1057          * Only reset adminq here when we are restarting the
1058          *  controller after a reset.  During initialization,
1059          *  we have already submitted admin commands to get
1060          *  the number of I/O queues supported, so cannot reset
1061          *  the adminq again here.
1062          */
1063         if (resetting) {
1064                 nvme_qpair_reset(&ctrlr->adminq);
1065                 nvme_admin_qpair_enable(&ctrlr->adminq);
1066         }
1067
1068         if (ctrlr->ioq != NULL) {
1069                 for (i = 0; i < ctrlr->num_io_queues; i++)
1070                         nvme_qpair_reset(&ctrlr->ioq[i]);
1071         }
1072
1073         /*
1074          * If this was a reset due to a command timeout during initialization,
1075          * just return here and let the initialization code fail gracefully.
1076          */
1077         if (resetting && !ctrlr->is_initialized)
1078                 return;
1079
1080         if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
1081                 nvme_ctrlr_fail(ctrlr);
1082                 return;
1083         }
1084
1085         /*
1086          * The number of qpairs is determined during controller initialization,
1087          *  including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
1088          *  HW limit.  We call SET_FEATURES again here so that it gets called
1089          *  after any reset for controllers that depend on the driver to
1090          *  explicitly specify how many queues it will use.  This value should
1091          *  never change between resets, so panic if it somehow does.
1092          */
1093         if (resetting) {
1094                 old_num_io_queues = ctrlr->num_io_queues;
1095                 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
1096                         nvme_ctrlr_fail(ctrlr);
1097                         return;
1098                 }
1099
1100                 if (old_num_io_queues != ctrlr->num_io_queues) {
1101                         panic("num_io_queues changed from %u to %u",
1102                               old_num_io_queues, ctrlr->num_io_queues);
1103                 }
1104         }
1105
1106         if (ctrlr->cdata.hmpre > 0 && ctrlr->hmb_nchunks == 0) {
1107                 nvme_ctrlr_hmb_alloc(ctrlr);
1108                 if (ctrlr->hmb_nchunks > 0)
1109                         nvme_ctrlr_hmb_enable(ctrlr, true, false);
1110         } else if (ctrlr->hmb_nchunks > 0)
1111                 nvme_ctrlr_hmb_enable(ctrlr, true, true);
1112
1113         if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
1114                 nvme_ctrlr_fail(ctrlr);
1115                 return;
1116         }
1117
1118         if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
1119                 nvme_ctrlr_fail(ctrlr);
1120                 return;
1121         }
1122
1123         nvme_ctrlr_configure_aer(ctrlr);
1124         nvme_ctrlr_configure_int_coalescing(ctrlr);
1125
1126         for (i = 0; i < ctrlr->num_io_queues; i++)
1127                 nvme_io_qpair_enable(&ctrlr->ioq[i]);
1128         TSEXIT();
1129 }
1130
1131 void
1132 nvme_ctrlr_start_config_hook(void *arg)
1133 {
1134         struct nvme_controller *ctrlr = arg;
1135
1136         TSENTER();
1137
1138         /*
1139          * Reset the controller twice to ensure we do a transition from cc.en==1
1140          * to cc.en==0.  This is because we don't really know what state the
1141          * controller was left in when the boot loader handed off to the OS.  Linux
1142          * doesn't do this, however.  If we adopt that policy, see also nvme_ctrlr_resume().
1143          */
1144         if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
1145 fail:
1146                 nvme_ctrlr_fail(ctrlr);
1147                 config_intrhook_disestablish(&ctrlr->config_hook);
1148                 return;
1149         }
1150
1151         if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1152                 goto fail;
1153
1154         nvme_qpair_reset(&ctrlr->adminq);
1155         nvme_admin_qpair_enable(&ctrlr->adminq);
1156
1157         if (nvme_ctrlr_identify(ctrlr) == 0 &&
1158             nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
1159             nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
1160                 nvme_ctrlr_start(ctrlr, false);
1161         else
1162                 goto fail;
1163
1164         nvme_sysctl_initialize_ctrlr(ctrlr);
1165         config_intrhook_disestablish(&ctrlr->config_hook);
1166
1167         ctrlr->is_initialized = 1;
1168         nvme_notify_new_controller(ctrlr);
1169         TSEXIT();
1170 }
1171
1172 static void
1173 nvme_ctrlr_reset_task(void *arg, int pending)
1174 {
1175         struct nvme_controller  *ctrlr = arg;
1176         int                     status;
1177
1178         nvme_ctrlr_devctl_log(ctrlr, "RESET", "resetting controller");
1179         status = nvme_ctrlr_hw_reset(ctrlr);
1180         /*
1181          * Use pause instead of DELAY, so that we yield to any nvme interrupt
1182          *  handlers on this CPU that were blocked on a qpair lock. We want
1183          *  all nvme interrupts completed before proceeding with restarting the
1184          *  controller.
1185          *
1186          * XXX - any way to guarantee the interrupt handlers have quiesced?
1187          */
1188         pause("nvmereset", hz / 10);
1189         if (status == 0)
1190                 nvme_ctrlr_start(ctrlr, true);
1191         else
1192                 nvme_ctrlr_fail(ctrlr);
1193
1194         atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1195 }
1196
1197 /*
1198  * Poll all the queues enabled on the device for completion.
1199  */
1200 void
1201 nvme_ctrlr_poll(struct nvme_controller *ctrlr)
1202 {
1203         int i;
1204
1205         nvme_qpair_process_completions(&ctrlr->adminq);
1206
1207         for (i = 0; i < ctrlr->num_io_queues; i++)
1208                 if (ctrlr->ioq && ctrlr->ioq[i].cpl)
1209                         nvme_qpair_process_completions(&ctrlr->ioq[i]);
1210 }
1211
1212 /*
1213  * Poll the single-vector interrupt case: num_io_queues will be 1 and
1214  * there's only a single vector. While we're polling, we mask further
1215  * interrupts in the controller.
1216  */
1217 void
1218 nvme_ctrlr_shared_handler(void *arg)
1219 {
1220         struct nvme_controller *ctrlr = arg;
1221
1222         nvme_mmio_write_4(ctrlr, intms, 1);
1223         nvme_ctrlr_poll(ctrlr);
1224         nvme_mmio_write_4(ctrlr, intmc, 1);
1225 }
1226
1227 static void
1228 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
1229 {
1230         struct nvme_pt_command *pt = arg;
1231         struct mtx *mtx = pt->driver_lock;
1232         uint16_t status;
1233
1234         bzero(&pt->cpl, sizeof(pt->cpl));
1235         pt->cpl.cdw0 = cpl->cdw0;
1236
1237         status = cpl->status;
1238         status &= ~NVME_STATUS_P_MASK;
1239         pt->cpl.status = status;
1240
1241         mtx_lock(mtx);
1242         pt->driver_lock = NULL;
1243         wakeup(pt);
1244         mtx_unlock(mtx);
1245 }
1246
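/*
 * Execute a single pass-through command on behalf of an ioctl caller: wire and
 * map the user buffer if there is one, copy the caller-supplied SQE fields
 * into a driver request, submit it to the admin or I/O path as requested, and
 * sleep until nvme_pt_done() hands back the completion.
 */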
1247 int
1248 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
1249     struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
1250     int is_admin_cmd)
1251 {
1252         struct nvme_request     *req;
1253         struct mtx              *mtx;
1254         struct buf              *buf = NULL;
1255         int                     ret = 0;
1256
1257         if (pt->len > 0) {
1258                 if (pt->len > ctrlr->max_xfer_size) {
1259                         nvme_printf(ctrlr, "pt->len (%d) "
1260                             "exceeds max_xfer_size (%d)\n", pt->len,
1261                             ctrlr->max_xfer_size);
1262                         return EIO;
1263                 }
1264                 if (is_user_buffer) {
1265                         /*
1266                          * Ensure the user buffer is wired for the duration of
1267                          *  this pass-through command.
1268                          */
1269                         PHOLD(curproc);
1270                         buf = uma_zalloc(pbuf_zone, M_WAITOK);
1271                         buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
1272                         if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
1273                                 ret = EFAULT;
1274                                 goto err;
1275                         }
1276                         req = nvme_allocate_request_vaddr(buf->b_data, pt->len, 
1277                             nvme_pt_done, pt);
1278                 } else
1279                         req = nvme_allocate_request_vaddr(pt->buf, pt->len,
1280                             nvme_pt_done, pt);
1281         } else
1282                 req = nvme_allocate_request_null(nvme_pt_done, pt);
1283
1284         /* Assume user space already converted to little-endian */
1285         req->cmd.opc = pt->cmd.opc;
1286         req->cmd.fuse = pt->cmd.fuse;
1287         req->cmd.rsvd2 = pt->cmd.rsvd2;
1288         req->cmd.rsvd3 = pt->cmd.rsvd3;
1289         req->cmd.cdw10 = pt->cmd.cdw10;
1290         req->cmd.cdw11 = pt->cmd.cdw11;
1291         req->cmd.cdw12 = pt->cmd.cdw12;
1292         req->cmd.cdw13 = pt->cmd.cdw13;
1293         req->cmd.cdw14 = pt->cmd.cdw14;
1294         req->cmd.cdw15 = pt->cmd.cdw15;
1295
1296         req->cmd.nsid = htole32(nsid);
1297
1298         mtx = mtx_pool_find(mtxpool_sleep, pt);
1299         pt->driver_lock = mtx;
1300
1301         if (is_admin_cmd)
1302                 nvme_ctrlr_submit_admin_request(ctrlr, req);
1303         else
1304                 nvme_ctrlr_submit_io_request(ctrlr, req);
1305
1306         mtx_lock(mtx);
1307         while (pt->driver_lock != NULL)
1308                 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
1309         mtx_unlock(mtx);
1310
1311 err:
1312         if (buf != NULL) {
1313                 uma_zfree(pbuf_zone, buf);
1314                 PRELE(curproc);
1315         }
1316
1317         return (ret);
1318 }
1319
1320 static int
1321 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
1322     struct thread *td)
1323 {
1324         struct nvme_controller                  *ctrlr;
1325         struct nvme_pt_command                  *pt;
1326
1327         ctrlr = cdev->si_drv1;
1328
1329         switch (cmd) {
1330         case NVME_RESET_CONTROLLER:
1331                 nvme_ctrlr_reset(ctrlr);
1332                 break;
1333         case NVME_PASSTHROUGH_CMD:
1334                 pt = (struct nvme_pt_command *)arg;
1335                 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, le32toh(pt->cmd.nsid),
1336                     1 /* is_user_buffer */, 1 /* is_admin_cmd */));
1337         case NVME_GET_NSID:
1338         {
1339                 struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
1340                 strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
1341                     sizeof(gnsid->cdev));
1342                 gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
1343                 gnsid->nsid = 0;
1344                 break;
1345         }
1346         case NVME_GET_MAX_XFER_SIZE:
1347                 *(uint64_t *)arg = ctrlr->max_xfer_size;
1348                 break;
1349         default:
1350                 return (ENOTTY);
1351         }
1352
1353         return (0);
1354 }
1355
1356 static struct cdevsw nvme_ctrlr_cdevsw = {
1357         .d_version =    D_VERSION,
1358         .d_flags =      0,
1359         .d_ioctl =      nvme_ctrlr_ioctl
1360 };
1361
1362 int
1363 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1364 {
1365         struct make_dev_args    md_args;
1366         uint32_t        cap_lo;
1367         uint32_t        cap_hi;
1368         uint32_t        to, vs, pmrcap;
1369         uint8_t         mpsmin;
1370         int             status, timeout_period;
1371
1372         ctrlr->dev = dev;
1373
1374         mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1375         if (bus_get_domain(dev, &ctrlr->domain) != 0)
1376                 ctrlr->domain = 0;
1377
1378         cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
1379         if (bootverbose) {
1380                 device_printf(dev, "CapLo: 0x%08x: MQES %u%s%s%s%s, TO %u\n",
1381                     cap_lo, NVME_CAP_LO_MQES(cap_lo),
1382                     NVME_CAP_LO_CQR(cap_lo) ? ", CQR" : "",
1383                     NVME_CAP_LO_AMS(cap_lo) ? ", AMS" : "",
1384                     (NVME_CAP_LO_AMS(cap_lo) & 0x1) ? " WRRwUPC" : "",
1385                     (NVME_CAP_LO_AMS(cap_lo) & 0x2) ? " VS" : "",
1386                     NVME_CAP_LO_TO(cap_lo));
1387         }
1388         cap_hi = nvme_mmio_read_4(ctrlr, cap_hi);
1389         if (bootverbose) {
1390                 device_printf(dev, "CapHi: 0x%08x: DSTRD %u%s, CSS %x%s, "
1391                     "MPSMIN %u, MPSMAX %u%s%s\n", cap_hi,
1392                     NVME_CAP_HI_DSTRD(cap_hi),
1393                     NVME_CAP_HI_NSSRS(cap_hi) ? ", NSSRS" : "",
1394                     NVME_CAP_HI_CSS(cap_hi),
1395                     NVME_CAP_HI_BPS(cap_hi) ? ", BPS" : "",
1396                     NVME_CAP_HI_MPSMIN(cap_hi),
1397                     NVME_CAP_HI_MPSMAX(cap_hi),
1398                     NVME_CAP_HI_PMRS(cap_hi) ? ", PMRS" : "",
1399                     NVME_CAP_HI_CMBS(cap_hi) ? ", CMBS" : "");
1400         }
1401         if (bootverbose) {
1402                 vs = nvme_mmio_read_4(ctrlr, vs);
1403                 device_printf(dev, "Version: 0x%08x: %d.%d\n", vs,
1404                     NVME_MAJOR(vs), NVME_MINOR(vs));
1405         }
1406         if (bootverbose && NVME_CAP_HI_PMRS(cap_hi)) {
1407                 pmrcap = nvme_mmio_read_4(ctrlr, pmrcap);
1408                 device_printf(dev, "PMRCap: 0x%08x: BIR %u%s%s, PMRTU %u, "
1409                     "PMRWBM %x, PMRTO %u%s\n", pmrcap,
1410                     NVME_PMRCAP_BIR(pmrcap),
1411                     NVME_PMRCAP_RDS(pmrcap) ? ", RDS" : "",
1412                     NVME_PMRCAP_WDS(pmrcap) ? ", WDS" : "",
1413                     NVME_PMRCAP_PMRTU(pmrcap),
1414                     NVME_PMRCAP_PMRWBM(pmrcap),
1415                     NVME_PMRCAP_PMRTO(pmrcap),
1416                     NVME_PMRCAP_CMSS(pmrcap) ? ", CMSS" : "");
1417         }
1418
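             /*
              * Doorbell registers are spaced 1 << (2 + CAP.DSTRD) bytes apart,
              * so remember the combined shift.
              */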
1419         ctrlr->dstrd = NVME_CAP_HI_DSTRD(cap_hi) + 2;
1420
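             /* Minimum host memory page size: 2^(12 + CAP.MPSMIN) bytes. */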
1421         mpsmin = NVME_CAP_HI_MPSMIN(cap_hi);
1422         ctrlr->min_page_size = 1 << (12 + mpsmin);
1423
1424         /* Get ready timeout value from controller, in units of 500ms. */
1425         to = NVME_CAP_LO_TO(cap_lo) + 1;
1426         ctrlr->ready_timeout_in_ms = to * 500;
1427
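             /* Clamp the hw.nvme.timeout_period tunable to its legal range. */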
1428         timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1429         TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1430         timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1431         timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1432         ctrlr->timeout_period = timeout_period;
1433
1434         nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1435         TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1436
1437         ctrlr->enable_aborts = 0;
1438         TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1439
1440         ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1441         if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
1442                 return (ENXIO);
1443
1444         /*
1445          * Create 2 threads for the taskqueue. The reset thread will block when
1446          * it detects that the controller has failed until all I/O has been
1447          * failed up the stack. The fail_req task needs to be able to run in
1448          * this case to finish failing those requests.
1449          *
1450          * We could partially close this race by draining the failed request
1451          * queue before proceeding to free the sim, but nothing would stop
1452          * new I/O from coming in after we do that drain and before we reach
1453          * cam_sim_free, so this big hammer is used instead.
1454          */
1455         ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1456             taskqueue_thread_enqueue, &ctrlr->taskqueue);
1457         taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
1458
1459         ctrlr->is_resetting = 0;
1460         ctrlr->is_initialized = 0;
1461         ctrlr->notification_sent = 0;
1462         TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1463         TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1464         STAILQ_INIT(&ctrlr->fail_req);
1465         ctrlr->is_failed = false;
1466
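             /* Create the root-only nvmeX management device node. */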
1467         make_dev_args_init(&md_args);
1468         md_args.mda_devsw = &nvme_ctrlr_cdevsw;
1469         md_args.mda_uid = UID_ROOT;
1470         md_args.mda_gid = GID_WHEEL;
1471         md_args.mda_mode = 0600;
1472         md_args.mda_unit = device_get_unit(dev);
1473         md_args.mda_si_drv1 = (void *)ctrlr;
1474         status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
1475             device_get_unit(dev));
1476         if (status != 0)
1477                 return (ENXIO);
1478
1479         return (0);
1480 }
1481
1482 void
1483 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1484 {
1485         int     gone, i;
1486
1487         ctrlr->is_dying = true;
1488
1489         if (ctrlr->resource == NULL)
1490                 goto nores;
1491         if (!mtx_initialized(&ctrlr->adminq.lock))
1492                 goto noadminq;
1493
1494         /*
1495          * Check whether it is a hot unplug or a clean driver detach.
1496          * If the device is no longer there, skip any shutdown commands.
1497          */
1498         gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
1499         if (gone)
1500                 nvme_ctrlr_fail(ctrlr);
1501         else
1502                 nvme_notify_fail_consumers(ctrlr);
1503
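             /* Tear down every namespace attached to this controller. */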
1504         for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1505                 nvme_ns_destruct(&ctrlr->ns[i]);
1506
1507         if (ctrlr->cdev)
1508                 destroy_dev(ctrlr->cdev);
1509
1510         if (ctrlr->is_initialized) {
1511                 if (!gone) {
1512                         if (ctrlr->hmb_nchunks > 0)
1513                                 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1514                         nvme_ctrlr_delete_qpairs(ctrlr);
1515                 }
1516                 nvme_ctrlr_hmb_free(ctrlr);
1517         }
1518         if (ctrlr->ioq != NULL) {
1519                 for (i = 0; i < ctrlr->num_io_queues; i++)
1520                         nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1521                 free(ctrlr->ioq, M_NVME);
1522         }
1523         nvme_admin_qpair_destroy(&ctrlr->adminq);
1524
1525         /*
1526          * Notify the controller of a shutdown, even though this is due to
1527          * a driver unload, not a system shutdown (this path is not invoked
1528          * during shutdown).  This ensures the controller receives a
1529          * shutdown notification in case the system is shut down before
1530          * reloading the driver.
1531          */
1532         if (!gone)
1533                 nvme_ctrlr_shutdown(ctrlr);
1534
1535         if (!gone)
1536                 nvme_ctrlr_disable(ctrlr);
1537
1538 noadminq:
1539         if (ctrlr->taskqueue)
1540                 taskqueue_free(ctrlr->taskqueue);
1541
1542         if (ctrlr->tag)
1543                 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1544
1545         if (ctrlr->res)
1546                 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1547                     rman_get_rid(ctrlr->res), ctrlr->res);
1548
1549         if (ctrlr->bar4_resource != NULL) {
1550                 bus_release_resource(dev, SYS_RES_MEMORY,
1551                     ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1552         }
1553
1554         bus_release_resource(dev, SYS_RES_MEMORY,
1555             ctrlr->resource_id, ctrlr->resource);
1556
1557 nores:
1558         mtx_destroy(&ctrlr->lock);
1559 }
1560
1561 void
1562 nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
1563 {
1564         uint32_t        cc;
1565         uint32_t        csts;
1566         int             timeout;
1567
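             /* Request a normal shutdown via CC.SHN, then poll CSTS.SHST. */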
1568         cc = nvme_mmio_read_4(ctrlr, cc);
1569         cc &= ~(NVME_CC_REG_SHN_MASK << NVME_CC_REG_SHN_SHIFT);
1570         cc |= NVME_SHN_NORMAL << NVME_CC_REG_SHN_SHIFT;
1571         nvme_mmio_write_4(ctrlr, cc, cc);
1572
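             /*
              * RTD3 entry latency (cdata.rtd3e) is reported in microseconds;
              * convert it to ticks, rounding up, or allow 5 seconds if the
              * controller does not report a value.
              */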
1573         timeout = ticks + (ctrlr->cdata.rtd3e == 0 ? 5 * hz :
1574             ((uint64_t)ctrlr->cdata.rtd3e * hz + 999999) / 1000000);
1575         while (1) {
1576                 csts = nvme_mmio_read_4(ctrlr, csts);
1577                 if (csts == NVME_GONE)          /* Hot unplug. */
1578                         break;
1579                 if (NVME_CSTS_GET_SHST(csts) == NVME_SHST_COMPLETE)
1580                         break;
1581                 if (timeout - ticks < 0) {
1582                         nvme_printf(ctrlr, "shutdown timeout\n");
1583                         break;
1584                 }
1585                 pause("nvmeshut", 1);
1586         }
1587 }
1588
1589 void
1590 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1591     struct nvme_request *req)
1592 {
1593
1594         nvme_qpair_submit_request(&ctrlr->adminq, req);
1595 }
1596
1597 void
1598 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1599     struct nvme_request *req)
1600 {
1601         struct nvme_qpair       *qpair;
1602
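             /* Map the submitting CPU to one of the controller's I/O qpairs. */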
1603         qpair = &ctrlr->ioq[QP(ctrlr, curcpu)];
1604         nvme_qpair_submit_request(qpair, req);
1605 }
1606
1607 device_t
1608 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1609 {
1610
1611         return (ctrlr->dev);
1612 }
1613
1614 const struct nvme_controller_data *
1615 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1616 {
1617
1618         return (&ctrlr->cdata);
1619 }
1620
1621 int
1622 nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
1623 {
1624         int to = hz;
1625
1626         /*
1627          * Can't touch failed controllers, so treat them as already suspended.
1628          */
1629         if (ctrlr->is_failed)
1630                 return (0);
1631
1632         /*
1633          * We don't want the reset taskqueue running, since it does similar
1634          * things, so prevent it from running once we start suspending. Wait for
1635          * any reset already in progress to complete. The reset process we follow
1636          * will ensure that any new I/O will queue and be given to the hardware
1637          * after we resume (though there should be none).
1638          */
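             /* Give a competing reset up to hz ticks (~1 second) to finish. */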
1639         while (atomic_cmpset_32(&ctrlr->is_resetting, 0, 1) == 0 && to-- > 0)
1640                 pause("nvmesusp", 1);
1641         if (to <= 0) {
1642                 nvme_printf(ctrlr,
1643                     "Competing reset task didn't finish. Try again later.\n");
1644                 return (EWOULDBLOCK);
1645         }
1646
1647         if (ctrlr->hmb_nchunks > 0)
1648                 nvme_ctrlr_hmb_enable(ctrlr, false, false);
1649
1650         /*
1651          * Per Section 7.6.2 of NVMe spec 1.4, to properly suspend, we need to
1652          * delete the hardware I/O queues and then shut down. This flushes any
1653          * metadata the drive may have stored so it can survive having its
1654          * power removed and prevents the unsafe shutdown count from
1655          * incrementing. Once we delete the qpairs, we have to disable them
1656          * before shutting down. The delay matches the paranoia delay in
1657          * nvme_ctrlr_hw_reset and is repeated here (though we should have no
1658          * pending I/O for the delay to cope with).
1659          */
1660         nvme_ctrlr_delete_qpairs(ctrlr);
1661         nvme_ctrlr_disable_qpairs(ctrlr);
1662         pause("nvmesusp", hz / 10);
1663         nvme_ctrlr_shutdown(ctrlr);
1664
1665         return (0);
1666 }
1667
1668 int
1669 nvme_ctrlr_resume(struct nvme_controller *ctrlr)
1670 {
1671
1672         /*
1673          * Can't touch failed controllers, so there's nothing to do on resume.
1674          */
1675         if (ctrlr->is_failed)
1676                 return (0);
1677
1678         /*
1679          * Have to reset the hardware twice, just like we do on attach. See
1680          * nvme_attach() for why.
1681          */
1682         if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1683                 goto fail;
1684         if (nvme_ctrlr_hw_reset(ctrlr) != 0)
1685                 goto fail;
1686
1687         /*
1688          * Now that we've reset the hardware, we can restart the controller. Any
1689          * I/O that was pending is requeued. Any admin commands are aborted with
1690          * an error. Once we've restarted, take the controller out of reset.
1691          */
1692         nvme_ctrlr_start(ctrlr, true);
1693         (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1694
1695         return (0);
1696 fail:
1697         /*
1698          * Since we can't bring the controller out of reset, announce and fail
1699          * the controller. However, we have to return success for the resume
1700          * itself, due to questionable APIs.
1701          */
1702         nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
1703         nvme_ctrlr_fail(ctrlr);
1704         (void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
1705         return (0);
1706 }