2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2012-2016 Intel Corporation
6 * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/queue.h>
39 #include <sys/sysctl.h>
40 #include <sys/systm.h>
41 #include <sys/taskqueue.h>
42 #include <machine/atomic.h>
44 #include <geom/geom.h>
45 #include <geom/geom_disk.h>
47 #include <dev/nvme/nvme.h>
52 struct nvd_controller;
/* GEOM disk method callbacks implemented by this driver. */
54 static disk_ioctl_t nvd_ioctl;
55 static disk_strategy_t nvd_strategy;
56 static dumper_t nvd_dump;
57 static disk_getattr_t nvd_getattr;
/* I/O completion callback passed to nvme_ns_bio_process(). */
59 static void nvd_done(void *arg, const struct nvme_completion *cpl);
/* Tear-down helper: fails queued bios and announces the disk gone. */
60 static void nvd_gone(struct nvd_disk *ndisk);
/* nvme(4) consumer callbacks: namespace/controller attach and failure. */
62 static void *nvd_new_disk(struct nvme_namespace *ns, void *ctrlr);
64 static void *nvd_new_controller(struct nvme_controller *ctrlr);
65 static void nvd_controller_fail(void *ctrlr);
/* Module load/unload hooks dispatched from nvd_modevent(). */
67 static int nvd_load(void);
68 static void nvd_unload(void);
70 MALLOC_DEFINE(M_NVD, "nvd", "nvd(4) allocations");
/* Handle from nvme_register_consumer(); NULL means registration failed. */
72 struct nvme_consumer *consumer_handle;
/*
 * NOTE(review): the "struct nvd_disk {" opening line (and some members)
 * are not visible in this chunk; the fields below belong to that
 * per-namespace disk structure.
 */
75 struct nvd_controller *ctrlr;
/* Deferred-bio queue, drained by the taskqueue when ordering is needed. */
77 struct bio_queue_head bioq;
83 struct nvme_namespace *ns;
/*
 * An ordered (BIO_ORDERED) bio adds NVD_ODEPTH to cur_depth instead of 1,
 * so a single atomic counter encodes both normal depth and "barrier in
 * flight" (cur_depth >= NVD_ODEPTH).
 */
86 #define NVD_ODEPTH (1 << 30)
87 uint32_t ordered_in_flight;
/* Linkage on the global disk list and on the owning controller's list. */
90 TAILQ_ENTRY(nvd_disk) global_tailq;
91 TAILQ_ENTRY(nvd_disk) ctrlr_tailq;
/* Per-controller state: list linkage plus the disks attached to it. */
94 struct nvd_controller {
96 TAILQ_ENTRY(nvd_controller) tailq;
97 TAILQ_HEAD(, nvd_disk) disk_head;
/* nvd_lock protects ctrlr_head and disk_head below. */
100 static struct mtx nvd_lock;
101 static TAILQ_HEAD(, nvd_controller) ctrlr_head;
102 static TAILQ_HEAD(disk_list, nvd_disk) disk_head;
104 static SYSCTL_NODE(_hw, OID_AUTO, nvd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
105 "nvd driver parameters");
107 * The NVMe specification does not define a maximum or optimal delete size, so
108 * technically max delete size is min(full size of the namespace, 2^32 - 1
109 * LBAs). A single delete for a multi-TB NVMe namespace though may take much
110 * longer to complete than the nvme(4) I/O timeout period. So choose a sensible
111 * default here that is still suitably large to minimize the number of overall
/* Tunable cap applied to disk->d_delmaxsize in nvd_new_disk(). */
114 static uint64_t nvd_delete_max = (1024 * 1024 * 1024); /* 1GB */
115 SYSCTL_UQUAD(_hw_nvd, OID_AUTO, delete_max, CTLFLAG_RDTUN, &nvd_delete_max, 0,
116 "nvd maximum BIO_DELETE size in bytes");
/*
 * Module event handler: presumably dispatches MOD_LOAD/MOD_UNLOAD to
 * nvd_load()/nvd_unload().  NOTE(review): the body (original lines
 * ~119-134) is not visible in this chunk — confirm against full source.
 */
118 static int nvd_modevent(module_t mod, int type, void *arg)
136 moduledata_t nvd_mod = {
138 (modeventhand_t)nvd_modevent,
/* Register at driver-init time; version 1; requires nvme(4) version 1. */
142 DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
143 MODULE_VERSION(nvd, 1);
144 MODULE_DEPEND(nvd, nvme, 1, 1, 1);
/*
 * nvd_load() body (function header elided from this chunk): initialize
 * the global lock and lists, then register as an nvme(4) consumer.
 * Returns 0 on success, -1 if consumer registration failed.
 */
152 mtx_init(&nvd_lock, "nvd_lock", NULL, MTX_DEF);
153 TAILQ_INIT(&ctrlr_head);
154 TAILQ_INIT(&disk_head);
/* No per-bio done callback or async-event handler is registered (NULL). */
156 consumer_handle = nvme_register_consumer(nvd_new_disk,
157 nvd_new_controller, NULL, nvd_controller_fail);
159 return (consumer_handle != NULL ? 0 : -1);
165 struct nvd_controller *ctrlr;
166 struct nvd_disk *ndisk;
/*
 * For each controller: unlink it, mark each of its disks gone, then sleep
 * until nvd_gonecb() has emptied the controller's disk list.
 * NOTE(review): the nvd_gone() call inside the FOREACH and the free of
 * ctrlr are elided from this chunk.
 */
172 while ((ctrlr = TAILQ_FIRST(&ctrlr_head)) != NULL) {
173 TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
174 TAILQ_FOREACH(ndisk, &ctrlr->disk_head, ctrlr_tailq)
176 while (!TAILQ_EMPTY(&ctrlr->disk_head))
177 msleep(&ctrlr->disk_head, &nvd_lock, 0, "nvd_unload",0);
180 mtx_unlock(&nvd_lock);
/* All disks drained; deregister from nvme(4) and destroy the lock. */
182 nvme_unregister_consumer(consumer_handle);
184 mtx_destroy(&nvd_lock);
/*
 * Submit one bio to the namespace.  An ordered bio bumps cur_depth by
 * NVD_ODEPTH (see the define) so other paths can detect a barrier in
 * flight; a normal bio bumps it by 1.  On submission failure the depth
 * accounting is undone and the bio is completed with an error.
 */
188 nvd_bio_submit(struct nvd_disk *ndisk, struct bio *bp)
192 bp->bio_driver1 = NULL;
193 if (__predict_false(bp->bio_flags & BIO_ORDERED))
194 atomic_add_int(&ndisk->cur_depth, NVD_ODEPTH);
196 atomic_add_int(&ndisk->cur_depth, 1);
197 err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
/* Failure path (the "if (err)" guard is elided from this chunk). */
199 if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
200 atomic_add_int(&ndisk->cur_depth, -NVD_ODEPTH);
201 atomic_add_int(&ndisk->ordered_in_flight, -1);
202 wakeup(&ndisk->cur_depth);
/* Last normal bio drained with a barrier waiting: wake the waiter. */
204 if (atomic_fetchadd_int(&ndisk->cur_depth, -1) == 1 &&
205 __predict_false(ndisk->ordered_in_flight != 0))
206 wakeup(&ndisk->cur_depth);
/* Complete the bio back to GEOM with the submission error. */
209 bp->bio_flags |= BIO_ERROR;
210 bp->bio_resid = bp->bio_bcount;
/*
 * GEOM strategy entry point.  Fast path: submit directly when ordering
 * allows; otherwise queue the bio and kick the taskqueue, which enforces
 * ordering in nvd_bioq_process().
 */
216 nvd_strategy(struct bio *bp)
218 struct nvd_disk *ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;
221 * bio with BIO_ORDERED flag must be executed after all previous
222 * bios in the queue, and before any successive bios.
/* An ordered bio may go straight out only if nothing is in flight/queued. */
224 if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
225 if (atomic_fetchadd_int(&ndisk->ordered_in_flight, 1) == 0 &&
226 ndisk->cur_depth == 0 && bioq_first(&ndisk->bioq) == NULL) {
227 nvd_bio_submit(ndisk, bp);
/* Normal bio with no barrier pending: submit directly. */
230 } else if (__predict_true(ndisk->ordered_in_flight == 0)) {
231 nvd_bio_submit(ndisk, bp);
236 * There are ordered bios in flight, so we need to submit
237 * bios through the task queue to enforce ordering.
239 mtx_lock(&ndisk->bioqlock);
240 bioq_insert_tail(&ndisk->bioq, bp);
241 mtx_unlock(&ndisk->bioqlock);
242 taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
/*
 * Detach a disk: announce it gone to GEOM, then fail every bio still
 * sitting in the deferred queue with ENXIO.  nvd_gonecb() finishes the
 * teardown once GEOM releases the disk.
 */
246 nvd_gone(struct nvd_disk *ndisk)
250 printf(NVD_STR"%u: detached\n", ndisk->unit);
251 mtx_lock(&ndisk->bioqlock);
252 disk_gone(ndisk->disk);
253 while ((bp = bioq_takefirst(&ndisk->bioq)) != NULL) {
/* Ordered bios never reached submit, so drop their in-flight count. */
254 if (__predict_false(bp->bio_flags & BIO_ORDERED))
255 atomic_add_int(&ndisk->ordered_in_flight, -1);
256 bp->bio_error = ENXIO;
257 bp->bio_flags |= BIO_ERROR;
258 bp->bio_resid = bp->bio_bcount;
261 mtx_unlock(&ndisk->bioqlock);
/*
 * GEOM d_gone callback: last reference to the disk dropped.  Destroy the
 * GEOM disk, unlink from global/controller lists, wake anyone sleeping in
 * nvd_unload()/nvd_controller_fail() on an emptied controller list, and
 * free per-disk resources.  NOTE(review): the mtx_lock(&nvd_lock) and the
 * final free of ndisk are elided from this chunk.
 */
265 nvd_gonecb(struct disk *dp)
267 struct nvd_disk *ndisk = (struct nvd_disk *)dp->d_drv1;
269 disk_destroy(ndisk->disk);
271 TAILQ_REMOVE(&disk_head, ndisk, global_tailq);
272 TAILQ_REMOVE(&ndisk->ctrlr->disk_head, ndisk, ctrlr_tailq);
/* Controller now diskless: wake sleepers waiting for the drain. */
273 if (TAILQ_EMPTY(&ndisk->ctrlr->disk_head))
274 wakeup(&ndisk->ctrlr->disk_head);
275 mtx_unlock(&nvd_lock);
276 taskqueue_free(ndisk->tq);
277 mtx_destroy(&ndisk->bioqlock);
/* GEOM d_ioctl: forward the request to the nvme(4) namespace handler. */
282 nvd_ioctl(struct disk *dp, u_long cmd, void *data, int fflag,
285 struct nvd_disk *ndisk = dp->d_drv1;
287 return (nvme_ns_ioctl_process(ndisk->ns, cmd, data, fflag, td));
/*
 * Kernel crash-dump hook: write len bytes from virt at byte offset offset
 * via the namespace's polled dump path.  phys is unused here.
 */
291 nvd_dump(void *arg, void *virt, vm_offset_t phys, off_t offset, size_t len)
293 struct disk *dp = arg;
294 struct nvd_disk *ndisk = dp->d_drv1;
296 return (nvme_ns_dump(ndisk->ns, virt, offset, len));
/*
 * GEOM d_getattr: answer "GEOM::lunid" by formatting the namespace NGUID
 * (preferred) or EUI64 as a hex string into bp->bio_data.  An all-zero
 * identifier is treated as absent.  NOTE(review): the loop "break"s,
 * error returns, and closing braces are elided from this chunk.
 */
300 nvd_getattr(struct bio *bp)
302 struct nvd_disk *ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;
303 const struct nvme_namespace_data *nsdata;
306 if (!strcmp("GEOM::lunid", bp->bio_attribute)) {
307 nsdata = nvme_ns_get_data(ndisk->ns);
309 /* Try to return NGUID as lunid. */
/* Scan for a non-zero byte; all zeroes means NGUID is not populated. */
310 for (i = 0; i < sizeof(nsdata->nguid); i++) {
311 if (nsdata->nguid[i] != 0)
314 if (i < sizeof(nsdata->nguid)) {
/* Need room for 2 hex chars per byte plus the NUL terminator. */
315 if (bp->bio_length < sizeof(nsdata->nguid) * 2 + 1)
317 for (i = 0; i < sizeof(nsdata->nguid); i++) {
318 sprintf(&bp->bio_data[i * 2], "%02x",
321 bp->bio_completed = bp->bio_length;
325 /* Try to return EUI64 as lunid. */
326 for (i = 0; i < sizeof(nsdata->eui64); i++) {
327 if (nsdata->eui64[i] != 0)
330 if (i < sizeof(nsdata->eui64)) {
331 if (bp->bio_length < sizeof(nsdata->eui64) * 2 + 1)
333 for (i = 0; i < sizeof(nsdata->eui64); i++) {
334 sprintf(&bp->bio_data[i * 2], "%02x",
337 bp->bio_completed = bp->bio_length;
/*
 * nvme(4) completion callback.  Reverses the cur_depth accounting taken
 * in nvd_bio_submit() (NVD_ODEPTH for ordered bios, 1 otherwise) and
 * wakes nvd_bioq_process() sleepers waiting on cur_depth.  NOTE(review):
 * the biodone()/error propagation lines are elided from this chunk.
 */
345 nvd_done(void *arg, const struct nvme_completion *cpl)
347 struct bio *bp = (struct bio *)arg;
348 struct nvd_disk *ndisk = bp->bio_disk->d_drv1;
350 if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
351 atomic_add_int(&ndisk->cur_depth, -NVD_ODEPTH);
352 atomic_add_int(&ndisk->ordered_in_flight, -1);
353 wakeup(&ndisk->cur_depth);
/* Last normal bio drained with a barrier waiting: wake the waiter. */
355 if (atomic_fetchadd_int(&ndisk->cur_depth, -1) == 1 &&
356 __predict_false(ndisk->ordered_in_flight != 0))
357 wakeup(&ndisk->cur_depth);
/*
 * Taskqueue handler that drains the deferred bio queue in order.  Before
 * an ordered bio it waits for all prior bios to complete; after one it
 * waits for the barrier itself to complete before submitting more.  The
 * 1-tick tsleep()s poll cur_depth, relying on nvd_done()'s wakeups.
 * NOTE(review): the surrounding for(;;) loop and queue-empty break are
 * elided from this chunk.
 */
364 nvd_bioq_process(void *arg, int pending)
366 struct nvd_disk *ndisk = arg;
370 mtx_lock(&ndisk->bioqlock)
371 bp = bioq_takefirst(&ndisk->bioq);
372 mtx_unlock(&ndisk->bioqlock);
376 if (__predict_false(bp->bio_flags & BIO_ORDERED)) {
378 * bio with BIO_ORDERED flag set must be executed
379 * after all previous bios.
381 while (ndisk->cur_depth > 0)
382 tsleep(&ndisk->cur_depth, 0, "nvdorb", 1);
385 * bio with BIO_ORDERED flag set must be completed
386 * before proceeding with additional bios.
388 while (ndisk->cur_depth >= NVD_ODEPTH)
389 tsleep(&ndisk->cur_depth, 0, "nvdora", 1);
392 nvd_bio_submit(ndisk, bp);
/*
 * nvme(4) "new controller" callback: allocate per-controller state and
 * link it on the global controller list under nvd_lock.  NOTE(review):
 * the malloc flags, mtx_lock(&nvd_lock), and return statement are
 * partially elided from this chunk.
 */
397 nvd_new_controller(struct nvme_controller *ctrlr)
399 struct nvd_controller *nvd_ctrlr;
401 nvd_ctrlr = malloc(sizeof(struct nvd_controller), M_NVD,
404 TAILQ_INIT(&nvd_ctrlr->disk_head);
406 TAILQ_INSERT_TAIL(&ctrlr_head, nvd_ctrlr, tailq);
407 mtx_unlock(&nvd_lock);
/*
 * nvme(4) "new namespace" callback: create a GEOM disk for the namespace.
 * Allocates the nvd_disk, picks the lowest free unit number, links the
 * disk on the global and per-controller lists, creates a single-thread
 * taskqueue for ordered submission, fills in the struct disk (methods,
 * geometry, feature flags, identity strings), and calls disk_create().
 * NOTE(review): several lines (unit-scan break, else branch, printf tail,
 * return) are elided from this chunk.
 */
413 nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
415 uint8_t descr[NVME_MODEL_NUMBER_LENGTH+1];
416 struct nvd_disk *ndisk, *tnd;
418 struct nvd_controller *ctrlr = ctrlr_arg;
421 ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);
422 ndisk->ctrlr = ctrlr;
424 ndisk->cur_depth = 0;
425 ndisk->ordered_in_flight = 0;
426 mtx_init(&ndisk->bioqlock, "nvd bioq lock", NULL, MTX_DEF);
427 bioq_init(&ndisk->bioq);
428 TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
/*
 * Walk the unit-sorted global list to find the first unused unit number,
 * then insert the new disk in sorted position (or at the tail).
 */
432 TAILQ_FOREACH(tnd, &disk_head, global_tailq) {
433 if (tnd->unit > unit)
435 unit = tnd->unit + 1;
439 TAILQ_INSERT_BEFORE(tnd, ndisk, global_tailq);
441 TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
442 TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);
443 mtx_unlock(&nvd_lock);
/* One thread is enough: ordering is serialized through this queue. */
445 ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
446 taskqueue_thread_enqueue, &ndisk->tq);
447 taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");
449 disk = ndisk->disk = disk_alloc();
450 disk->d_strategy = nvd_strategy;
451 disk->d_ioctl = nvd_ioctl;
452 disk->d_dump = nvd_dump;
453 disk->d_getattr = nvd_getattr;
454 disk->d_gone = nvd_gonecb;
455 disk->d_name = NVD_STR;
456 disk->d_unit = ndisk->unit;
457 disk->d_drv1 = ndisk;
/* Geometry and limits come straight from the namespace. */
459 disk->d_sectorsize = nvme_ns_get_sector_size(ns);
460 disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
461 disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
/* Cap BIO_DELETE size by the hw.nvd.delete_max tunable. */
462 disk->d_delmaxsize = (off_t)nvme_ns_get_size(ns);
463 if (disk->d_delmaxsize > nvd_delete_max)
464 disk->d_delmaxsize = nvd_delete_max;
465 disk->d_stripesize = nvme_ns_get_stripesize(ns);
466 disk->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
467 if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
468 disk->d_flags |= DISKFLAG_CANDELETE;
469 if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
470 disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
473 * d_ident and d_descr are both far bigger than the length of either
474 * the serial or model number strings.
476 nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
477 sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
478 nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
479 NVME_MODEL_NUMBER_LENGTH);
480 strlcpy(disk->d_descr, descr, sizeof(descr));
482 disk->d_rotation_rate = DISK_RR_NON_ROTATING;
484 disk_create(disk, DISK_VERSION);
/* Announce the new disk on the console, dmesg-style. */
486 printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
487 printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
488 (uintmax_t)disk->d_mediasize / (1024*1024),
489 (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
/*
 * nvme(4) controller-failure callback: unlink the controller, mark each
 * of its disks gone, and sleep until nvd_gonecb() has emptied the disk
 * list (same drain pattern as nvd_unload()).  NOTE(review): the
 * mtx_lock(&nvd_lock), the nvd_gone() call in the FOREACH body, and the
 * free of ctrlr are elided from this chunk.
 */
496 nvd_controller_fail(void *ctrlr_arg)
498 struct nvd_controller *ctrlr = ctrlr_arg;
499 struct nvd_disk *ndisk;
502 TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
503 TAILQ_FOREACH(ndisk, &ctrlr->disk_head, ctrlr_tailq)
505 while (!TAILQ_EMPTY(&ctrlr->disk_head))
506 msleep(&ctrlr->disk_head, &nvd_lock, 0, "nvd_fail", 0);
507 mtx_unlock(&nvd_lock);