/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>

#include <geom/geom.h>

#include "nvme_private.h"

extern int nvme_max_optimal_sectorsize;

static void		nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void		nvme_bio_child_done(void *arg,
					    const struct nvme_completion *cpl);
static uint32_t		nvme_get_num_segments(uint64_t addr, uint64_t size,
					      uint32_t alignment);
static void		nvme_free_child_bios(int num_bios,
					     struct bio **child_bios);
static struct bio **	nvme_allocate_child_bios(int num_bios);
static struct bio **	nvme_construct_child_bios(struct bio *bp,
						  uint32_t alignment,
						  int *num_bios);
static int		nvme_ns_split_bio(struct nvme_namespace *ns,
					  struct bio *bp,
					  uint32_t alignment);
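
/*
 * Ioctl handler for the per-namespace cdev: namespace test commands, raw
 * NVMe passthrough, and the standard disk media-size/sector-size queries.
 */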
static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_namespace	*ns;
	struct nvme_controller	*ctrlr;
	struct nvme_pt_command	*pt;

	ns = cdev->si_drv1;
	ctrlr = ns->ctrlr;

	switch (cmd) {
	case NVME_IO_TEST:
	case NVME_BIO_TEST:
		nvme_ns_test(ns, cmd, arg);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
		    1 /* is_user_buffer */, 0 /* is_admin_cmd */));
	case DIOCGMEDIASIZE:
		*(off_t *)arg = (off_t)nvme_ns_get_size(ns);
		break;
	case DIOCGSECTORSIZE:
		*(u_int *)arg = nvme_ns_get_sector_size(ns);
		break;
	default:
		return (ENOTTY);
	}

	return (0);
}
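
/*
 * Opens for writing require privilege when the securelevel has been
 * raised; close always succeeds.
 */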
static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
	int error = 0;

	if (flags & FWRITE)
		error = securelevel_gt(td->td_ucred, 0);

	return (error);
}

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

	return (0);
}
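
/*
 * Completion handler for bios submitted through the cdev strategy routine:
 * map the NVMe completion status onto the bio and call biodone().
 */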
static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp = arg;

	/*
	 * TODO: add more extensive translation of NVMe status codes
	 *  to different bio error codes (i.e. EIO, EINVAL, etc.)
	 */
	if (nvme_completion_is_error(cpl)) {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
	} else
		bp->bio_resid = 0;

	biodone(bp);
}
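
/*
 * Strategy entry point for the namespace cdev.  Submission errors are
 * reported through biodone() here; successful submissions complete
 * asynchronously via nvme_ns_strategy_done().
 */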
static void
nvme_ns_strategy(struct bio *bp)
{
	struct nvme_namespace	*ns;
	int			err;

	ns = bp->bio_dev->si_drv1;
	err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

	if (err) {
		bp->bio_error = err;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
	}
}
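
/* Character device entry points for the /dev/nvme%dns%d nodes. */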
static struct cdevsw nvme_ns_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_DISK,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_open =	nvme_ns_open,
	.d_close =	nvme_ns_close,
	.d_strategy =	nvme_ns_strategy,
	.d_ioctl =	nvme_ns_ioctl
};
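
/* Accessors for namespace geometry and controller identify data. */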
uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
	return (ns->ctrlr->max_xfer_size);
}

uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
	return (1 << ns->data.lbaf[ns->data.flbas.format].lbads);
}

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
	return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
	return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
	return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
	return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
	return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{
	return (&ns->data);
}

uint32_t
nvme_ns_get_stripesize(struct nvme_namespace *ns)
{
	return (ns->stripesize);
}
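
/*
 * The stripe size (when present) is reported as the optimal sector size,
 * optionally clamped by the nvme_max_optimal_sectorsize tunable.
 */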
uint32_t
nvme_ns_get_optimal_sector_size(struct nvme_namespace *ns)
{
	uint32_t stripesize;

	stripesize = nvme_ns_get_stripesize(ns);

	if (stripesize == 0)
		return (nvme_ns_get_sector_size(ns));

	if (nvme_max_optimal_sectorsize == 0)
		return (stripesize);

	return (MIN(stripesize, nvme_max_optimal_sectorsize));
}
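
/*
 * Common completion handler for namespace I/O: free any DSM range attached
 * to the bio, latch errors, and invoke the caller's completion routine
 * stashed in bio_driver1.
 */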
static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
	struct bio	*bp = arg;
	nvme_cb_fn_t	bp_cb_fn;

	bp_cb_fn = bp->bio_driver1;

	if (bp->bio_driver2)
		free(bp->bio_driver2, M_NVME);

	if (nvme_completion_is_error(status)) {
		bp->bio_flags |= BIO_ERROR;
		if (bp->bio_error == 0)
			bp->bio_error = EIO;
	}

	if ((bp->bio_flags & BIO_ERROR) == 0)
		bp->bio_resid = 0;
	else
		bp->bio_resid = bp->bio_bcount;

	bp_cb_fn(bp, status);
}
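
/*
 * Called as each child of a split bio completes ("goes to bed").  The last
 * child to complete finishes the parent with an aggregated status.
 */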
static void
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
	struct nvme_completion	parent_cpl;
	int			children, inbed;

	if (bio_error != 0) {
		parent->bio_flags |= BIO_ERROR;
		parent->bio_error = bio_error;
	}

	/*
	 * atomic_fetchadd returns the value before adding 1, so we still
	 *  must add 1 to get the updated inbed number.  Save bio_children
	 *  before incrementing to guard against race conditions when
	 *  two children bios complete on different queues.
	 */
	children = atomic_load_acq_int(&parent->bio_children);
	inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
	if (inbed == children) {
		bzero(&parent_cpl, sizeof(parent_cpl));
		if (parent->bio_flags & BIO_ERROR)
			parent_cpl.status.sc = NVME_SC_DATA_TRANSFER_ERROR;
		nvme_ns_bio_done(parent, &parent_cpl);
	}
}
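
/* Completion callback for a single child of a split bio. */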
static void
nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio	*child = arg;
	struct bio	*parent;
	int		bio_error;

	parent = child->bio_parent;
	g_destroy_bio(child);
	bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
	nvme_bio_child_inbed(parent, bio_error);
}
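
/*
 * Return the number of align-sized segments spanned by [addr, addr + size).
 * For example, a 256KB request starting 4KB past a 128KB boundary spans
 * three 128KB segments.
 */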
static uint32_t
nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
{
	uint32_t	num_segs, offset, remainder;

	if (align == 0)
		return (1);

	KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));

	num_segs = size / align;
	remainder = size & (align - 1);
	offset = addr & (align - 1);
	if (remainder > 0 || offset > 0)
		num_segs += 1 + (remainder + offset - 1) / align;

	return (num_segs);
}
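
/* Release any allocated child bios along with the pointer array itself. */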
static void
nvme_free_child_bios(int num_bios, struct bio **child_bios)
{
	int i;

	for (i = 0; i < num_bios; i++) {
		if (child_bios[i] != NULL)
			g_destroy_bio(child_bios[i]);
	}

	free(child_bios, M_NVME);
}
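
/*
 * Allocate an array of num_bios child bios.  Returns NULL, with nothing
 * left allocated, if any allocation fails.
 */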
static struct bio **
nvme_allocate_child_bios(int num_bios)
{
	struct bio	**child_bios;
	int		err = 0, i;

	child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
	if (child_bios == NULL)
		return (NULL);

	for (i = 0; i < num_bios; i++) {
		child_bios[i] = g_new_bio();
		if (child_bios[i] == NULL)
			err = ENOMEM;
	}

	if (err == ENOMEM) {
		nvme_free_child_bios(num_bios, child_bios);
		return (NULL);
	}

	return (child_bios);
}
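
/*
 * Clone bp into one child bio per alignment-sized stripe it touches,
 * handling both mapped (bio_data) and unmapped (bio_ma) requests.
 */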
static struct bio **
nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
{
	struct bio	**child_bios;
	struct bio	*child;
	uint64_t	cur_offset;
	caddr_t		data;
	uint32_t	rem_bcount;
	int		i;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	struct vm_page	**ma;
	uint32_t	ma_offset;
#endif

	*num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
	    alignment);
	child_bios = nvme_allocate_child_bios(*num_bios);
	if (child_bios == NULL)
		return (NULL);

	bp->bio_children = *num_bios;
	bp->bio_inbed = 0;
	cur_offset = bp->bio_offset;
	rem_bcount = bp->bio_bcount;
	data = bp->bio_data;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	ma_offset = bp->bio_ma_offset;
	ma = bp->bio_ma;
#endif

	for (i = 0; i < *num_bios; i++) {
		child = child_bios[i];
		child->bio_parent = bp;
		child->bio_cmd = bp->bio_cmd;
		child->bio_offset = cur_offset;
		child->bio_bcount = min(rem_bcount,
		    alignment - (cur_offset & (alignment - 1)));
		child->bio_flags = bp->bio_flags;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
		if (bp->bio_flags & BIO_UNMAPPED) {
			child->bio_ma_offset = ma_offset;
			child->bio_ma = ma;
			child->bio_ma_n =
			    nvme_get_num_segments(child->bio_ma_offset,
				child->bio_bcount, PAGE_SIZE);
			ma_offset = (ma_offset + child->bio_bcount) &
			    PAGE_MASK;
			ma += child->bio_ma_n;
			if (ma_offset != 0)
				ma -= 1;
		} else
#endif
		{
			child->bio_data = data;
			data += child->bio_bcount;
		}
		cur_offset += child->bio_bcount;
		rem_bcount -= child->bio_bcount;
	}

	return (child_bios);
}
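
/*
 * Split bp on alignment boundaries and submit each child.  Submission
 * failures are folded into the parent via nvme_bio_child_inbed().
 */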
static int
nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
    uint32_t alignment)
{
	struct bio	*child;
	struct bio	**child_bios;
	int		err, i, num_bios;

	child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
	if (child_bios == NULL)
		return (ENOMEM);

	for (i = 0; i < num_bios; i++) {
		child = child_bios[i];
		err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
		if (err != 0) {
			nvme_bio_child_inbed(bp, err);
			g_destroy_bio(child);
		}
	}

	free(child_bios, M_NVME);
	return (0);
}
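
/*
 * Main bio entry point for a namespace.  Reads and writes that cross a
 * stripe boundary are split into child bios; everything else maps directly
 * onto an NVMe read, write, flush, or deallocate command.
 */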
int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn)
{
	struct nvme_dsm_range	*dsm_range;
	uint32_t		num_bios;
	int			err;

	bp->bio_driver1 = cb_fn;

	if (ns->stripesize > 0 &&
	    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
		num_bios = nvme_get_num_segments(bp->bio_offset,
		    bp->bio_bcount, ns->stripesize);
		if (num_bios > 1)
			return (nvme_ns_split_bio(ns, bp, ns->stripesize));
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
		break;
	case BIO_WRITE:
		err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
		break;
	case BIO_FLUSH:
		err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
		break;
	case BIO_DELETE:
		dsm_range =
		    malloc(sizeof(struct nvme_dsm_range), M_NVME,
		    M_ZERO | M_WAITOK);
		dsm_range->length =
		    bp->bio_bcount/nvme_ns_get_sector_size(ns);
		dsm_range->starting_lba =
		    bp->bio_offset/nvme_ns_get_sector_size(ns);
		bp->bio_driver2 = dsm_range;
		err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
		    nvme_ns_bio_done, bp);
		if (err != 0)
			free(dsm_range, M_NVME);
		break;
	default:
		err = EIO;
		break;
	}

	return (err);
}
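
/*
 * Construct (or reconstruct, after a controller reset) a namespace:
 * identify it, record optional features (DSM, volatile write cache), and
 * create its cdev on first construction.
 */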
int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
    struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					unit;

	ns->ctrlr = ctrlr;
	ns->id = id;
	ns->stripesize = 0;

	/* Intel devid 0x0953 encodes its stripe size in vendor-specific data. */
	if (pci_get_devid(ctrlr->dev) == 0x09538086 && ctrlr->cdata.vs[3] != 0)
		ns->stripesize =
		    (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size;

	/*
	 * Namespaces are reconstructed after a controller reset, so check
	 *  to make sure we only call mtx_init once on each mtx.
	 *
	 * TODO: Move this somewhere where it gets called at controller
	 *  construction time, which is not invoked as part of each
	 *  controller reset.
	 */
	if (!mtx_initialized(&ns->lock))
		mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);

	status.done = FALSE;
	nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
	    nvme_completion_poll_cb, &status);
	while (status.done == FALSE)
		DELAY(5);
	if (nvme_completion_is_error(&status.cpl)) {
		nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
		return (ENXIO);
	}

	/*
	 * Note: format is a 0-based value, so > is appropriate here,
	 *  not >=.
	 */
	if (ns->data.flbas.format > ns->data.nlbaf) {
		printf("lba format %d exceeds number supported (%d)\n",
		    ns->data.flbas.format, ns->data.nlbaf + 1);
		return (ENXIO);
	}

	if (ctrlr->cdata.oncs.dsm)
		ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

	if (ctrlr->cdata.vwc.present)
		ns->flags |= NVME_NS_FLUSH_SUPPORTED;

	/*
	 * cdev may have already been created, if we are reconstructing the
	 *  namespace after a controller-level reset.
	 */
	if (ns->cdev != NULL)
		return (0);

	/*
	 * Namespace IDs start at 1, so we need to subtract 1 to create a
	 *  correct unit number.
	 */
	unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;

/*
 * MAKEDEV_ETERNAL was added in r210923, for cdevs that will never
 *  be destroyed.  This avoids refcounting on the cdev object, which
 *  should be OK here as long as we don't support PCIe surprise removal
 *  or namespace deletion.
 */
#ifdef MAKEDEV_ETERNAL_KLD
	ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, unit,
	    NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
	    device_get_unit(ctrlr->dev), ns->id);
#else
	ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, unit,
	    NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
	    device_get_unit(ctrlr->dev), ns->id);
#endif
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	ns->cdev->si_flags |= SI_UNMAPPED;
#endif

	if (ns->cdev != NULL)
		ns->cdev->si_drv1 = ns;

	return (0);
}
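
/* Tear down the namespace's cdev, if one was created. */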
void
nvme_ns_destruct(struct nvme_namespace *ns)
{

	if (ns->cdev != NULL)
		destroy_dev(ns->cdev);
}