/*-
 * Copyright (C) 2012-2013 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <dev/pci/pcivar.h>

#include <geom/geom.h>

#include "nvme_private.h"

static void		nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void		nvme_bio_child_done(void *arg,
					    const struct nvme_completion *cpl);
static uint32_t		nvme_get_num_segments(uint64_t addr, uint64_t size,
					      uint32_t alignment);
static void		nvme_free_child_bios(int num_bios,
					     struct bio **child_bios);
static struct bio **	nvme_allocate_child_bios(int num_bios);
static struct bio **	nvme_construct_child_bios(struct bio *bp,
						  uint32_t alignment,
						  int *num_bios);
static int		nvme_ns_split_bio(struct nvme_namespace *ns,
					  struct bio *bp,
					  uint32_t alignment);

static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct nvme_namespace	*ns;
	struct nvme_controller	*ctrlr;
	struct nvme_pt_command	*pt;

	ns = cdev->si_drv1;
	ctrlr = ns->ctrlr;

	switch (cmd) {
	case NVME_IO_TEST:
	case NVME_BIO_TEST:
		nvme_ns_test(ns, cmd, arg);
		break;
	case NVME_PASSTHROUGH_CMD:
		pt = (struct nvme_pt_command *)arg;
		return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
		    1 /* is_user_buffer */, 0 /* is_admin_cmd */));
	case DIOCGMEDIASIZE:
		*(off_t *)arg = (off_t)nvme_ns_get_size(ns);
		break;
	case DIOCGSECTORSIZE:
		*(u_int *)arg = nvme_ns_get_sector_size(ns);
		break;
	default:
		return (ENOTTY);
	}

	return (0);
}

static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
	int error = 0;

	if (flags & FWRITE)
		error = securelevel_gt(td->td_ucred, 0);

	return (error);
}

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

	return (0);
}

static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio *bp = arg;

	/*
	 * TODO: add more extensive translation of NVMe status codes
	 *  to different bio error codes (i.e. EIO, EINVAL, etc.)
	 */
	if (nvme_completion_is_error(cpl)) {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
	} else
		bp->bio_resid = 0;

	biodone(bp);
}

static void
nvme_ns_strategy(struct bio *bp)
{
	struct nvme_namespace	*ns;
	int			err;

	ns = bp->bio_dev->si_drv1;
	err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

	if (err) {
		bp->bio_error = err;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
	}
}
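
/*
 * Reads and writes on the raw device go through physio, which builds bios
 *  and hands them to nvme_ns_strategy above.
 */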
static struct cdevsw nvme_ns_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_DISK,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_open =	nvme_ns_open,
	.d_close =	nvme_ns_close,
	.d_strategy =	nvme_ns_strategy,
	.d_ioctl =	nvme_ns_ioctl
};

uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
	return (ns->ctrlr->max_xfer_size);
}
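
/*
 * lbads is the base-2 logarithm of the LBA data size, so the shift below
 *  converts it to bytes: e.g., lbads == 9 gives 512-byte sectors and
 *  lbads == 12 gives 4096-byte sectors.
 */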
uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
	return (1 << ns->data.lbaf[ns->data.flbas.format].lbads);
}

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
	return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
	return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
	return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
	return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
	return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{

	return (&ns->data);
}

static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
	struct bio	*bp = arg;
	nvme_cb_fn_t	bp_cb_fn;

	bp_cb_fn = bp->bio_driver1;

	if (bp->bio_driver2)
		free(bp->bio_driver2, M_NVME);

	if (nvme_completion_is_error(status)) {
		bp->bio_flags |= BIO_ERROR;
		if (bp->bio_error == 0)
			bp->bio_error = EIO;
	}

	if ((bp->bio_flags & BIO_ERROR) == 0)
		bp->bio_resid = 0;
	else
		bp->bio_resid = bp->bio_bcount;

	bp_cb_fn(bp, status);
}

static void
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
	struct nvme_completion	parent_cpl;
	int			inbed;

	if (bio_error != 0) {
		parent->bio_flags |= BIO_ERROR;
		parent->bio_error = bio_error;
	}

	/*
	 * atomic_fetchadd will return value before adding 1, so we still
	 *  must add 1 to get the updated inbed number.
	 */
	inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
	if (inbed == parent->bio_children) {
		bzero(&parent_cpl, sizeof(parent_cpl));
		if (parent->bio_flags & BIO_ERROR)
			parent_cpl.status.sc = NVME_SC_DATA_TRANSFER_ERROR;
		nvme_ns_bio_done(parent, &parent_cpl);
	}
}

static void
nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
{
	struct bio	*child = arg;
	struct bio	*parent;
	int		bio_error;

	parent = child->bio_parent;
	g_destroy_bio(child);
	bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
	nvme_bio_child_inbed(parent, bio_error);
}
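
/*
 * Returns the number of alignment-sized segments needed to cover the byte
 *  range [addr, addr + size).  For example, addr 0x2e00, size 0x600 and
 *  align 0x1000 yield 2, since the range crosses the 0x3000 boundary.
 */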
static uint32_t
nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align)
{
	uint32_t	num_segs, offset, remainder;

	if (align == 0)
		return (1);

	KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n"));

	num_segs = size / align;
	remainder = size & (align - 1);
	offset = addr & (align - 1);
	if (remainder > 0 || offset > 0)
		num_segs += 1 + (remainder + offset - 1) / align;

	return (num_segs);
}

static void
nvme_free_child_bios(int num_bios, struct bio **child_bios)
{
	int i;

	for (i = 0; i < num_bios; i++) {
		if (child_bios[i] != NULL)
			g_destroy_bio(child_bios[i]);
	}

	free(child_bios, M_NVME);
}
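
/*
 * Allocations in the split path use M_NOWAIT, presumably because bio
 *  dispatch may run in contexts that must not sleep; a failure here
 *  unwinds to the caller and the parent bio fails with ENOMEM.
 */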
static struct bio **
nvme_allocate_child_bios(int num_bios)
{
	struct bio	**child_bios;
	int		err = 0, i;

	child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT);
	if (child_bios == NULL)
		return (NULL);

	for (i = 0; i < num_bios; i++) {
		child_bios[i] = g_new_bio();
		if (child_bios[i] == NULL)
			err = ENOMEM;
	}

	if (err == ENOMEM) {
		nvme_free_child_bios(num_bios, child_bios);
		return (NULL);
	}

	return (child_bios);
}
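
/*
 * Split the parent bio into children, each clipped at the next alignment
 *  boundary.  For example, a 64 KB parent at offset 96 KB with a 128 KB
 *  alignment becomes two 32 KB children, one ending at and one starting
 *  from the 128 KB boundary.
 */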
static struct bio **
nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios)
{
	struct bio	**child_bios;
	struct bio	*child;
	uint64_t	cur_offset;
	caddr_t		data;
	uint32_t	rem_bcount;
	int		i;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	struct vm_page	**ma;
	uint32_t	ma_offset;
#endif

	*num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount,
	    alignment);
	child_bios = nvme_allocate_child_bios(*num_bios);
	if (child_bios == NULL)
		return (NULL);

	bp->bio_children = *num_bios;
	bp->bio_inbed = 0;
	cur_offset = bp->bio_offset;
	rem_bcount = bp->bio_bcount;
	data = bp->bio_data;
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	ma_offset = bp->bio_ma_offset;
	ma = bp->bio_ma;
#endif

	for (i = 0; i < *num_bios; i++) {
		child = child_bios[i];
		child->bio_parent = bp;
		child->bio_cmd = bp->bio_cmd;
		child->bio_offset = cur_offset;
		child->bio_bcount = min(rem_bcount,
		    alignment - (cur_offset & (alignment - 1)));
		child->bio_flags = bp->bio_flags;
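		/*
		 * For unmapped bios, each child borrows the slice of the
		 *  parent's page array backing its byte range; ma and
		 *  ma_offset then advance past that range, backing up one
		 *  page when a child ends mid-page so the next child can
		 *  share it.
		 */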
#ifdef NVME_UNMAPPED_BIO_SUPPORT
		if (bp->bio_flags & BIO_UNMAPPED) {
			child->bio_ma_offset = ma_offset;
			child->bio_ma = ma;
			child->bio_ma_n =
			    nvme_get_num_segments(child->bio_ma_offset,
				child->bio_bcount, PAGE_SIZE);
			ma_offset = (ma_offset + child->bio_bcount) &
			    PAGE_MASK;
			ma += child->bio_ma_n;
			if (ma_offset != 0)
				ma -= 1;
		} else
#endif
		{
			child->bio_data = data;
			data += child->bio_bcount;
		}
		cur_offset += child->bio_bcount;
		rem_bcount -= child->bio_bcount;
	}

	return (child_bios);
}

static int
nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
    uint32_t alignment)
{
	struct bio	*child;
	struct bio	**child_bios;
	int		err, i, num_bios;

	child_bios = nvme_construct_child_bios(bp, alignment, &num_bios);
	if (child_bios == NULL)
		return (ENOMEM);

	for (i = 0; i < num_bios; i++) {
		child = child_bios[i];
		err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
		if (err != 0) {
			nvme_bio_child_inbed(bp, err);
			g_destroy_bio(child);
		}
	}

	free(child_bios, M_NVME);

	return (0);
}
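
/*
 * Entry point for namespace I/O.  Reads and writes that span more than one
 *  stripe are split via nvme_ns_split_bio above; the parent bio then
 *  completes through nvme_bio_child_done once every child is inbed.
 */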
int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
	nvme_cb_fn_t cb_fn)
{
	struct nvme_dsm_range	*dsm_range;
	uint32_t		num_bios;
	int			err;

	bp->bio_driver1 = cb_fn;

	if (ns->stripesize > 0 &&
	    (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) {
		num_bios = nvme_get_num_segments(bp->bio_offset,
		    bp->bio_bcount, ns->stripesize);
		if (num_bios > 1)
			return (nvme_ns_split_bio(ns, bp, ns->stripesize));
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
		break;
	case BIO_WRITE:
		err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
		break;
	case BIO_FLUSH:
		err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
		break;
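	/*
	 * BIO_DELETE maps to a DSM deallocate with a single range,
	 *  converting the bio's byte units to LBAs: e.g., with 512-byte
	 *  sectors, an 8 KB delete at byte offset 1 MB becomes
	 *  starting_lba 2048, length 16.
	 */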
	case BIO_DELETE:
		dsm_range =
		    malloc(sizeof(struct nvme_dsm_range), M_NVME,
		    M_ZERO | M_WAITOK);
		dsm_range->length =
		    bp->bio_bcount/nvme_ns_get_sector_size(ns);
		dsm_range->starting_lba =
		    bp->bio_offset/nvme_ns_get_sector_size(ns);
		bp->bio_driver2 = dsm_range;
		err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
			nvme_ns_bio_done, bp);
		if (err != 0)
			free(dsm_range, M_NVME);
		break;
	default:
		err = EIO;
		break;
	}

	return (err);
}

#ifdef CHATHAM2
static void
nvme_ns_populate_chatham_data(struct nvme_namespace *ns)
{
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	*nsdata;

	ctrlr = ns->ctrlr;
	nsdata = &ns->data;

	nsdata->nsze = ctrlr->chatham_lbas;
	nsdata->ncap = ctrlr->chatham_lbas;
	nsdata->nuse = ctrlr->chatham_lbas;

	/* Chatham2 doesn't support thin provisioning. */
	nsdata->nsfeat.thin_prov = 0;

	/* Set LBA size to 512 bytes. */
	nsdata->lbaf[0].lbads = 9;
}
#endif /* CHATHAM2 */

int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
    struct nvme_controller *ctrlr)
{
	struct nvme_completion_poll_status	status;
	int					unit;

	ns->ctrlr = ctrlr;
	ns->id = id;
	ns->stripesize = 0;

	if (pci_get_devid(ctrlr->dev) == 0x09538086 && ctrlr->cdata.vs[3] != 0)
		ns->stripesize =
		    (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size;
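
	/*
	 * As used here, vendor-specific byte vs[3] is treated as log2 of
	 *  the stripe size in min_page_size units: e.g., vs[3] == 5 with
	 *  4 KB pages gives a 128 KB stripe.
	 */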

	/*
	 * Namespaces are reconstructed after a controller reset, so check
	 *  to make sure we only call mtx_init once on each mtx.
	 *
	 * TODO: Move this somewhere where it gets called at controller
	 *  construction time, which is not invoked as part of each
	 *  controller reset.
	 */
	if (!mtx_initialized(&ns->lock))
		mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);

#ifdef CHATHAM2
	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
		nvme_ns_populate_chatham_data(ns);
	else {
#endif
		status.done = FALSE;
		nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
		    nvme_completion_poll_cb, &status);
		while (status.done == FALSE)
			DELAY(5);
		if (nvme_completion_is_error(&status.cpl)) {
			nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
			return (ENXIO);
		}
#ifdef CHATHAM2
	}
#endif

	/*
	 * Note: format is a 0-based value, so > is appropriate here,
	 *  not >=.
	 */
	if (ns->data.flbas.format > ns->data.nlbaf) {
		printf("lba format %d exceeds number supported (%d)\n",
		    ns->data.flbas.format, ns->data.nlbaf+1);
		return (ENXIO);
	}

	if (ctrlr->cdata.oncs.dsm)
		ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

	if (ctrlr->cdata.vwc.present)
		ns->flags |= NVME_NS_FLUSH_SUPPORTED;

	/*
	 * cdev may have already been created, if we are reconstructing the
	 *  namespace after a controller-level reset.
	 */
	if (ns->cdev != NULL)
		return (0);

	/*
	 * Namespace IDs start at 1, so we need to subtract 1 to create a
	 *  correct unit number.
	 */
	unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1;
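
	/*
	 * For example, if NVME_MAX_NAMESPACES is 16, the second namespace
	 *  (id 2) on nvme1 gets unit 1 * 16 + 2 - 1 = 17.
	 */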

	/*
	 * MAKEDEV_ETERNAL was added in r210923, for cdevs that will never
	 *  be destroyed.  This avoids refcounting on the cdev object.
	 *  That should be OK here, as long as we're not supporting PCIe
	 *  surprise removal nor namespace deletion.
	 */
#ifdef MAKEDEV_ETERNAL_KLD
	ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, unit,
	    NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
	    device_get_unit(ctrlr->dev), ns->id);
#else
	ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, unit,
	    NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
	    device_get_unit(ctrlr->dev), ns->id);
#endif
#ifdef NVME_UNMAPPED_BIO_SUPPORT
	ns->cdev->si_flags |= SI_UNMAPPED;
#endif

	if (ns->cdev != NULL)
		ns->cdev->si_drv1 = ns;

	return (0);
}

void
nvme_ns_destruct(struct nvme_namespace *ns)
{

	if (ns->cdev != NULL)
		destroy_dev(ns->cdev);
}