sys/dev/nvme/nvme_ns.c (FreeBSD/FreeBSD.git)
Commit: Remove the NVMe-specific physio and associated routines.

/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_completion_poll_status      status;
        struct nvme_namespace                   *ns;
        struct nvme_controller                  *ctrlr;

        ns = cdev->si_drv1;
        ctrlr = ns->ctrlr;

        switch (cmd) {
        case NVME_IDENTIFY_NAMESPACE:
#ifdef CHATHAM2
                /*
                 * Don't refresh data on Chatham, since Chatham returns
                 *  garbage on IDENTIFY anyway.
                 */
                if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID) {
                        memcpy(arg, &ns->data, sizeof(ns->data));
                        break;
                }
#endif
                /* Refresh data before returning to user. */
                status.done = FALSE;
                nvme_ctrlr_cmd_identify_namespace(ctrlr, ns->id, &ns->data,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl))
                        return (ENXIO);
                memcpy(arg, &ns->data, sizeof(ns->data));
                break;
        case NVME_IO_TEST:
        case NVME_BIO_TEST:
                nvme_ns_test(ns, cmd, arg);
                break;
        case DIOCGMEDIASIZE:
                *(off_t *)arg = (off_t)nvme_ns_get_size(ns);
                break;
        case DIOCGSECTORSIZE:
                *(u_int *)arg = nvme_ns_get_sector_size(ns);
                break;
        default:
                return (ENOTTY);
        }

        return (0);
}

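/*
 * Usage sketch (added illustration, not part of the driver): these ioctls
 * are reached through the namespace cdev created below as "nvme%dns%d".
 * A hypothetical userland caller might look like:
 *
 *	struct nvme_namespace_data nsdata;
 *	int fd = open("/dev/nvme0ns1", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, NVME_IDENTIFY_NAMESPACE, &nsdata) == 0)
 *		printf("%ju sectors\n", (uintmax_t)nsdata.nsze);
 */
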
static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
        int error = 0;

        if (flags & FWRITE)
                error = securelevel_gt(td->td_ucred, 0);

        return (error);
}

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

        return (0);
}

static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
        struct bio *bp = arg;

        /*
         * TODO: add more extensive translation of NVMe status codes
         *  to different bio error codes (e.g. EIO, EINVAL, etc.)
         */
        if (nvme_completion_is_error(cpl)) {
                bp->bio_error = EIO;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
        } else
                bp->bio_resid = 0;

        biodone(bp);
}

static void
nvme_ns_strategy(struct bio *bp)
{
        struct nvme_namespace   *ns;
        int                     err;

        ns = bp->bio_dev->si_drv1;
        err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

        if (err) {
                bp->bio_error = err;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
                biodone(bp);
        }
}

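/*
 * Added commentary: physread() and physwrite() are the generic kernel
 * helpers that implement read(2)/write(2) for disk-like cdevs by pushing
 * the request through d_strategy via physio(9); with them in place, no
 * NVMe-specific physio routines are needed.  D_UNMAPPED_IO advertises
 * that the strategy path can accept bios whose data has no kernel
 * virtual address mapping.
 */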
static struct cdevsw nvme_ns_cdevsw = {
        .d_version =    D_VERSION,
#ifdef NVME_UNMAPPED_BIO_SUPPORT
        .d_flags =      D_DISK | D_UNMAPPED_IO,
#else
        .d_flags =      D_DISK,
#endif
        .d_read =       physread,
        .d_write =      physwrite,
        .d_open =       nvme_ns_open,
        .d_close =      nvme_ns_close,
        .d_strategy =   nvme_ns_strategy,
        .d_ioctl =      nvme_ns_ioctl
};

uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
        return (ns->ctrlr->max_xfer_size);
}

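/*
 * Added commentary: LBADS in the Identify Namespace LBA format entry
 * encodes the sector size as a power of two, so the shift below yields
 * bytes (e.g. lbads == 9 gives 512-byte sectors, lbads == 12 gives
 * 4096).  Only LBA format 0 is consulted here.
 */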
uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
        return (1 << ns->data.lbaf[0].lbads);
}

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
        return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
        return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
        return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{

        return (&ns->data);
}

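/*
 * Added commentary: nvme_ns_bio_process() stores the caller's completion
 * callback in bio_driver1 and, for BIO_DELETE, the DSM range buffer in
 * bio_driver2.  This wrapper runs on NVMe completion, frees the range
 * buffer if one was allocated, then invokes the original callback.
 */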
static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
        struct bio      *bp = arg;
        nvme_cb_fn_t    bp_cb_fn;

        bp_cb_fn = bp->bio_driver1;

        if (bp->bio_driver2)
                free(bp->bio_driver2, M_NVME);

        bp_cb_fn(bp, status);
}

int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
        nvme_cb_fn_t cb_fn)
{
        struct nvme_dsm_range   *dsm_range;
        int                     err;

        bp->bio_driver1 = cb_fn;

        switch (bp->bio_cmd) {
        case BIO_READ:
                err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
                break;
        case BIO_WRITE:
                err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
                break;
        case BIO_FLUSH:
                err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
                break;
        case BIO_DELETE:
                dsm_range =
                    malloc(sizeof(struct nvme_dsm_range), M_NVME,
                    M_ZERO | M_WAITOK);
                dsm_range->length =
                    bp->bio_bcount / nvme_ns_get_sector_size(ns);
                dsm_range->starting_lba =
                    bp->bio_offset / nvme_ns_get_sector_size(ns);
                bp->bio_driver2 = dsm_range;
                err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
                        nvme_ns_bio_done, bp);
                if (err != 0)
                        free(dsm_range, M_NVME);
                break;
        default:
                err = EIO;
                break;
        }

        return (err);
}

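/*
 * Worked example (added illustration): with 512-byte sectors, a
 * BIO_DELETE of bio_bcount = 4096 at bio_offset = 8192 becomes one DSM
 * range of length 4096 / 512 = 8 blocks starting at LBA 8192 / 512 = 16,
 * which nvme_ns_cmd_deallocate() submits as a Dataset Management command
 * with the deallocate attribute (the NVMe analogue of TRIM).
 */
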
#ifdef CHATHAM2
static void
nvme_ns_populate_chatham_data(struct nvme_namespace *ns)
{
        struct nvme_controller          *ctrlr;
        struct nvme_namespace_data      *nsdata;

        ctrlr = ns->ctrlr;
        nsdata = &ns->data;

        nsdata->nsze = ctrlr->chatham_lbas;
        nsdata->ncap = ctrlr->chatham_lbas;
        nsdata->nuse = ctrlr->chatham_lbas;

        /* Chatham2 doesn't support thin provisioning. */
        nsdata->nsfeat.thin_prov = 0;

        /* Set LBA size to 512 bytes. */
        nsdata->lbaf[0].lbads = 9;
}
#endif /* CHATHAM2 */

int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
    struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;

        ns->ctrlr = ctrlr;
        ns->id = id;

        /*
         * Namespaces are reconstructed after a controller reset, so check
         *  to make sure we only call mtx_init once on each mtx.
         *
         * TODO: Move this somewhere where it gets called at controller
         *  construction time, which is not invoked as part of each
         *  controller reset.
         */
        if (!mtx_initialized(&ns->lock))
                mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);

#ifdef CHATHAM2
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                nvme_ns_populate_chatham_data(ns);
        else {
#endif
                status.done = FALSE;
                nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
                        return (ENXIO);
                }
#ifdef CHATHAM2
        }
#endif

        if (ctrlr->cdata.oncs.dsm)
                ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

        if (ctrlr->cdata.vwc.present)
                ns->flags |= NVME_NS_FLUSH_SUPPORTED;

        /*
         * cdev may have already been created, if we are reconstructing the
         *  namespace after a controller-level reset.
         */
        if (ns->cdev != NULL)
                return (0);

/*
 * MAKEDEV_ETERNAL was added in r210923, for cdevs that will never
 *  be destroyed.  This avoids refcounting on the cdev object, which
 *  should be OK here as long as we support neither PCIe surprise
 *  removal nor namespace deletion.
 */
#ifdef MAKEDEV_ETERNAL_KLD
        ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, 0,
            NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
            device_get_unit(ctrlr->dev), ns->id);
#else
        ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, 0,
            NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
            device_get_unit(ctrlr->dev), ns->id);
#endif

        if (ns->cdev != NULL)
                ns->cdev->si_drv1 = ns;

        return (0);
}

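/*
 * Usage sketch (an assumption about the caller, not verified against
 * nvme_ctrlr.c): controller construction is expected to build one
 * namespace object per namespace reported by the controller, using
 * 1-based namespace IDs, roughly:
 *
 *	for (i = 0; i < ctrlr->cdata.nn; i++)
 *		nvme_ns_construct(&ctrlr->ns[i], i + 1, ctrlr);
 */
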
void
nvme_ns_destruct(struct nvme_namespace *ns)
{

        if (ns->cdev != NULL)
                destroy_dev(ns->cdev);
}