/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

static int
nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
        struct nvme_completion_poll_status      status;
        struct nvme_namespace                   *ns;
        struct nvme_controller                  *ctrlr;

        ns = cdev->si_drv1;
        ctrlr = ns->ctrlr;

        switch (cmd) {
        case NVME_IDENTIFY_NAMESPACE:
#ifdef CHATHAM2
                /*
                 * Don't refresh data on Chatham, since Chatham returns
                 *  garbage on IDENTIFY anyway.
                 */
                if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID) {
                        memcpy(arg, &ns->data, sizeof(ns->data));
                        break;
                }
#endif
                /* Refresh data before returning to user. */
                status.done = FALSE;
                nvme_ctrlr_cmd_identify_namespace(ctrlr, ns->id, &ns->data,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl))
                        return (ENXIO);
                memcpy(arg, &ns->data, sizeof(ns->data));
                break;
        case NVME_IO_TEST:
        case NVME_BIO_TEST:
                nvme_ns_test(ns, cmd, arg);
                break;
        case DIOCGMEDIASIZE:
                *(off_t *)arg = (off_t)nvme_ns_get_size(ns);
                break;
        case DIOCGSECTORSIZE:
                *(u_int *)arg = nvme_ns_get_sector_size(ns);
                break;
        default:
                return (ENOTTY);
        }

        return (0);
}

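/*
 * Example (not part of the driver): because the namespace cdev created in
 * nvme_ns_construct() handles the standard disk ioctls above, a userland
 * program can query the namespace geometry without any NVMe-specific code.
 * A minimal sketch, assuming the first namespace of the first controller
 * appears as /dev/nvme0ns1:
 *
 *      #include <sys/types.h>
 *      #include <sys/disk.h>
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <stdio.h>
 *
 *      int
 *      main(void)
 *      {
 *              off_t   size;
 *              u_int   sectsize;
 *              int     fd;
 *
 *              fd = open("/dev/nvme0ns1", O_RDONLY);
 *              if (fd < 0)
 *                      return (1);
 *              if (ioctl(fd, DIOCGMEDIASIZE, &size) == 0 &&
 *                  ioctl(fd, DIOCGSECTORSIZE, &sectsize) == 0)
 *                      printf("%ju bytes, %u bytes per sector\n",
 *                          (uintmax_t)size, sectsize);
 *              return (0);
 *      }
 *
 * The NVME_IDENTIFY_NAMESPACE ioctl works the same way, copying out the
 * struct nvme_namespace_data refreshed above; see nvme.h for its layout.
 */
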
static int
nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{
        int error = 0;

        if (flags & FWRITE)
                error = securelevel_gt(td->td_ucred, 0);

        return (error);
}

static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
    struct thread *td)
{

        return (0);
}

static void
nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
{
        struct bio *bp = arg;

        /*
         * TODO: add more extensive translation of NVMe status codes
         *  to different bio error codes (e.g. EIO, EINVAL, etc.)
         */
        if (nvme_completion_is_error(cpl)) {
                bp->bio_error = EIO;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
        } else
                bp->bio_resid = 0;

        biodone(bp);
}

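/*
 * A sketch of the fuller translation the TODO above refers to, using a
 * hypothetical helper (nvme_ns_cpl_to_errno() does not exist in this
 * driver).  The status.sct and status.sc fields and the NVME_SCT_GENERIC,
 * NVME_SCT_MEDIA_ERROR and NVME_SC_LBA_OUT_OF_RANGE names are assumed
 * from nvme.h and would need to be checked against the actual headers:
 *
 *      static int
 *      nvme_ns_cpl_to_errno(const struct nvme_completion *cpl)
 *      {
 *
 *              if (!nvme_completion_is_error(cpl))
 *                      return (0);
 *
 *              switch (cpl->status.sct) {
 *              case NVME_SCT_GENERIC:
 *                      if (cpl->status.sc == NVME_SC_LBA_OUT_OF_RANGE)
 *                              return (EINVAL);
 *                      return (EIO);
 *              case NVME_SCT_MEDIA_ERROR:
 *              default:
 *                      return (EIO);
 *              }
 *      }
 *
 * nvme_ns_strategy_done() could then set bp->bio_error to the returned
 * value instead of hard-coding EIO.
 */
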
static void
nvme_ns_strategy(struct bio *bp)
{
        struct nvme_namespace   *ns;
        int                     err;

        ns = bp->bio_dev->si_drv1;
        err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);

        if (err) {
                bp->bio_error = err;
                bp->bio_flags |= BIO_ERROR;
                bp->bio_resid = bp->bio_bcount;
                biodone(bp);
        }
}

static struct cdevsw nvme_ns_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_DISK,
        .d_open =       nvme_ns_open,
        .d_close =      nvme_ns_close,
        .d_read =       nvme_ns_physio,
        .d_write =      nvme_ns_physio,
        .d_strategy =   nvme_ns_strategy,
        .d_ioctl =      nvme_ns_ioctl
};

uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
{
        return (ns->ctrlr->max_xfer_size);
}

uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
{
        return (1 << ns->data.lbaf[0].lbads);
}

uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
{
        return (ns->data.nsze);
}

uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
{
        return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
}

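/*
 * For example, with the common lbads value of 9 in LBA format 0 the
 * sector size above is 1 << 9 = 512 bytes, so a namespace reporting
 * nsze == 2097152 LBAs has a total size of 2097152 * 512 = 1073741824
 * bytes (1 GiB).  Note that the sector size is always taken from LBA
 * format 0 here.
 */
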
uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
{
        return (ns->flags);
}

const char *
nvme_ns_get_serial_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.sn);
}

const char *
nvme_ns_get_model_number(struct nvme_namespace *ns)
{
        return ((const char *)ns->ctrlr->cdata.mn);
}

const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{

        return (&ns->data);
}

static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
        struct bio      *bp = arg;
        nvme_cb_fn_t    bp_cb_fn;

        bp_cb_fn = bp->bio_driver1;

        if (bp->bio_driver2)
                free(bp->bio_driver2, M_NVME);

        bp_cb_fn(bp, status);
}

int
nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
        nvme_cb_fn_t cb_fn)
{
        struct nvme_dsm_range   *dsm_range;
        int                     err;

        bp->bio_driver1 = cb_fn;

        switch (bp->bio_cmd) {
        case BIO_READ:
                err = nvme_ns_cmd_read(ns, bp->bio_data,
                        bp->bio_offset/nvme_ns_get_sector_size(ns),
                        bp->bio_bcount/nvme_ns_get_sector_size(ns),
                        nvme_ns_bio_done, bp);
                break;
        case BIO_WRITE:
                err = nvme_ns_cmd_write(ns, bp->bio_data,
                        bp->bio_offset/nvme_ns_get_sector_size(ns),
                        bp->bio_bcount/nvme_ns_get_sector_size(ns),
                        nvme_ns_bio_done, bp);
                break;
        case BIO_FLUSH:
                err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
                break;
        case BIO_DELETE:
                dsm_range =
                    malloc(sizeof(struct nvme_dsm_range), M_NVME,
                    M_ZERO | M_WAITOK);
                dsm_range->length =
                    bp->bio_bcount/nvme_ns_get_sector_size(ns);
                dsm_range->starting_lba =
                    bp->bio_offset/nvme_ns_get_sector_size(ns);
                bp->bio_driver2 = dsm_range;
                err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
                        nvme_ns_bio_done, bp);
                if (err != 0)
                        free(dsm_range, M_NVME);
                break;
        default:
                err = EIO;
                break;
        }

        return (err);
}

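/*
 * All byte offsets and counts above are converted to LBAs by dividing by
 * the sector size.  For instance, on a 512-byte-sector namespace a
 * BIO_DELETE with bio_offset == 1048576 and bio_bcount == 65536 is sent
 * to the controller as a single DSM range with starting_lba == 2048 and
 * length == 128 blocks.
 */
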
#ifdef CHATHAM2
static void
nvme_ns_populate_chatham_data(struct nvme_namespace *ns)
{
        struct nvme_controller          *ctrlr;
        struct nvme_namespace_data      *nsdata;

        ctrlr = ns->ctrlr;
        nsdata = &ns->data;

        nsdata->nsze = ctrlr->chatham_lbas;
        nsdata->ncap = ctrlr->chatham_lbas;
        nsdata->nuse = ctrlr->chatham_lbas;

        /* Chatham2 doesn't support thin provisioning. */
        nsdata->nsfeat.thin_prov = 0;

        /* Set LBA size to 512 bytes. */
        nsdata->lbaf[0].lbads = 9;
}
#endif /* CHATHAM2 */

int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
    struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;

        ns->ctrlr = ctrlr;
        ns->id = id;

#ifdef CHATHAM2
        if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
                nvme_ns_populate_chatham_data(ns);
        else {
#endif
                status.done = FALSE;
                nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
                    nvme_completion_poll_cb, &status);
                while (status.done == FALSE)
                        DELAY(5);
                if (nvme_completion_is_error(&status.cpl)) {
                        nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
                        return (ENXIO);
                }
#ifdef CHATHAM2
        }
#endif

        if (ctrlr->cdata.oncs.dsm)
                ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;

        if (ctrlr->cdata.vwc.present)
                ns->flags |= NVME_NS_FLUSH_SUPPORTED;

        /*
         * cdev may have already been created, if we are reconstructing the
         *  namespace after a controller-level reset.
         */
        if (ns->cdev != NULL)
                return (0);

/*
 * MAKEDEV_ETERNAL was added in r210923, for cdevs that will never
 *  be destroyed.  This avoids refcounting on the cdev object.
 *  That should be OK here, as long as we're not supporting PCIe
 *  surprise removal or namespace deletion.
 */
#ifdef MAKEDEV_ETERNAL_KLD
        ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, 0,
            NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
            device_get_unit(ctrlr->dev), ns->id);
#else
        ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, 0,
            NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
            device_get_unit(ctrlr->dev), ns->id);
#endif

        if (ns->cdev != NULL)
                ns->cdev->si_drv1 = ns;

        return (0);
}

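/*
 * For reference, nvme_ns_construct() is driven from the controller code
 * in nvme_ctrlr.c, which constructs one namespace object per namespace
 * reported in the controller identify data, with namespace IDs starting
 * at 1.  A simplified sketch of that loop (not necessarily the exact
 * code in the tree):
 *
 *      for (i = 0; i < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); i++) {
 *              ns = &ctrlr->ns[i];
 *              nvme_ns_construct(ns, i + 1, ctrlr);
 *      }
 *
 * NVME_MAX_NAMESPACES bounds the per-controller namespace array and is
 * defined in nvme_private.h.
 */
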
void
nvme_ns_destruct(struct nvme_namespace *ns)
{

        if (ns->cdev != NULL)
                destroy_dev(ns->cdev);
}