sys/dev/nvme/nvme.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/module.h>

#include <vm/uma.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "nvme_private.h"

struct nvme_consumer {
        uint32_t                id;
        nvme_cons_ns_fn_t       ns_fn;
        nvme_cons_ctrlr_fn_t    ctrlr_fn;
        nvme_cons_async_fn_t    async_fn;
        nvme_cons_fail_fn_t     fail_fn;
};

struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
#define INVALID_CONSUMER_ID     0xFFFF

uma_zone_t      nvme_request_zone;
int32_t         nvme_retry_count;

MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");

static int    nvme_probe(device_t);
static int    nvme_attach(device_t);
static int    nvme_detach(device_t);
static int    nvme_shutdown(device_t);

static devclass_t nvme_devclass;

static device_method_t nvme_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,     nvme_probe),
        DEVMETHOD(device_attach,    nvme_attach),
        DEVMETHOD(device_detach,    nvme_detach),
        DEVMETHOD(device_shutdown,  nvme_shutdown),
        { 0, 0 }
};

static driver_t nvme_pci_driver = {
        "nvme",
        nvme_pci_methods,
        sizeof(struct nvme_controller),
};

DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, NULL, NULL);
MODULE_VERSION(nvme, 1);
MODULE_DEPEND(nvme, cam, 1, 1, 1);

static struct _pcsid
{
        uint32_t        devid;
        int             match_subdevice;
        uint16_t        subdevice;
        const char      *desc;
        uint32_t        quirks;
} pci_ids[] = {
        { 0x01118086,           0, 0, "NVMe Controller"  },
        { IDT32_PCI_ID,         0, 0, "IDT NVMe Controller (32 channel)"  },
        { IDT8_PCI_ID,          0, 0, "IDT NVMe Controller (8 channel)" },
        { 0x09538086,           1, 0x3702, "DC P3700 SSD" },
        { 0x09538086,           1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
        { 0x09538086,           1, 0x3704, "DC P3500 SSD [Add-in Card]" },
        { 0x09538086,           1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
        { 0x09538086,           1, 0x3709, "DC P3600 SSD [Add-in Card]" },
        { 0x09538086,           1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
        { 0x00031c58,           0, 0, "HGST SN100",     QUIRK_DELAY_B4_CHK_RDY },
        { 0x00231c58,           0, 0, "WDC SN200",      QUIRK_DELAY_B4_CHK_RDY },
        { 0x05401c5f,           0, 0, "Memblaze Pblaze4", QUIRK_DELAY_B4_CHK_RDY },
        { 0xa821144d,           0, 0, "Samsung PM1725", QUIRK_DELAY_B4_CHK_RDY },
        { 0xa822144d,           0, 0, "Samsung PM1725a", QUIRK_DELAY_B4_CHK_RDY },
        { 0x01161179,           0, 0, "Toshiba XG5", QUIRK_DISABLE_TIMEOUT },
        { 0x00000000,           0, 0, NULL  }
};
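
/*
 * Illustration only (not part of the driver): a hypothetical quirk entry
 * would follow the same layout as the rows above, e.g.
 *
 *      { 0x1234abcd,           0, 0, "Example NVMe SSD", QUIRK_DELAY_B4_CHK_RDY },
 *
 * where the device id and description are made up for this sketch.
 * match_subdevice (together with a specific subdevice id) is only set when
 * one device id is shared by several models that need different
 * descriptions, as with the Intel DC P3x00 entries above.
 */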

static int
nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
{
        if (devid != ep->devid)
                return 0;

        if (!ep->match_subdevice)
                return 1;

        if (subdevice == ep->subdevice)
                return 1;
        else
                return 0;
}

static int
nvme_probe(device_t device)
{
        struct _pcsid   *ep;
        uint32_t        devid;
        uint16_t        subdevice;

        devid = pci_get_devid(device);
        subdevice = pci_get_subdevice(device);
        ep = pci_ids;

        while (ep->devid) {
                if (nvme_match(devid, subdevice, ep))
                        break;
                ++ep;
        }

        if (ep->desc) {
                device_set_desc(device, ep->desc);
                return (BUS_PROBE_DEFAULT);
        }

#if defined(PCIS_STORAGE_NVM)
        if (pci_get_class(device)    == PCIC_STORAGE &&
            pci_get_subclass(device) == PCIS_STORAGE_NVM &&
            pci_get_progif(device)   == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
                device_set_desc(device, "Generic NVMe Device");
                return (BUS_PROBE_GENERIC);
        }
#endif

        return (ENXIO);
}

static void
nvme_init(void)
{
        uint32_t        i;

        nvme_request_zone = uma_zcreate("nvme_request",
            sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);

        for (i = 0; i < NVME_MAX_CONSUMERS; i++)
                nvme_consumer[i].id = INVALID_CONSUMER_ID;
}

SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);

static void
nvme_uninit(void)
{
        uma_zdestroy(nvme_request_zone);
}

SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);

static int
nvme_shutdown(device_t dev)
{
        struct nvme_controller  *ctrlr;

        ctrlr = DEVICE2SOFTC(dev);
        nvme_ctrlr_shutdown(ctrlr);

        return (0);
}

void
nvme_dump_command(struct nvme_command *cmd)
{

        printf(
"opc:%x f:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
            cmd->opc, cmd->fuse, cmd->cid, le32toh(cmd->nsid),
            cmd->rsvd2, cmd->rsvd3,
            (uintmax_t)le64toh(cmd->mptr), (uintmax_t)le64toh(cmd->prp1), (uintmax_t)le64toh(cmd->prp2),
            le32toh(cmd->cdw10), le32toh(cmd->cdw11), le32toh(cmd->cdw12),
            le32toh(cmd->cdw13), le32toh(cmd->cdw14), le32toh(cmd->cdw15));
}

void
nvme_dump_completion(struct nvme_completion *cpl)
{
        uint8_t p, sc, sct, m, dnr;
        uint16_t status;

        status = le16toh(cpl->status);

        p = NVME_STATUS_GET_P(status);
        sc = NVME_STATUS_GET_SC(status);
        sct = NVME_STATUS_GET_SCT(status);
        m = NVME_STATUS_GET_M(status);
        dnr = NVME_STATUS_GET_DNR(status);

        printf("cdw0:%08x sqhd:%04x sqid:%04x "
            "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
            le32toh(cpl->cdw0), le16toh(cpl->sqhd), le16toh(cpl->sqid),
            cpl->cid, p, sc, sct, m, dnr);
}

static int
nvme_attach(device_t dev)
{
        struct nvme_controller  *ctrlr = DEVICE2SOFTC(dev);
        int                     status;
        struct _pcsid           *ep;
        uint32_t                devid;
        uint16_t                subdevice;

        devid = pci_get_devid(dev);
        subdevice = pci_get_subdevice(dev);
        ep = pci_ids;
        while (ep->devid) {
                if (nvme_match(devid, subdevice, ep))
                        break;
                ++ep;
        }
        ctrlr->quirks = ep->quirks;

        status = nvme_ctrlr_construct(ctrlr, dev);

        if (status != 0) {
                nvme_ctrlr_destruct(ctrlr, dev);
                return (status);
        }

        /*
         * Some drives do not implement the completion timeout feature
         *  correctly.  There's a work-around from the manufacturer: just
         *  disable it.  The driver wouldn't respond correctly to a timeout
         *  anyway.
         */
        if (ep->quirks & QUIRK_DISABLE_TIMEOUT) {
                int ptr;
                uint16_t devctl2;

                status = pci_find_cap(dev, PCIY_EXPRESS, &ptr);
                if (status) {
                        device_printf(dev, "Can't locate PCIe capability?\n");
                        return (status);
                }
                devctl2 = pci_read_config(dev, ptr + PCIER_DEVICE_CTL2, sizeof(devctl2));
                devctl2 |= PCIEM_CTL2_COMP_TIMO_DISABLE;
                pci_write_config(dev, ptr + PCIER_DEVICE_CTL2, devctl2, sizeof(devctl2));
        }

        /*
         * Enable busmastering so the completion status messages can
         *  be busmastered back to the host.
         */
        pci_enable_busmaster(dev);

        /*
         * Reset controller twice to ensure we do a transition from cc.en==1
         *  to cc.en==0.  This is because we don't really know what status
         *  the controller was left in when boot handed off to OS.
         */
        status = nvme_ctrlr_hw_reset(ctrlr);
        if (status != 0) {
                nvme_ctrlr_destruct(ctrlr, dev);
                return (status);
        }

        status = nvme_ctrlr_hw_reset(ctrlr);
        if (status != 0) {
                nvme_ctrlr_destruct(ctrlr, dev);
                return (status);
        }

        ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
        ctrlr->config_hook.ich_arg = ctrlr;

        config_intrhook_establish(&ctrlr->config_hook);

        return (0);
}

static int
nvme_detach(device_t dev)
{
        struct nvme_controller  *ctrlr = DEVICE2SOFTC(dev);

        nvme_ctrlr_destruct(ctrlr, dev);
        pci_disable_busmaster(dev);
        return (0);
}

static void
nvme_notify(struct nvme_consumer *cons,
            struct nvme_controller *ctrlr)
{
        struct nvme_namespace   *ns;
        void                    *ctrlr_cookie;
        int                     cmpset, ns_idx;

        /*
         * The consumer may register itself after the nvme devices
         *  have registered with the kernel, but before the
         *  driver has completed initialization.  In that case,
         *  return here, and when initialization completes, the
         *  controller will make sure the consumer gets notified.
         */
        if (!ctrlr->is_initialized)
                return;

        cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1);

        if (cmpset == 0)
                return;

        if (cons->ctrlr_fn != NULL)
                ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
        else
                ctrlr_cookie = NULL;
        ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
        if (ctrlr->is_failed) {
                if (cons->fail_fn != NULL)
                        (*cons->fail_fn)(ctrlr_cookie);
                /*
                 * Do not notify consumers about the namespaces of a
                 *  failed controller.
                 */
                return;
        }
        for (ns_idx = 0; ns_idx < min(ctrlr->cdata.nn, NVME_MAX_NAMESPACES); ns_idx++) {
                ns = &ctrlr->ns[ns_idx];
                if (ns->data.nsze == 0)
                        continue;
                if (cons->ns_fn != NULL)
                        ns->cons_cookie[cons->id] =
                            (*cons->ns_fn)(ns, ctrlr_cookie);
        }
}

void
nvme_notify_new_controller(struct nvme_controller *ctrlr)
{
        int i;

        for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
                if (nvme_consumer[i].id != INVALID_CONSUMER_ID) {
                        nvme_notify(&nvme_consumer[i], ctrlr);
                }
        }
}

static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
        device_t                *devlist;
        struct nvme_controller  *ctrlr;
        int                     dev_idx, devcount;

        if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
                return;

        for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
                ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
                nvme_notify(cons, ctrlr);
        }

        free(devlist, M_TEMP);
}

void
nvme_notify_async_consumers(struct nvme_controller *ctrlr,
                            const struct nvme_completion *async_cpl,
                            uint32_t log_page_id, void *log_page_buffer,
                            uint32_t log_page_size)
{
        struct nvme_consumer    *cons;
        uint32_t                i;

        for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
                cons = &nvme_consumer[i];
                if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
                        (*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
                            log_page_id, log_page_buffer, log_page_size);
        }
}

void
nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
{
        struct nvme_consumer    *cons;
        uint32_t                i;

        /*
         * This controller failed during initialization (i.e. IDENTIFY
         *  command failed or timed out).  Do not notify any nvme
         *  consumers of the failure here, since the consumer does not
         *  even know about the controller yet.
         */
        if (!ctrlr->is_initialized)
                return;

        for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
                cons = &nvme_consumer[i];
                if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
                        cons->fail_fn(ctrlr->cons_cookie[i]);
        }
}

void
nvme_notify_ns(struct nvme_controller *ctrlr, int nsid)
{
        struct nvme_consumer    *cons;
        struct nvme_namespace   *ns = &ctrlr->ns[nsid - 1];
        uint32_t                i;

        if (!ctrlr->is_initialized)
                return;

        for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
                cons = &nvme_consumer[i];
                if (cons->id != INVALID_CONSUMER_ID && cons->ns_fn != NULL)
                        ns->cons_cookie[cons->id] =
                            (*cons->ns_fn)(ns, ctrlr->cons_cookie[cons->id]);
        }
}

struct nvme_consumer *
nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
                       nvme_cons_async_fn_t async_fn,
                       nvme_cons_fail_fn_t fail_fn)
{
        int i;

        /*
         * TODO: add locking around consumer registration.
         */
        for (i = 0; i < NVME_MAX_CONSUMERS; i++)
                if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
                        nvme_consumer[i].id = i;
                        nvme_consumer[i].ns_fn = ns_fn;
                        nvme_consumer[i].ctrlr_fn = ctrlr_fn;
                        nvme_consumer[i].async_fn = async_fn;
                        nvme_consumer[i].fail_fn = fail_fn;

                        nvme_notify_new_consumer(&nvme_consumer[i]);
                        return (&nvme_consumer[i]);
                }

        printf("nvme(4): consumer not registered - no slots available\n");
        return (NULL);
}

void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{

        consumer->id = INVALID_CONSUMER_ID;
}
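
/*
 * Illustration only, not part of this file: a minimal sketch of how a
 * storage consumer (in the style of nvd(4) or nvme_sim) might use the
 * registration interface above.  The example_* names are hypothetical;
 * only nvme_register_consumer() and nvme_unregister_consumer() come from
 * this driver, and the callback signatures follow the nvme_cons_*_fn_t
 * typedefs used in struct nvme_consumer.  Kept under #if 0 so it is
 * never built.
 */
#if 0
static struct nvme_consumer *example_consumer;

static void *
example_ctrlr_fn(struct nvme_controller *ctrlr)
{
        /* Per-controller setup; the return value becomes the controller cookie. */
        return (NULL);
}

static void *
example_ns_fn(struct nvme_namespace *ns, void *ctrlr_cookie)
{
        /* Per-namespace setup (e.g. create a disk); returned as the ns cookie. */
        return (NULL);
}

static void
example_fail_fn(void *ctrlr_cookie)
{
        /* Tear down whatever example_ctrlr_fn() set up for a failed controller. */
}

static void
example_load(void)
{
        /* Passing NULL for async_fn means no async-event notifications. */
        example_consumer = nvme_register_consumer(example_ns_fn,
            example_ctrlr_fn, NULL, example_fail_fn);
}

static void
example_unload(void)
{
        if (example_consumer != NULL)
                nvme_unregister_consumer(example_consumer);
}
#endif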

void
nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
{
        struct nvme_completion_poll_status      *status = arg;

        /*
         * Copy status into the argument passed by the caller, so that
         *  the caller can check the status to determine if the
         *  request passed or failed.
         */
        memcpy(&status->cpl, cpl, sizeof(*cpl));
        atomic_store_rel_int(&status->done, 1);
}
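
/*
 * Illustration only, not part of this file: a minimal sketch of the calling
 * convention for nvme_completion_poll_cb().  The submission helper shown,
 * nvme_ctrlr_cmd_identify_controller() from nvme_private.h, is just one
 * example of a command that takes a completion callback; the busy-wait loop
 * mirrors how the driver polls status.done, which the callback sets above
 * with release semantics.  Kept under #if 0 so it is never built.
 */
#if 0
static int
example_identify(struct nvme_controller *ctrlr)
{
        struct nvme_completion_poll_status      status;

        status.done = 0;
        nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
            nvme_completion_poll_cb, &status);
        while (!atomic_load_acq_int(&status.done))
                pause("nvme", 1);

        if (nvme_completion_is_error(&status.cpl)) {
                nvme_dump_completion(&status.cpl);
                return (ENXIO);
        }
        return (0);
}
#endif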