/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define CHATHAM2

#ifdef CHATHAM2
#define CHATHAM_PCI_ID          0x20118086
#define CHATHAM_CONTROL_BAR     0
#endif

#define IDT32_PCI_ID            0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID             0x80d2111d /* 8 channel board */

#define NVME_MAX_PRP_LIST_ENTRIES       (32)

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that the real maximum number of PRP entries we support is 32+1,
 *  which results in a maximum transfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_XFER_SIZE      (NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
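
/*
 * Illustrative sketch (an assumption, not code taken from this driver):
 * for a payload spanning more than two pages, the busdma callback would
 * place the first segment's address in prp1, point prp2 at the tracker's
 * PRP list, and fill that list with the remaining segment addresses,
 * roughly:
 *
 *      tr->req->cmd.prp1 = seg[0].ds_addr;
 *      tr->req->cmd.prp2 = tr->prp_bus_addr;
 *      for (i = 1; i < nseg; i++)
 *              tr->prp[i - 1] = seg[i].ds_addr;
 */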

#define NVME_ADMIN_TRACKERS     (16)
#define NVME_ADMIN_ENTRIES      (128)
/* min and max are defined in the admin queue attributes section of the spec */
#define NVME_MIN_ADMIN_ENTRIES  (2)
#define NVME_MAX_ADMIN_ENTRIES  (4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O requests
 *  that we will allow to be outstanding on an I/O qpair at any time.  The only
 *  advantage in having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 *  when dumping the contents of the submission and completion queues, it will
 *  show a longer history of data.
 */
#define NVME_IO_ENTRIES         (256)
#define NVME_IO_TRACKERS        (128)
#define NVME_MIN_IO_TRACKERS    (16)
#define NVME_MAX_IO_TRACKERS    (1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */

#define NVME_INT_COAL_TIME      (0)     /* disabled */
#define NVME_INT_COAL_THRESHOLD (0)     /* 0-based */

#define NVME_MAX_NAMESPACES     (16)
#define NVME_MAX_CONSUMERS      (2)
#define NVME_MAX_ASYNC_EVENTS   (8)

#define NVME_TIMEOUT_IN_SEC     (30)

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE         (64)
#endif

extern uma_zone_t nvme_request_zone;

struct nvme_request {

        struct nvme_command             cmd;
        void                            *payload;
        uint32_t                        payload_size;
        uint32_t                        timeout;
        struct uio                      *uio;
        nvme_cb_fn_t                    cb_fn;
        void                            *cb_arg;
        STAILQ_ENTRY(nvme_request)      stailq;
};

struct nvme_async_event_request {

        struct nvme_controller          *ctrlr;
        struct nvme_request             *req;
};

struct nvme_tracker {

        SLIST_ENTRY(nvme_tracker)       slist;
        struct nvme_request             *req;
        struct nvme_qpair               *qpair;
        struct callout                  timer;
        bus_dmamap_t                    payload_dma_map;
        uint16_t                        cid;

        uint64_t                        prp[NVME_MAX_PRP_LIST_ENTRIES];
        bus_addr_t                      prp_bus_addr;
        bus_dmamap_t                    prp_dma_map;
};

struct nvme_qpair {

        struct nvme_controller  *ctrlr;
        uint32_t                id;
        uint32_t                phase;

        uint16_t                vector;
        int                     rid;
        struct resource         *res;
        void                    *tag;

        uint32_t                max_xfer_size;
        uint32_t                num_entries;
        uint32_t                num_trackers;
        uint32_t                sq_tdbl_off;
        uint32_t                cq_hdbl_off;

        uint32_t                sq_head;
        uint32_t                sq_tail;
        uint32_t                cq_head;

        int64_t                 num_cmds;
        int64_t                 num_intr_handler_calls;

        struct nvme_command     *cmd;
        struct nvme_completion  *cpl;

        bus_dma_tag_t           dma_tag;

        bus_dmamap_t            cmd_dma_map;
        uint64_t                cmd_bus_addr;

        bus_dmamap_t            cpl_dma_map;
        uint64_t                cpl_bus_addr;

        SLIST_HEAD(, nvme_tracker)      free_tr;
        STAILQ_HEAD(, nvme_request)     queued_req;

        struct nvme_tracker     **act_tr;

        struct mtx              lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
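
/*
 * Illustrative sketch (an assumption, not code taken from this driver):
 * phase holds the value expected in each completion entry's phase bit, so
 * the completion handler can detect newly posted entries roughly like
 * this, assuming nvme.h exposes the bit as status.p:
 *
 *      while (qpair->cpl[qpair->cq_head].status.p == qpair->phase) {
 *              ... complete the tracker in act_tr[cpl->cid] ...
 *              if (++qpair->cq_head == qpair->num_entries) {
 *                      qpair->cq_head = 0;
 *                      qpair->phase = !qpair->phase;
 *              }
 *      }
 */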

struct nvme_namespace {

        struct nvme_controller          *ctrlr;
        struct nvme_namespace_data      data;
        uint16_t                        id;
        uint16_t                        flags;
        struct cdev                     *cdev;
        void                            *cons_cookie[NVME_MAX_CONSUMERS];
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

        device_t                dev;

        uint32_t                ready_timeout_in_ms;

        bus_space_tag_t         bus_tag;
        bus_space_handle_t      bus_handle;
        int                     resource_id;
        struct resource         *resource;

        /*
         * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
         *  separate from the control registers, which are in BAR 0/1.  These
         *  members track the mapping of BAR 4/5 for that reason.
         */
        int                     bar4_resource_id;
        struct resource         *bar4_resource;

#ifdef CHATHAM2
        bus_space_tag_t         chatham_bus_tag;
        bus_space_handle_t      chatham_bus_handle;
        int                     chatham_resource_id;
        struct resource         *chatham_resource;
#endif

        uint32_t                msix_enabled;
        uint32_t                force_intx;

        uint32_t                num_io_queues;
        boolean_t               per_cpu_io_queues;

        /* Fields for tracking progress during controller initialization. */
        struct intr_config_hook config_hook;
        uint32_t                ns_identified;
        uint32_t                queues_created;

        /* For shared legacy interrupt. */
        int                     rid;
        struct resource         *res;
        void                    *tag;

        bus_dma_tag_t           hw_desc_tag;
        bus_dmamap_t            hw_desc_map;

        /** maximum i/o size in bytes */
        uint32_t                max_xfer_size;

        /** interrupt coalescing time period (in microseconds) */
        uint32_t                int_coal_time;

        /** interrupt coalescing threshold */
        uint32_t                int_coal_threshold;

        struct nvme_qpair       adminq;
        struct nvme_qpair       *ioq;

        struct nvme_registers           *regs;

        struct nvme_controller_data     cdata;
        struct nvme_namespace           ns[NVME_MAX_NAMESPACES];

        struct cdev                     *cdev;

        boolean_t                       is_started;

        uint32_t                        num_aers;
        struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];

        void                            *cons_cookie[NVME_MAX_CONSUMERS];

#ifdef CHATHAM2
        uint64_t                chatham_size;
        uint64_t                chatham_lbas;
#endif
};

#define nvme_mmio_offsetof(reg)                                                \
        offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)                                              \
        bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,                      \
            nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)                                        \
        bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,                     \
            nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)                                        \
        do {                                                                   \
                bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,             \
                    nvme_mmio_offsetof(reg), (val) & 0xFFFFFFFF);              \
                bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,             \
                    nvme_mmio_offsetof(reg)+4,                                 \
                    ((val) & 0xFFFFFFFF00000000UL) >> 32);                     \
        } while (0)
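
/*
 * Illustrative usage (a sketch, not code taken from this driver): the
 * lowercase register name maps to a struct nvme_registers field via
 * offsetof(), so reading a 32-bit register and programming a 64-bit
 * queue base address look roughly like:
 *
 *      uint32_t val = nvme_mmio_read_4(ctrlr, csts);
 *      nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 */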

#ifdef CHATHAM2
#define chatham_read_4(softc, reg) \
        bus_space_read_4((softc)->chatham_bus_tag,                             \
            (softc)->chatham_bus_handle, reg)

#define chatham_write_8(sc, reg, val)                                          \
        do {                                                                   \
                bus_space_write_4((sc)->chatham_bus_tag,                       \
                    (sc)->chatham_bus_handle, reg, (val) & 0xFFFFFFFF);        \
                bus_space_write_4((sc)->chatham_bus_tag,                       \
                    (sc)->chatham_bus_handle, (reg)+4,                         \
                    ((val) & 0xFFFFFFFF00000000UL) >> 32);                     \
        } while (0)

#endif /* CHATHAM2 */

#if __FreeBSD_version < 800054
/*
 * Compatibility shims: provide x86 memory fences on kernels that
 * predate mb()/wmb().
 */
#define wmb()   __asm volatile("sfence" ::: "memory")
#define mb()    __asm volatile("mfence" ::: "memory")
#endif

void    nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void    nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
                                           void *payload,
                                           nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
                                          uint16_t nsid, void *payload,
                                          nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
                                                uint32_t microseconds,
                                                uint32_t threshold,
                                                nvme_cb_fn_t cb_fn,
                                                void *cb_arg);
void    nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
                                                   uint32_t nsid,
                                                   struct nvme_health_information_page *payload,
                                                   nvme_cb_fn_t cb_fn,
                                                   void *cb_arg);
void    nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que, uint16_t vector,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
                                    struct nvme_qpair *io_que,
                                    nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
                                      uint32_t num_queues, nvme_cb_fn_t cb_fn,
                                      void *cb_arg);
void    nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
                                              union nvme_critical_warning_state state,
                                              nvme_cb_fn_t cb_fn, void *cb_arg);
void    nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
                             uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void    nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg,
                         int error);
void    nvme_payload_map_uio(void *arg, bus_dma_segment_t *seg, int nseg,
                             bus_size_t mapsize, int error);

int     nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void    nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
int     nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr is defined as void * to allow use with config_intrhook. */
void    nvme_ctrlr_start(void *ctrlr_arg);
void    nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
                                        struct nvme_request *req);
void    nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
                                     struct nvme_request *req);

void    nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
                             uint16_t vector, uint32_t num_entries,
                             uint32_t num_trackers, uint32_t max_xfer_size,
                             struct nvme_controller *ctrlr);
void    nvme_qpair_submit_cmd(struct nvme_qpair *qpair,
                              struct nvme_tracker *tr);
void    nvme_qpair_process_completions(struct nvme_qpair *qpair);
void    nvme_qpair_submit_request(struct nvme_qpair *qpair,
                                  struct nvme_request *req);
void    nvme_qpair_manual_abort_request(struct nvme_qpair *qpair,
                                        struct nvme_request *req, uint32_t sct,
                                        uint32_t sc, boolean_t print_on_error);

void    nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void    nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int     nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
                          struct nvme_controller *ctrlr);

int     nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag);

void    nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void    nvme_dump_command(struct nvme_command *cmd);
void    nvme_dump_completion(struct nvme_completion *cpl);

/* busdma load callback: record the bus address of a single-segment mapping. */
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        uint64_t *bus_addr = (uint64_t *)arg;

        *bus_addr = seg[0].ds_addr;
}
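
/*
 * Typical usage (a sketch under the busdma API, not code taken from this
 * driver): pass nvme_single_map as the bus_dmamap_load() callback to
 * capture the bus address of a physically contiguous allocation:
 *
 *      bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map, qpair->cmd,
 *          qpair->num_entries * sizeof(struct nvme_command),
 *          nvme_single_map, &qpair->cmd_bus_addr, 0);
 */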

static __inline struct nvme_request *
nvme_allocate_request(void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
                      void *cb_arg)
{
        struct nvme_request *req;

        req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
        if (req == NULL)
                return (NULL);

        req->payload = payload;
        req->payload_size = payload_size;
        req->cb_fn = cb_fn;
        req->cb_arg = cb_arg;
        req->timeout = NVME_TIMEOUT_IN_SEC;

        return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
        struct nvme_request *req;

        req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
        if (req == NULL)
                return (NULL);

        req->uio = uio;
        req->cb_fn = cb_fn;
        req->cb_arg = cb_arg;
        req->timeout = NVME_TIMEOUT_IN_SEC;

        return (req);
}

#define nvme_free_request(req)  uma_zfree(nvme_request_zone, req)
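
/*
 * Typical consumer pattern (a sketch, not code taken from this driver);
 * done_cb is a hypothetical nvme_cb_fn_t supplied by the caller, and the
 * completion path is expected to invoke it before the request is released
 * with nvme_free_request():
 *
 *      req = nvme_allocate_request(payload, payload_size, done_cb, arg);
 *      if (req == NULL)
 *              return (ENOMEM);
 *      nvme_ctrlr_submit_io_request(ctrlr, req);
 */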

void    nvme_notify_async_consumers(struct nvme_controller *ctrlr,
                                    const struct nvme_completion *async_cpl);

#endif /* __NVME_PRIVATE_H__ */