/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
28
#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"
45
/* Convert a device_t into the nvme_controller softc it carries. */
#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

/* Compile in support for the Chatham development board. */
#define CHATHAM2

#ifdef CHATHAM2
#define CHATHAM_PCI_ID          0x20118086
#define CHATHAM_CONTROL_BAR     0
#endif

#define IDT32_PCI_ID            0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID             0x80d2111d /* 8 channel board */

/* Entries in a tracker's embedded PRP list (see comment below). */
#define NVME_MAX_PRP_LIST_ENTRIES       (32)
61
/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that real max number of PRP entries we support is 32+1, which
 *  results in a max xfer size of 32*PAGE_SIZE.
 *
 * The expansion is parenthesized so the macro evaluates correctly
 *  inside larger expressions (e.g. division or multiplication).
 */
#define NVME_MAX_XFER_SIZE      (NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
70
/* Admin queue sizing: ring entries and outstanding-command trackers. */
#define NVME_ADMIN_TRACKERS     (16)
#define NVME_ADMIN_ENTRIES      (128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES  (2)
#define NVME_MAX_ADMIN_ENTRIES  (4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 *  will allow outstanding on an I/O qpair at any time.  The only advantage in
 *  having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 *  the contents of the submission and completion queues, it will show a longer
 *  history of data.
 */
#define NVME_IO_ENTRIES         (256)
#define NVME_IO_TRACKERS        (128)
#define NVME_MIN_IO_TRACKERS    (16)
#define NVME_MAX_IO_TRACKERS    (1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CC.MQES
 *  for each controller.
 */

/* Default interrupt coalescing settings. */
#define NVME_INT_COAL_TIME      (0)     /* disabled */
#define NVME_INT_COAL_THRESHOLD (0)     /* 0-based */

#define NVME_MAX_NAMESPACES     (16)
#define NVME_MAX_CONSUMERS      (2)
#define NVME_MAX_ASYNC_EVENTS   (4)

/* Per-command timeout, in seconds. */
#define NVME_TIMEOUT_IN_SEC     (30)

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE         (64)
#endif

/* UMA zone from which struct nvme_request objects are allocated. */
extern uma_zone_t nvme_request_zone;
109
/*
 * One NVMe request: the command to issue, its data payload (either a
 *  flat buffer or a uio), and the callback to invoke on completion.
 */
struct nvme_request {

	struct nvme_command		cmd;		/* submission queue entry */
	void				*payload;	/* flat data buffer, if any */
	uint32_t			payload_size;	/* size of payload in bytes */
	struct uio			*uio;		/* uio-described payload, if any */
	nvme_cb_fn_t			cb_fn;		/* completion callback */
	void				*cb_arg;	/* argument passed to cb_fn */
	STAILQ_ENTRY(nvme_request)	stailq;		/* linkage on qpair's queued_req */
};
120
/*
 * A tracker follows one in-flight request on a qpair: it holds the
 *  command identifier, the timeout callout, and the DMA resources for
 *  the payload and the PRP list.
 */
struct nvme_tracker {

	SLIST_ENTRY(nvme_tracker)	slist;		/* linkage on qpair's free_tr */
	struct nvme_request		*req;		/* request being tracked, if any */
	struct nvme_qpair		*qpair;		/* owning queue pair */
	struct callout			timer;		/* per-command timeout callout */
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;		/* NVMe command identifier */

	/* PRP list referenced by prp2 for transfers needing >2 entries. */
	uint64_t			prp[NVME_MAX_PRP_LIST_ENTRIES];
	bus_addr_t			prp_bus_addr;	/* bus address of prp[] */
	bus_dmamap_t			prp_dma_map;
};
134
/*
 * One submission/completion queue pair (admin or I/O), including the
 *  DMA state for the queue rings and the trackers for outstanding
 *  commands.
 */
struct nvme_qpair {

	struct nvme_controller	*ctrlr;		/* parent controller */
	uint32_t		id;		/* queue identifier */
	uint32_t		phase;		/* expected completion phase tag */

	uint16_t		vector;		/* interrupt vector */
	int			rid;		/* interrupt resource id */
	struct resource		*res;		/* interrupt resource */
	void			*tag;		/* interrupt handler cookie */

	uint32_t		max_xfer_size;	/* max i/o size, bytes */
	uint32_t		num_entries;	/* entries per sq/cq ring */
	uint32_t		num_trackers;	/* max outstanding commands */
	uint32_t		sq_tdbl_off;	/* sq tail doorbell offset */
	uint32_t		cq_hdbl_off;	/* cq head doorbell offset */

	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	/* Statistics. */
	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;

	struct nvme_command	*cmd;		/* submission queue ring */
	struct nvme_completion	*cpl;		/* completion queue ring */

	bus_dma_tag_t		dma_tag;

	bus_dmamap_t		cmd_dma_map;
	uint64_t		cmd_bus_addr;	/* bus address of cmd ring */

	bus_dmamap_t		cpl_dma_map;
	uint64_t		cpl_bus_addr;	/* bus address of cpl ring */

	SLIST_HEAD(, nvme_tracker)	free_tr;	/* idle trackers */
	STAILQ_HEAD(, nvme_request)	queued_req;	/* requests awaiting a tracker */

	/* Outstanding trackers — presumably indexed by cid; confirm in qpair code. */
	struct nvme_tracker	**act_tr;

	/* Lock on its own cache line to limit false sharing. */
	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
178
/* State for one namespace exposed by a controller. */
struct nvme_namespace {

	struct nvme_controller		*ctrlr;	/* parent controller */
	struct nvme_namespace_data	data;	/* identify-namespace data */
	uint16_t			id;	/* namespace id */
	uint16_t			flags;
	struct cdev			*cdev;	/* character device node */
};
187
/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

	device_t		dev;

	uint32_t		ready_timeout_in_ms;

	/* Mapping of the controller register BAR (0/1). */
	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

#ifdef CHATHAM2
	/* Mapping of the Chatham control BAR. */
	bus_space_tag_t		chatham_bus_tag;
	bus_space_handle_t	chatham_bus_handle;
	int			chatham_resource_id;
	struct resource		*chatham_resource;
#endif

	uint32_t		msix_enabled;
	uint32_t		force_intx;

	uint32_t		num_io_queues;
	boolean_t		per_cpu_io_queues;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	struct nvme_qpair	adminq;		/* admin queue pair */
	struct nvme_qpair	*ioq;		/* I/O queue pairs */

	/* Controller registers, mapped from BAR 0/1. */
	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;	/* identify-controller data */
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	boolean_t			is_started;

#ifdef CHATHAM2
	uint64_t		chatham_size;
	uint64_t		chatham_lbas;
#endif
};
262
/*
 * Accessors for the memory-mapped controller registers.  `reg' is a
 *  member name of struct nvme_registers, so it cannot be parenthesized.
 */
#define nvme_mmio_offsetof(reg)						       \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					       \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg), (val))

/*
 * 64-bit registers are written as two 32-bit stores, low dword first.
 *  `val' is parenthesized (it is expanded inside & expressions, which
 *  would otherwise misbind for arguments like `a | b'), and it is
 *  evaluated twice — avoid arguments with side effects.  No trailing
 *  semicolon after while (0), so the macro composes with if/else.
 */
#define nvme_mmio_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg), (val) & 0xFFFFFFFF);	       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg)+4,				       \
		    ((val) & 0xFFFFFFFF00000000UL) >> 32);		       \
	} while (0)
282
283 #ifdef CHATHAM2
/*
 * Chatham register accessors.  Here `reg' is a byte offset expression,
 *  so both `reg' and `val' are parenthesized for macro hygiene.  `val'
 *  is evaluated twice — avoid arguments with side effects.  No trailing
 *  semicolon after while (0), so the macro composes with if/else.
 */
#define chatham_read_4(softc, reg) \
	bus_space_read_4((softc)->chatham_bus_tag,			       \
	    (softc)->chatham_bus_handle, (reg))

#define chatham_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->chatham_bus_tag,		       \
		    (sc)->chatham_bus_handle, (reg), (val) & 0xffffffff);      \
		bus_space_write_4((sc)->chatham_bus_tag,		       \
		    (sc)->chatham_bus_handle, (reg)+4,			       \
		    ((val) & 0xFFFFFFFF00000000UL) >> 32);		       \
	} while (0)
296
297 #endif /* CHATHAM2 */
298
/*
 * Memory barrier fallbacks for kernels older than 800054, which lack
 *  mb()/wmb().  NOTE(review): these are x86 fence instructions; a
 *  pre-800054 build on another architecture would need its own
 *  definitions — confirm supported platforms.
 */
#if __FreeBSD_version < 800054
#define wmb()   __asm volatile("sfence" ::: "memory")
#define mb()    __asm volatile("mfence" ::: "memory")
#endif
303
void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

/*
 * Admin command helpers.  Each builds one admin command; the result is
 *  delivered asynchronously through cb_fn/cb_arg.
 */
void	nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
				   uint8_t feature, uint32_t cdw11,
				   void *payload, uint32_t payload_size,
				   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint16_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que, uint16_t vector,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_asynchronous_event_config(struct nvme_controller *ctrlr,
					   union nvme_critical_warning_state state,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
						  nvme_cb_fn_t cb_fn,
						  void *cb_arg);

/* bus_dma callbacks for mapping request payloads. */
void	nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg,
			 int error);
void	nvme_payload_map_uio(void *arg, bus_dma_segment_t *seg, int nseg,
			     bus_size_t mapsize, int error);

/* Controller lifecycle and request submission. */
int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
int	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);

/* Queue pair construction, submission and completion processing. */
void	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
			     uint16_t vector, uint32_t num_entries,
			     uint32_t num_trackers, uint32_t max_xfer_size,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_cmd(struct nvme_qpair *qpair,
			      struct nvme_tracker *tr);
void	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);

void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

/* Namespace construction and raw I/O entry point. */
int	nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
			  struct nvme_controller *ctrlr);

int	nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

/* Debug helpers. */
void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);
389
390 static __inline void
391 nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
392 {
393         uint64_t *bus_addr = (uint64_t *)arg;
394
395         *bus_addr = seg[0].ds_addr;
396 }
397
398 static __inline struct nvme_request *
399 nvme_allocate_request(void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn, 
400                       void *cb_arg)
401 {
402         struct nvme_request *req;
403
404         req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
405         if (req == NULL)
406                 return (NULL);
407
408         req->payload = payload;
409         req->payload_size = payload_size;
410         req->cb_fn = cb_fn;
411         req->cb_arg = cb_arg;
412
413         return (req);
414 }
415
416 static __inline struct nvme_request *
417 nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
418 {
419         struct nvme_request *req;
420
421         req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
422         if (req == NULL)
423                 return (NULL);
424
425         req->uio = uio;
426         req->cb_fn = cb_fn;
427         req->cb_arg = cb_arg;
428
429         return (req);
430 }
431
432 #define nvme_free_request(req)  uma_zfree(nvme_request_zone, req)
433
#endif /* __NVME_PRIVATE_H__ */