/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#ifdef CHATHAM2
#define CHATHAM_PCI_ID		0x20118086
#define CHATHAM_CONTROL_BAR	0
#endif

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that real max number of PRP entries we support is 32+1, which
 *  results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)
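
/*
 * Illustrative sketch (ours, not a driver API): how many PRP entries a
 *  virtually contiguous buffer consumes under the scheme above.  prp1 may
 *  point into the middle of a page; every entry after it must be
 *  page-aligned, so worst-case alignment costs one extra entry (the "+1"
 *  noted above).
 */
static __inline uint32_t
nvme_prp_count_example(vm_offset_t vaddr, uint32_t len)
{

	return (howmany((vaddr & PAGE_MASK) + len, PAGE_SIZE));
}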

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 *  will allow outstanding on an I/O qpair at any time.  The only advantage in
 *  having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 *  the contents of the submission and completion queues, it will show a longer
 *  history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)
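
/*
 * Hypothetical sketch (ours): how a tunable tracker count would be clamped
 *  into the supported range above before constructing an I/O qpair.
 */
static __inline uint32_t
nvme_clamp_io_trackers_example(uint32_t v)
{

	if (v < NVME_MIN_IO_TRACKERS)
		return (NVME_MIN_IO_TRACKERS);
	if (v > NVME_MAX_IO_TRACKERS)
		return (NVME_MAX_IO_TRACKERS);
	return (v);
}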

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

/*
 * Use presence of the BIO_UNMAPPED flag to determine whether unmapped I/O
 *  support and the bus_dmamap_load_bio API are available on the target
 *  kernel.  This will ease porting back to earlier stable branches at a
 *  later point.
 */
#ifdef BIO_UNMAPPED
#define NVME_UNMAPPED_BIO_SUPPORT
#endif

extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;

struct nvme_completion_poll_status {

	struct nvme_completion	cpl;
	boolean_t		done;
};

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#ifdef NVME_UNMAPPED_BIO_SUPPORT
#define NVME_REQUEST_BIO	4
#endif

struct nvme_request {

	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	boolean_t			timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {

	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {

	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			prp[NVME_MAX_PRP_LIST_ENTRIES];
	bus_addr_t			prp_bus_addr;
	bus_dmamap_t			prp_dma_map;
};

struct nvme_qpair {

	struct nvme_controller	*ctrlr;
	uint32_t		id;
	uint32_t		phase;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;

	bus_dmamap_t		cmd_dma_map;
	uint64_t		cmd_bus_addr;

	bus_dmamap_t		cpl_dma_map;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	boolean_t		is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {

	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint16_t			id;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

	device_t		dev;

	struct mtx		lock;

	uint32_t		ready_timeout_in_ms;

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

#ifdef CHATHAM2
	bus_space_tag_t		chatham_bus_tag;
	bus_space_handle_t	chatham_bus_handle;
	int			chatham_resource_id;
	struct resource		*chatham_resource;
#endif

	uint32_t		msix_enabled;
	uint32_t		force_intx;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	boolean_t		per_cpu_io_queues;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	uint32_t			num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	boolean_t			is_failed;

	STAILQ_HEAD(, nvme_request)	fail_req;

#ifdef CHATHAM2
	uint64_t		chatham_size;
	uint64_t		chatham_lbas;
#endif
};
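
/*
 * Illustrative sketch (ours): how the sq_tdbl_off/cq_hdbl_off byte offsets
 *  recorded in struct nvme_qpair above are meant to be used - here, ringing
 *  the submission queue tail doorbell after posting new commands.
 */
static __inline void
nvme_qpair_ring_sq_doorbell_example(struct nvme_qpair *qpair, uint32_t sq_tail)
{

	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
	    qpair->sq_tdbl_off, sq_tail);
}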

#define nvme_mmio_offsetof(reg) \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg), val)
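
/*
 * Illustrative sketch (ours): the per-controller I/O queue size limit noted
 *  earlier comes from the 0-based CAP.MQES field, which occupies bits 15:0
 *  of the capabilities register (assuming the cap_lo member of struct
 *  nvme_registers from nvme.h).
 */
static __inline uint32_t
nvme_max_io_entries_example(struct nvme_controller *ctrlr)
{

	return ((nvme_mmio_read_4(ctrlr, cap_lo) & 0xFFFF) + 1);
}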

#define nvme_mmio_write_8(sc, reg, val) \
	do { \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg)+4, \
		    (val & 0xFFFFFFFF00000000UL) >> 32); \
	} while (0)
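
/*
 * Usage sketch (ours): the 64-bit accessor above is how a queue base address
 *  register would be programmed - e.g. pointing ASQ at the admin submission
 *  queue (assuming the asq member of struct nvme_registers from nvme.h).
 */
static __inline void
nvme_ctrlr_program_asq_example(struct nvme_controller *ctrlr)
{

	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
}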

#ifdef CHATHAM2
#define chatham_read_4(softc, reg) \
	bus_space_read_4((softc)->chatham_bus_tag, \
	    (softc)->chatham_bus_handle, reg)

#define chatham_write_8(sc, reg, val) \
	do { \
		bus_space_write_4((sc)->chatham_bus_tag, \
		    (sc)->chatham_bus_handle, reg, val & 0xffffffff); \
		bus_space_write_4((sc)->chatham_bus_tag, \
		    (sc)->chatham_bus_handle, reg+4, \
		    (val & 0xFFFFFFFF00000000UL) >> 32); \
	} while (0)

#endif /* CHATHAM2 */

#if __FreeBSD_version < 800054
#define wmb()	__asm volatile("sfence" ::: "memory")
#define mb()	__asm volatile("mfence" ::: "memory")
#endif

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint16_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que, uint16_t vector,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      union nvme_critical_warning_state state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);
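
/*
 * Illustrative sketch (ours) of the synchronous pattern built on
 *  nvme_completion_poll_cb: clear done, submit a command with the status
 *  struct as cb_arg and nvme_completion_poll_cb as cb_fn, then spin until
 *  the callback fires from completion context.
 */
static __inline void
nvme_completion_poll_wait_example(struct nvme_completion_poll_status *status)
{

	while (!status->done)
		DELAY(5);
}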

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

void	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
			     uint16_t vector, uint32_t num_entries,
			     uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
void	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc,
					   boolean_t print_on_error);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	*bus_addr = seg[0].ds_addr;
}
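
/*
 * Usage sketch (ours): nvme_single_map is the callback to pass to
 *  bus_dmamap_load() for buffers known to occupy a single segment, e.g.:
 *
 *	bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map, qpair->cmd,
 *	    qpair->num_entries * sizeof(struct nvme_command),
 *	    nvme_single_map, &qpair->cmd_bus_addr, 0);
 */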

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = TRUE;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
#ifdef NVME_UNMAPPED_BIO_SUPPORT
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
#else
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = bio->bio_data;
		req->payload_size = bio->bio_bcount;
#endif
	}
	return (req);
}

#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)
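
/*
 * Illustrative sketch (ours): the allocate-then-submit pattern that the
 *  nvme_ctrlr_cmd_* helpers follow for admin commands with no payload.  A
 *  real caller fills in req->cmd (opcode, cdw10, ...) before submitting.
 */
static __inline struct nvme_request *
nvme_submit_admin_null_example(struct nvme_controller *ctrlr,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req != NULL)
		nvme_ctrlr_submit_admin_request(ctrlr, req);
	return (req);
}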

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */