/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"
#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))
MALLOC_DECLARE(M_NVME);
#define CHATHAM_PCI_ID		0x20118086
#define CHATHAM_CONTROL_BAR	0

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */
#define NVME_MAX_PRP_LIST_ENTRIES	(32)

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that the real max number of PRP entries we support is 32+1, which
 *  results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_XFER_SIZE	(NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE)
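/*
 * For example, with the common PAGE_SIZE of 4096 this evaluates to
 * 32 * 4096 = 131072 bytes, i.e. a 128 KiB maximum transfer per command.
 */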
#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)
/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 *  will allow outstanding on an I/O qpair at any time.  The only advantage in
 *  having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 *  the contents of the submission and completion queues, it will show a longer
 *  history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)
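/*
 * For example, with the defaults above a qpair admits at most 128
 * commands in flight at once, while its 256-entry rings retain twice
 * that much submission/completion history for debugging.
 */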
/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */
#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif
/*
 * Use presence of the BIO_UNMAPPED flag to determine whether unmapped I/O
 *  support and the bus_dmamap_load_bio API are available on the target
 *  kernel.  This will ease porting back to earlier stable branches at a
 *  later point.
 */
#ifdef BIO_UNMAPPED
#define NVME_UNMAPPED_BIO_SUPPORT
#endif
extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;
struct nvme_completion_poll_status {

	struct nvme_completion	cpl;
	/* Set to TRUE by nvme_completion_poll_cb() when the command completes. */
	boolean_t		done;
};
#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#ifdef NVME_UNMAPPED_BIO_SUPPORT
#define NVME_REQUEST_BIO	4
#endif
struct nvme_request {

	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
		struct uio		*uio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	boolean_t			timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};
struct nvme_async_event_request {

	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};
struct nvme_tracker {

	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	/* Command identifier, echoed back in the completion entry. */
	uint16_t			cid;

	uint64_t			prp[NVME_MAX_PRP_LIST_ENTRIES];
	bus_addr_t			prp_bus_addr;
	bus_dmamap_t			prp_dma_map;
};
struct nvme_qpair {

	struct nvme_controller	*ctrlr;
	uint32_t		id;
	uint32_t		phase;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		max_xfer_size;
	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;

	bus_dmamap_t		cmd_dma_map;
	uint64_t		cmd_bus_addr;

	bus_dmamap_t		cpl_dma_map;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	boolean_t		is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
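/*
 * Example (sketch): after copying a command into qpair->cmd[qpair->sq_tail]
 * and advancing sq_tail, the driver rings the submission queue doorbell
 * through the qpair's doorbell offset, along the lines of:
 *
 *	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
 *	    qpair->sq_tdbl_off, qpair->sq_tail);
 */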
struct nvme_namespace {

	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint16_t			id;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
};
/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {

	device_t		dev;

	uint32_t		ready_timeout_in_ms;

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;
	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;
#ifdef CHATHAM2
	bus_space_tag_t		chatham_bus_tag;
	bus_space_handle_t	chatham_bus_handle;
	int			chatham_resource_id;
	struct resource		*chatham_resource;
#endif
	uint32_t		msix_enabled;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	boolean_t		per_cpu_io_queues;
	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;
	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;
	bus_dma_tag_t		hw_desc_tag;
	bus_dmamap_t		hw_desc_map;
	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];
	struct nvme_async_event_request		aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;

	STAILQ_HEAD(, nvme_request)	fail_req;
#ifdef CHATHAM2
	uint64_t		chatham_size;
	uint64_t		chatham_lbas;
#endif
};
#define nvme_mmio_offsetof(reg) \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg), val)
#define nvme_mmio_write_8(sc, reg, val) \
	do { \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg)+4, \
		    (val & 0xFFFFFFFF00000000UL) >> 32); \
	} while (0)
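/*
 * Usage sketch: programming the admin queue base address registers with
 * the DMA addresses established at qpair construction time, e.g.:
 *
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
 *
 * (asq/acq being the admin submission/completion queue base fields of
 * struct nvme_registers.)
 */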
#ifdef CHATHAM2
#define chatham_read_4(softc, reg) \
	bus_space_read_4((softc)->chatham_bus_tag, \
	    (softc)->chatham_bus_handle, reg)
#define chatham_write_8(sc, reg, val) \
	do { \
		bus_space_write_4((sc)->chatham_bus_tag, \
		    (sc)->chatham_bus_handle, reg, val & 0xffffffff); \
		bus_space_write_4((sc)->chatham_bus_tag, \
		    (sc)->chatham_bus_handle, reg+4, \
		    (val & 0xFFFFFFFF00000000UL) >> 32); \
	} while (0)

#endif /* CHATHAM2 */
#if __FreeBSD_version < 800054
#define wmb()	__asm volatile("sfence" ::: "memory")
#define mb()	__asm volatile("mfence" ::: "memory")
#endif
#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)
void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);
void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint16_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que, uint16_t vector,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      union nvme_critical_warning_state state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg,
			 int error);
void	nvme_payload_map_uio(void *arg, bus_dma_segment_t *seg, int nseg,
			     bus_size_t mapsize, int error);
void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);
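/*
 * nvme_completion_poll_cb pairs with struct nvme_completion_poll_status to
 * turn any of the asynchronous command helpers above into a synchronous
 * call.  A typical polling sequence (sketch):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = FALSE;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	while (status.done == FALSE)
 *		DELAY(5);
 *	if (nvme_completion_is_error(&status.cpl))
 *		... handle the failure ...
 */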
int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);
void	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
			     uint16_t vector, uint32_t num_entries,
			     uint32_t num_trackers, uint32_t max_xfer_size,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
void	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc,
					   boolean_t print_on_error);
void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

int	nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	/* Record the bus address of the single mapped segment. */
	*bus_addr = seg[0].ds_addr;
}
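/*
 * nvme_single_map is the bus_dmamap_load() callback used for objects that
 * must occupy one physically contiguous segment.  For instance, capturing
 * the bus address of a qpair's command ring looks roughly like:
 *
 *	bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map, qpair->cmd,
 *	    qpair->num_entries * sizeof(struct nvme_command),
 *	    nvme_single_map, &qpair->cmd_bus_addr, 0);
 */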
static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	/* M_NOWAIT: may be called from interrupt context, so this can fail. */
	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = TRUE;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_UIO;
		req->u.uio = uio;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
#ifdef NVME_UNMAPPED_BIO_SUPPORT
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
#else
		/* Fall back to a mapped virtual-address payload. */
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = bio->bio_data;
		req->payload_size = bio->bio_bcount;
#endif
	}
	return (req);
}
#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)
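/*
 * Request lifecycle (sketch): the submitter allocates and fills a request,
 * then hands it to the controller; the completion path invokes cb_fn and
 * releases the request with nvme_free_request(), so the caller must not
 * free it after submission.  For a payload-less admin command:
 *
 *	req = nvme_allocate_request_null(cb_fn, cb_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_SET_FEATURES;
 *	... fill in cdw10/cdw11 ...
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */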
void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
#endif /* __NVME_PRIVATE_H__ */