/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID	0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID	0x80d2111d	/* 8 channel board */

/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 * embedded in the command (prp1), and the rest of the PRP entries
 * will be in a list pointed to by the command (prp2).  This means
 * that real max number of PRP entries we support is 32+1, which
 * results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)

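/*
 * Worked example (a sketch only; PAGE_SIZE and NVME_MAX_XFER_SIZE are
 * platform/configuration dependent): with PAGE_SIZE = 4096 and
 * NVME_MAX_XFER_SIZE = 128 KiB, the list pointed to by prp2 holds
 * 131072 / 4096 = 32 entries, plus the one entry embedded in prp1,
 * giving the 32+1 PRP entries and 32 * PAGE_SIZE = 128 KiB maximum
 * transfer described above.
 */
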
#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 * will allow outstanding on an I/O qpair at any time.  The only advantage in
 * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 * the contents of the submission and completion queues, it will show a longer
 * history of requests.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 * for each controller.
 */

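/*
 * Illustrative sketch of how the per-controller limit is applied (assumes
 * the NVME_CAP_LO_MQES() accessor from nvme.h; MQES is zero-based, so a
 * controller reporting MQES = N supports queues of N + 1 entries):
 *
 *	uint32_t cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
 *	uint32_t mqes = NVME_CAP_LO_MQES(cap_lo);
 *
 *	num_entries = min(num_entries, mqes + 1);
 */
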
#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD	(0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

extern uma_zone_t	nvme_request_zone;
extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	bool			is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;

	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		msix_enabled;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t		async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	bool			is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int			hmb_nchunks;
	size_t			hmb_chunk;
	bus_dma_tag_t		hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
		void			*hmbc_vaddr;
		uint64_t		hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t		hmb_desc_tag;
	bus_dmamap_t		hmb_desc_map;
	struct nvme_hmb_desc	*hmb_desc_vaddr;
	uint64_t		hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg) \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val) \
	do { \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
		    nvme_mmio_offsetof(reg)+4, \
		    (val & 0xFFFFFFFF00000000ULL) >> 32); \
	} while (0);

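/*
 * Usage sketch for the 64-bit write macro (hypothetical call site; the
 * register argument must name a 64-bit member of struct nvme_registers,
 * e.g. the admin submission queue base address):
 *
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *
 * The two 32-bit halves are not written atomically, so this is only safe
 * for registers the controller samples after a later trigger (e.g.
 * setting CC.EN).
 */
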
#define nvme_printf(ctrlr, fmt, args...) \
	device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback which will set status->done to
 * true, usually within microseconds.  If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one.  If still not after ten seconds, then
 * something is wrong with the driver, and panic is the only way to recover.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 10;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 10s.");
}

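/*
 * Typical polled-command pattern (a sketch; mirrors call sites elsewhere
 * in the driver, e.g. during controller initialization):
 *
 *	struct nvme_completion_poll_status	status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		return (ENXIO);
 */
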
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}

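/*
 * nvme_single_map() is intended as a bus_dmamap_load() callback for
 * single-segment mappings; a sketch of a call site:
 *
 *	uint64_t bus_addr;
 *
 *	bus_dmamap_load(tag, map, vaddr, size, nvme_single_map,
 *	    &bus_addr, 0);
 */
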
static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}
	return (req);
}

#define nvme_free_request(req)	uma_zfree(nvme_request_zone, req)

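/*
 * Request allocation/submission sketch (hedged example; the opcode and
 * payload depend on the command being built):
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_vaddr(payload, payload_size,
 *	    cb_fn, cb_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_GET_LOG_PAGE;	// for example
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 *
 * On completion (or failure) the callback runs and the request is
 * returned to the zone via nvme_free_request().
 */
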
void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */