/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include "nvme.h"
#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);
#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */
/*
 * For commands requiring more than 2 PRP entries, one PRP will be
 *  embedded in the command (prp1), and the rest of the PRP entries
 *  will be in a list pointed to by the command (prp2).  This means
 *  that real max number of PRP entries we support is 32+1, which
 *  results in a max xfer size of 32*PAGE_SIZE.
 */
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)
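
/*
 * Worked example (illustrative; assumes a 4 KiB PAGE_SIZE): a maximal
 *  32*PAGE_SIZE = 128 KiB transfer touches 32 pages, or 33 when the
 *  payload is not page-aligned.  prp1 covers the first (possibly partial)
 *  page and the prp2 list covers the remaining entries, which is where
 *  the 32+1 limit above comes from.
 */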
#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)
/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 *  will allow outstanding on an I/O qpair at any time.  The only advantage in
 *  having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 *  the contents of the submission and completion queues, it will show a longer
 *  history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)
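
/*
 * Example with the defaults above: at most 128 commands are outstanding on
 *  an I/O qpair at once, while its 256-entry rings retain roughly one extra
 *  generation of already-completed commands for inspection in a dump.
 */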
/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */
#define NVME_INT_COAL_TIME		(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD	(0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)
/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif
extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;
struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;
#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5
struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	STAILQ_ENTRY(nvme_request)	stailq;
};
struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};
struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;

	bus_addr_t			prp_bus_addr;
};
struct nvme_qpair {
	struct nvme_controller	*ctrlr;

	struct resource		*res;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	int64_t			num_intr_handler_calls;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
};
/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	uint32_t		ready_timeout_in_ms;

	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		msix_enabled;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	struct resource		*res;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	bus_dma_tag_t			hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
	} *hmb_chunks;
	bus_dma_tag_t			hmb_desc_tag;
	bus_dmamap_t			hmb_desc_map;
	struct nvme_hmb_desc		*hmb_desc_vaddr;
	uint64_t			hmb_desc_paddr;
};
#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		\
	} while (0)
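
/*
 * Usage sketch (illustrative; assumes the asq/acq 64-bit register fields
 *  of struct nvme_registers): a 64-bit admin queue base address is
 *  programmed via the two 32-bit writes the macro above expands to, e.g.
 *
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
 */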
#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);
void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);
/*
 * Wait for a command to complete using the nvme_completion_poll_cb.
 *  Used in limited contexts where the caller knows it's OK to block
 *  briefly while the command runs.  The ISR will run the callback which
 *  will set status->done to true, usually within microseconds.  If not,
 *  then after one second the timeout handler should reset the controller
 *  and abort all outstanding requests, including this polled one.  If
 *  it is still not done after ten seconds, then something is wrong with
 *  the driver, and panic is the only way to recover.
 */
static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 10;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 10s.");
}
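
/*
 * Typical synchronous usage (a sketch; assumes the caller may block and
 *  that nvme_completion_is_error() is available from nvme.h):
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify failed\n");
 */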
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
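
/*
 * Illustrative busdma pairing (a sketch; "allocsz" and "queuemem_phys" are
 *  hypothetical locals): nvme_single_map serves as the bus_dmamap_load()
 *  callback for buffers known to occupy a single DMA segment, storing the
 *  segment's bus address through the callback argument:
 *
 *	uint64_t queuemem_phys;
 *
 *	bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, qpair->cmd,
 *	    allocsz, nvme_single_map, &queuemem_phys, 0);
 */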
static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}
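
/*
 * Putting the allocators to work (a sketch; NVME_OPC_IDENTIFY comes from
 *  nvme.h, and real callers also fill in opcode-specific cdw10..15 fields):
 *
 *	req = nvme_allocate_request_vaddr(payload,
 *	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);
 *	req->cmd.opc = NVME_OPC_IDENTIFY;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */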
static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}
	return (req);
}
#define nvme_free_request(req)	free(req, M_NVME)
void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);
#endif /* __NVME_PRIVATE_H__ */