/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d /* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 * will allow outstanding on an I/O qpair at any time.  The only advantage in
 * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 * the contents of the submission and completion queues, it will show a longer
 * history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CC.MQES
 * for each controller.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Page size parameters
 */
#define NVME_BASE_SHIFT		12	/* Several parameters (MPS) are 2^(12+x) */
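
/*
 * Worked example (illustrative only): the MPS field selects a memory page
 * size of 2^(NVME_BASE_SHIFT + MPS) bytes, so MPS = 0 yields 4096-byte
 * pages and MPS = 2 yields 16384-byte pages:
 *
 *	page_size = 1u << (NVME_BASE_SHIFT + mps);
 */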

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

#define NVME_GONE		0xfffffffful

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	bus_dmamap_t			payload_dma_map;

	bus_addr_t			prp_bus_addr;
};

enum nvme_recovery {
	RECOVERY_NONE = 0,	/* Normal operations */
	RECOVERY_START,		/* Deadline has passed, start recovering */
	RECOVERY_RESET,		/* This pass, initiate reset of controller */
	RECOVERY_WAITING,	/* Waiting for the reset to complete */
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;

	struct resource		*res;

	struct callout		timer;

	enum nvme_recovery	recovery_state;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	int64_t			num_intr_handler_calls;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	uint32_t		ready_timeout_in_ms;

	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */
#define QUIRK_INTEL_ALIGNMENT	4	/* Pre NVMe 1.3 performance alignment */
#define QUIRK_AHCI		8	/* Attached via AHCI redirect */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	struct resource		*res;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** LO and HI capacity mask */
	uint32_t		cap_lo;
	uint32_t		cap_hi;

	/** Page size and log2(page_size) - 12 that we're currently using */
	uint32_t		page_size;
	uint32_t		mps;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	bus_dma_tag_t			hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
	} *hmb_chunks;
	bus_dma_tag_t			hmb_desc_tag;
	bus_dmamap_t			hmb_desc_map;
	struct nvme_hmb_desc		*hmb_desc_vaddr;
	uint64_t			hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		\
	} while (0)
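
/*
 * Usage sketch (illustrative only; "sc" stands for any struct
 * nvme_controller pointer):
 *
 *	uint32_t csts = nvme_mmio_read_4(sc, csts);
 *	nvme_mmio_write_8(sc, asq, sc->adminq.cmd_bus_addr);
 *
 * The 8-byte variant is split into two 4-byte writes, low word first,
 * because bus_space does not provide a 64-bit write on all supported
 * platforms.
 */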

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback which will set status->done to
 * true, usually within microseconds.  If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one.  If still not done after ten seconds,
 * then something is wrong with the driver, and panic is the only way to
 * recover.
 *
 * Most commands using this interface aren't actual I/O to the drive's media
 * so they complete within a few microseconds.  Adaptively spin for one tick
 * to catch the vast majority of these without waiting for a tick plus
 * scheduling delays.  Since these are on startup, this drastically reduces
 * startup time.
 */
static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int timeout = ticks + 10 * hz;
	sbintime_t delta_t = SBT_1US;

	while (!atomic_load_acq_int(&status->done)) {
		if (timeout - ticks < 0)
			panic("NVME polled command failed to complete within 10s.");
		pause_sbt("nvme", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}
}
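
/*
 * Typical polled-command pattern (illustrative sketch; "ctrlr" is a
 * hypothetical controller pointer): pass nvme_completion_poll_cb as the
 * completion callback with the status structure as its argument, then
 * block in nvme_completion_poll():
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify failed\n");
 */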

static void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
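
/*
 * nvme_single_map() is the bus_dmamap_load() callback used when exactly one
 * physically contiguous segment is expected; it records that segment's bus
 * address.  Illustrative use (tag, map, vaddr and len are hypothetical):
 *
 *	uint64_t bus_addr;
 *
 *	bus_dmamap_load(tag, map, vaddr, len, nvme_single_map, &bus_addr, 0);
 */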

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
	}

	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}

#define nvme_free_request(req)	free(req, M_NVME)
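
/*
 * Allocation sketch (illustrative only; cb_fn, cb_arg and ctrlr are
 * hypothetical): build a payload-free admin command and hand it to the
 * admin queue.  On completion the driver runs cb_fn and releases the
 * request with nvme_free_request().
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_null(cb_fn, cb_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_KEEP_ALIVE;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */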

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_shared_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */