/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"
#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))
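/*
 * Illustrative usage (editorial sketch, not part of the original header):
 * newbus methods recover the per-device softc this way, e.g. at the top of
 * an attach or detach routine:
 *
 *	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
 */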
MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d /* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/* min and max are defined in admin queue attributes section of spec */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)
/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 *  queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
 *  will allow outstanding on an I/O qpair at any time.  The only advantage in
 *  having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
 *  the contents of the submission and completion queues, it will show a longer
 *  history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)
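/*
 * Compile-time sanity checks of the invariants described above (an editorial
 * sketch, not part of the original header).  CTASSERT comes from
 * <sys/systm.h>, which is already included.
 */
CTASSERT(NVME_IO_TRACKERS <= NVME_IO_ENTRIES);
CTASSERT(NVME_MIN_IO_TRACKERS <= NVME_IO_TRACKERS);
CTASSERT(NVME_IO_TRACKERS <= NVME_MAX_IO_TRACKERS);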
/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
 *  for each controller.
 */
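/*
 * Illustrative sketch (editorial addition, not in the original header) of
 * how that bound is honored: CAP.MQES is the 0-based maximum queue size in
 * bits 15:0 of the capabilities register, so a queue-size choice looks
 * roughly like the following, using the nvme_mmio_read_4() macro defined
 * later in this file:
 *
 *	uint32_t cap_lo = nvme_mmio_read_4(ctrlr, cap_lo);
 *	uint32_t mqes = cap_lo & 0xFFFF;
 *	uint32_t num_entries = min(NVME_IO_ENTRIES, mqes + 1);
 */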
#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)    /* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)
/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 *  it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif
extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;
#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};
struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};
struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};
struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	bool			is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);
struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};
/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;
	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 *  separate from the control registers which are in BAR 0/1.  These
	 *  members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;
	uint32_t		msix_enabled;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;
	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;
	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;
	struct nvme_registers		*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev			*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t			async_event_config;

	uint32_t			num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void				*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t			is_resetting;
	uint32_t			is_initialized;
	uint32_t			notification_sent;

	bool				is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;
	/* Host Memory Buffer */
	uint64_t			hmb_nchunks;
	size_t				hmb_chunk;
	bus_dma_tag_t			hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t		hmbc_map;
		void			*hmbc_vaddr;
		uint64_t		hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t			hmb_desc_tag;
	bus_dmamap_t			hmb_desc_map;
	struct nvme_hmb_desc		*hmb_desc_vaddr;
	uint64_t			hmb_desc_paddr;
};
#define nvme_mmio_offsetof(reg)						       \
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					       \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		       \
	    nvme_mmio_offsetof(reg), val)
#define nvme_mmio_write_8(sc, reg, val)					       \
	do {								       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg), val & 0xFFFFFFFF);		       \
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	       \
		    nvme_mmio_offsetof(reg)+4,				       \
		    (val & 0xFFFFFFFF00000000ULL) >> 32);		       \
	} while (0)
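/*
 * Example use (a sketch of the controller-enable path): the 64-bit admin
 * queue base registers are programmed as two 32-bit writes, e.g.
 *
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *	nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
 */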
#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)
void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
int	nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);
/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback, which sets status->done to
 * true, usually within microseconds.  If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one.  If the command still has not
 * completed after ten seconds, then something is wrong with the driver, and
 * panic is the only way to recover.
 */
static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 10;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 10s.");
}
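/*
 * Typical synchronous-command pattern built on the helper above (an
 * illustrative sketch; the admin-command paths in the driver follow this
 * shape):
 *
 *	struct nvme_completion_poll_status	status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		nvme_printf(ctrlr, "identify controller failed!\n");
 */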
static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}
static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}
	return (req);
}
#define nvme_free_request(req)	free(req, M_NVME)
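/*
 * The allocate/submit pairing, sketched for a payload-free admin command
 * (illustrative; mirrors the shape of the helpers in nvme_ctrlr_cmd.c):
 *
 *	struct nvme_request *req;
 *
 *	req = nvme_allocate_request_null(cb_fn, cb_arg);
 *	req->cmd.opc = NVME_OPC_ABORT;
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 *
 * Note that the request is freed with nvme_free_request() by the completion
 * path, not by the submitter.
 */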
void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_intx_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);
#endif /* __NVME_PRIVATE_H__ */