2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (C) 2012-2016 Intel Corporation
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
36 #include <sys/sysctl.h>
38 #include "nvme_private.h"
/*
 * Loader tunable selecting which disk abstraction is created for NVMe
 * namespaces: 1 = nvd(4) devices, 0 = nda(4)/CAM devices (see the
 * sysctl description string below).
 */
41 #define NVME_USE_NVD 1
44 int nvme_use_nvd = NVME_USE_NVD;
/* When true, commands are printed verbosely when they fail. */
45 bool nvme_verbose_cmd_dump = false;
/* Root of the hw.nvme sysctl tree, plus OIDs for the two tunables above. */
47 SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
48 "NVMe sysctl tunables");
49 SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
50 &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");
51 SYSCTL_BOOL(_hw_nvme, OID_AUTO, verbose_cmd_dump, CTLFLAG_RWTUN,
52 &nvme_verbose_cmd_dump, 0,
53 "enable verbose command printing when a command fails");
/*
 * Debug helper: print a queue pair's id and phase bit, then every
 * entry of its completion and submission queues to the console.
 * Invoked from the per-queue "dump_debug" sysctl handler below.
 */
56 nvme_dump_queue(struct nvme_qpair *qpair)
58 struct nvme_completion *cpl;
59 struct nvme_command *cmd;
62 printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);
/* Walk and print each completion queue entry. */
64 printf("Completion queue:\n");
65 for (i = 0; i < qpair->num_entries; i++) {
68 nvme_dump_completion(cpl);
/* Walk and print each submission queue entry. */
71 printf("Submission queue:\n");
72 for (i = 0; i < qpair->num_entries; i++) {
75 nvme_dump_command(cmd);
/*
 * Sysctl handler for the per-queue "dump_debug" OID: a write causes
 * the queue pair passed as arg1 to be dumped via nvme_dump_queue().
 */
80 nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
82 struct nvme_qpair *qpair = arg1;
85 int error = sysctl_handle_int(oidp, &val, 0, req);
91 nvme_dump_queue(qpair);
/*
 * Sysctl handler for the interrupt coalescing time (microseconds).
 * sysctl_handle_int() updates ctrlr->int_coal_time in place; if the
 * value actually changed, the new time/threshold pair is pushed to
 * the controller via a Set Features command.
 */
97 nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
99 struct nvme_controller *ctrlr = arg1;
100 uint32_t oldval = ctrlr->int_coal_time;
101 int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
107 if (oldval != ctrlr->int_coal_time)
108 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
109 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
/*
 * Sysctl handler for the interrupt coalescing threshold; mirrors
 * nvme_sysctl_int_coal_time() above — on change, both coalescing
 * parameters are re-sent to the controller.
 */
116 nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
118 struct nvme_controller *ctrlr = arg1;
119 uint32_t oldval = ctrlr->int_coal_threshold;
120 int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
126 if (oldval != ctrlr->int_coal_threshold)
127 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
128 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
/*
 * Sysctl handler for the command timeout period (seconds).  Unlike the
 * coalescing handlers, the new value goes through a local copy so the
 * controller field is only updated after range validation against
 * [NVME_MIN_TIMEOUT_PERIOD, NVME_MAX_TIMEOUT_PERIOD]; reads and failed
 * or out-of-range writes leave ctrlr->timeout_period untouched.
 */
135 nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
137 struct nvme_controller *ctrlr = arg1;
138 uint32_t newval = ctrlr->timeout_period;
139 int error = sysctl_handle_int(oidp, &newval, 0, req);
/* No new value supplied (read) or handler error: nothing to store. */
141 if (error || (req->newptr == NULL))
144 if (newval > NVME_MAX_TIMEOUT_PERIOD ||
145 newval < NVME_MIN_TIMEOUT_PERIOD) {
148 ctrlr->timeout_period = newval;
/*
 * Reset one queue pair's statistic counters.  Note that
 * num_intr_handler_calls is reset to 1 rather than 0 (see the
 * in-function comment referencing nvme_qpair_process_completions).
 */
155 nvme_qpair_reset_stats(struct nvme_qpair *qpair)
159 * Reset the values. Due to sanity checks in
160 * nvme_qpair_process_completions, we reset the number of interrupt
164 qpair->num_intr_handler_calls = 1;
165 qpair->num_retries = 0;
166 qpair->num_failures = 0;
167 qpair->num_ignored = 0;
/*
 * Sysctl handler reporting the controller-wide total of submitted
 * commands: the admin queue's count plus the sum over all I/O queues.
 */
171 nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
173 struct nvme_controller *ctrlr = arg1;
174 int64_t num_cmds = 0;
177 num_cmds = ctrlr->adminq.num_cmds;
179 for (i = 0; i < ctrlr->num_io_queues; i++)
180 num_cmds += ctrlr->ioq[i].num_cmds;
182 return (sysctl_handle_64(oidp, &num_cmds, 0, req));
/*
 * Sysctl handler reporting the total number of interrupt handler
 * invocations, summed across the admin queue and all I/O queues.
 */
186 nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
188 struct nvme_controller *ctrlr = arg1;
189 int64_t num_intr_handler_calls = 0;
192 num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
194 for (i = 0; i < ctrlr->num_io_queues; i++)
195 num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
197 return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
/*
 * Sysctl handler reporting the total number of retried commands,
 * summed across the admin queue and all I/O queues.
 */
201 nvme_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
203 struct nvme_controller *ctrlr = arg1;
204 int64_t num_retries = 0;
207 num_retries = ctrlr->adminq.num_retries;
209 for (i = 0; i < ctrlr->num_io_queues; i++)
210 num_retries += ctrlr->ioq[i].num_retries;
212 return (sysctl_handle_64(oidp, &num_retries, 0, req));
/*
 * Sysctl handler reporting the total number of commands that failed
 * after all retries, summed across the admin queue and all I/O queues.
 */
216 nvme_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
218 struct nvme_controller *ctrlr = arg1;
219 int64_t num_failures = 0;
222 num_failures = ctrlr->adminq.num_failures;
224 for (i = 0; i < ctrlr->num_io_queues; i++)
225 num_failures += ctrlr->ioq[i].num_failures;
227 return (sysctl_handle_64(oidp, &num_failures, 0, req));
/*
 * Sysctl handler reporting the total number of administratively
 * ignored interrupts, summed across the admin queue and all I/O
 * queues.
 */
231 nvme_sysctl_num_ignored(SYSCTL_HANDLER_ARGS)
233 struct nvme_controller *ctrlr = arg1;
234 int64_t num_ignored = 0;
237 num_ignored = ctrlr->adminq.num_ignored;
239 for (i = 0; i < ctrlr->num_io_queues; i++)
240 num_ignored += ctrlr->ioq[i].num_ignored;
242 return (sysctl_handle_64(oidp, &num_ignored, 0, req));
/*
 * Sysctl handler: a write to "reset_stats" clears the statistic
 * counters of the admin queue and every I/O queue through
 * nvme_qpair_reset_stats().
 */
246 nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
248 struct nvme_controller *ctrlr = arg1;
251 int error = sysctl_handle_int(oidp, &val, 0, req);
257 nvme_qpair_reset_stats(&ctrlr->adminq);
259 for (i = 0; i < ctrlr->num_io_queues; i++)
260 nvme_qpair_reset_stats(&ctrlr->ioq[i]);
/*
 * Register the per-queue-pair sysctl leaves under que_tree: queue
 * geometry (num_entries, num_trackers), current head/tail indices as
 * tracked by the driver, the 64-bit statistic counters, and the
 * "dump_debug" proc handler.
 */
267 nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
268 struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
270 struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
/* Static geometry and driver-observed queue positions. */
272 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
273 CTLFLAG_RD, &qpair->num_entries, 0,
274 "Number of entries in hardware queue");
275 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
276 CTLFLAG_RD, &qpair->num_trackers, 0,
277 "Number of trackers pre-allocated for this queue pair");
278 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
279 CTLFLAG_RD, &qpair->sq_head, 0,
280 "Current head of submission queue (as observed by driver)");
281 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
282 CTLFLAG_RD, &qpair->sq_tail, 0,
283 "Current tail of submission queue (as observed by driver)");
284 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
285 CTLFLAG_RD, &qpair->cq_head, 0,
286 "Current head of completion queue (as observed by driver)");
/* 64-bit per-queue statistics (resettable via the reset_stats OID). */
288 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
289 CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
290 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
291 CTLFLAG_RD, &qpair->num_intr_handler_calls,
292 "Number of times interrupt handler was invoked (will typically be "
293 "less than number of actual interrupts generated due to "
295 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries",
296 CTLFLAG_RD, &qpair->num_retries, "Number of commands retried");
297 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures",
298 CTLFLAG_RD, &qpair->num_failures,
299 "Number of commands ending in failure after all retries");
300 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_ignored",
301 CTLFLAG_RD, &qpair->num_ignored,
302 "Number of interrupts posted, but were administratively ignored");
/* Writable debug hook: dumps this queue pair's contents to console. */
304 SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
305 "dump_debug", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
306 qpair, 0, nvme_sysctl_dump_debug, "IU", "Dump debug data");
310 nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
312 struct sysctl_ctx_list *ctrlr_ctx;
313 struct sysctl_oid *ctrlr_tree, *que_tree;
314 struct sysctl_oid_list *ctrlr_list;
315 #define QUEUE_NAME_LENGTH 16
316 char queue_name[QUEUE_NAME_LENGTH];
319 ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
320 ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
321 ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
323 SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues",
324 CTLFLAG_RD, &ctrlr->num_io_queues, 0,
325 "Number of I/O queue pairs");
327 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
328 "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
329 ctrlr, 0, nvme_sysctl_int_coal_time, "IU",
330 "Interrupt coalescing timeout (in microseconds)");
332 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
333 "int_coal_threshold",
334 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ctrlr, 0,
335 nvme_sysctl_int_coal_threshold, "IU",
336 "Interrupt coalescing threshold");
338 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
339 "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
340 ctrlr, 0, nvme_sysctl_timeout_period, "IU",
341 "Timeout period (in seconds)");
343 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
344 "num_cmds", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
345 ctrlr, 0, nvme_sysctl_num_cmds, "IU",
346 "Number of commands submitted");
348 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
349 "num_intr_handler_calls",
350 CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
351 nvme_sysctl_num_intr_handler_calls, "IU",
352 "Number of times interrupt handler was invoked (will "
353 "typically be less than number of actual interrupts "
354 "generated due to coalescing)");
356 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
357 "num_retries", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
358 ctrlr, 0, nvme_sysctl_num_retries, "IU",
359 "Number of commands retried");
361 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
362 "num_failures", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
363 ctrlr, 0, nvme_sysctl_num_failures, "IU",
364 "Number of commands ending in failure after all retries");
366 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
367 "num_ignored", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
368 ctrlr, 0, nvme_sysctl_num_ignored, "IU",
369 "Number of interrupts ignored administratively");
371 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
372 "reset_stats", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ctrlr,
373 0, nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
375 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
376 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");
378 nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
380 for (i = 0; i < ctrlr->num_io_queues; i++) {
381 snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
382 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
383 queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue");
384 nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,