/*-
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/sysctl.h>
34 #include "nvme_private.h"
38 SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD, 0, "NVMe sysctl tunables");
39 SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
40 &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");
/*
 * CTLTYPE_S64 and sysctl_handle_64 were added in r217616.  Define these
 * explicitly here for older kernels that don't include the r217616
 * changeset.  Guard the definitions so that on newer kernels — where
 * sys/sysctl.h already provides them — we do not redefine the macros.
 */
#ifndef CTLTYPE_S64
#define CTLTYPE_S64		CTLTYPE_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#endif
53 nvme_dump_queue(struct nvme_qpair *qpair)
55 struct nvme_completion *cpl;
56 struct nvme_command *cmd;
59 printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);
61 printf("Completion queue:\n");
62 for (i = 0; i < qpair->num_entries; i++) {
65 nvme_dump_completion(cpl);
68 printf("Submission queue:\n");
69 for (i = 0; i < qpair->num_entries; i++) {
72 nvme_dump_command(cmd);
78 nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
80 struct nvme_qpair *qpair = arg1;
83 int error = sysctl_handle_int(oidp, &val, 0, req);
89 nvme_dump_queue(qpair);
95 nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
97 struct nvme_controller *ctrlr = arg1;
98 uint32_t oldval = ctrlr->int_coal_time;
99 int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
105 if (oldval != ctrlr->int_coal_time)
106 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
107 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
114 nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
116 struct nvme_controller *ctrlr = arg1;
117 uint32_t oldval = ctrlr->int_coal_threshold;
118 int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
124 if (oldval != ctrlr->int_coal_threshold)
125 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
126 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
133 nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
135 struct nvme_controller *ctrlr = arg1;
136 uint32_t oldval = ctrlr->timeout_period;
137 int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);
142 if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
143 ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
144 ctrlr->timeout_period = oldval;
152 nvme_qpair_reset_stats(struct nvme_qpair *qpair)
156 qpair->num_intr_handler_calls = 0;
160 nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
162 struct nvme_controller *ctrlr = arg1;
163 int64_t num_cmds = 0;
166 num_cmds = ctrlr->adminq.num_cmds;
168 for (i = 0; i < ctrlr->num_io_queues; i++)
169 num_cmds += ctrlr->ioq[i].num_cmds;
171 return (sysctl_handle_64(oidp, &num_cmds, 0, req));
175 nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
177 struct nvme_controller *ctrlr = arg1;
178 int64_t num_intr_handler_calls = 0;
181 num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
183 for (i = 0; i < ctrlr->num_io_queues; i++)
184 num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
186 return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
190 nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
192 struct nvme_controller *ctrlr = arg1;
195 int error = sysctl_handle_int(oidp, &val, 0, req);
201 nvme_qpair_reset_stats(&ctrlr->adminq);
203 for (i = 0; i < ctrlr->num_io_queues; i++)
204 nvme_qpair_reset_stats(&ctrlr->ioq[i]);
212 nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
213 struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
215 struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
217 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
218 CTLFLAG_RD, &qpair->num_entries, 0,
219 "Number of entries in hardware queue");
220 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
221 CTLFLAG_RD, &qpair->num_trackers, 0,
222 "Number of trackers pre-allocated for this queue pair");
223 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
224 CTLFLAG_RD, &qpair->sq_head, 0,
225 "Current head of submission queue (as observed by driver)");
226 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
227 CTLFLAG_RD, &qpair->sq_tail, 0,
228 "Current tail of submission queue (as observed by driver)");
229 SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
230 CTLFLAG_RD, &qpair->cq_head, 0,
231 "Current head of completion queue (as observed by driver)");
233 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
234 CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
235 SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
236 CTLFLAG_RD, &qpair->num_intr_handler_calls,
237 "Number of times interrupt handler was invoked (will typically be "
238 "less than number of actual interrupts generated due to "
241 SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
242 "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
243 nvme_sysctl_dump_debug, "IU", "Dump debug data");
247 nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
249 struct sysctl_ctx_list *ctrlr_ctx;
250 struct sysctl_oid *ctrlr_tree, *que_tree;
251 struct sysctl_oid_list *ctrlr_list;
252 #define QUEUE_NAME_LENGTH 16
253 char queue_name[QUEUE_NAME_LENGTH];
256 ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
257 ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
258 ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
260 SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cpus_per_ioq",
261 CTLFLAG_RD, &ctrlr->num_cpus_per_ioq, 0,
262 "Number of CPUs assigned per I/O queue pair");
264 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
265 "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
266 nvme_sysctl_int_coal_time, "IU",
267 "Interrupt coalescing timeout (in microseconds)");
269 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
270 "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
271 nvme_sysctl_int_coal_threshold, "IU",
272 "Interrupt coalescing threshold");
274 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
275 "timeout_period", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
276 nvme_sysctl_timeout_period, "IU",
277 "Timeout period (in seconds)");
279 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
280 "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
281 ctrlr, 0, nvme_sysctl_num_cmds, "IU",
282 "Number of commands submitted");
284 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
285 "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
286 ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
287 "Number of times interrupt handler was invoked (will "
288 "typically be less than number of actual interrupts "
289 "generated due to coalescing)");
291 SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
292 "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
293 nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
295 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
296 CTLFLAG_RD, NULL, "Admin Queue");
298 nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
300 for (i = 0; i < ctrlr->num_io_queues; i++) {
301 snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
302 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
303 queue_name, CTLFLAG_RD, NULL, "IO Queue");
304 nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,