/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/fail.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef BUS_SPACE_MAXADDR_40BIT
#define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFULL
#endif

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat, boolean_t intr);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor *);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *,
    uint32_t index);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *,
    uint32_t index);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static void ioat_get(struct ioat_softc *);
static void ioat_put(struct ioat_softc *);
static void ioat_drain_locked(struct ioat_softc *);

#define ioat_log_message(v, ...) do {                   \
    if ((v) <= g_ioat_debug_level) {                    \
        device_printf(ioat->device, __VA_ARGS__);       \
    }                                                   \
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

unsigned g_ioat_ring_order = 13;
SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order,
    0, "Set IOAT ring order. (1 << this) == ring size.");

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     ioat_probe),
    DEVMETHOD(device_attach,    ioat_attach),
    DEVMETHOD(device_detach,    ioat_detach),
    DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
    "ioat",
    ioat_pci_methods,
    sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");
static struct mtx ioat_list_mtx;
MTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF);

static struct _pcsid
{
    uint32_t   type;
    const char *desc;
} pci_ids[] = {
    { 0x34308086, "TBG IOAT Ch0" },
    { 0x34318086, "TBG IOAT Ch1" },
    { 0x34328086, "TBG IOAT Ch2" },
    { 0x34338086, "TBG IOAT Ch3" },
    { 0x34298086, "TBG IOAT Ch4" },
    { 0x342a8086, "TBG IOAT Ch5" },
    { 0x342b8086, "TBG IOAT Ch6" },
    { 0x342c8086, "TBG IOAT Ch7" },

    { 0x37108086, "JSF IOAT Ch0" },
    { 0x37118086, "JSF IOAT Ch1" },
    { 0x37128086, "JSF IOAT Ch2" },
    { 0x37138086, "JSF IOAT Ch3" },
    { 0x37148086, "JSF IOAT Ch4" },
    { 0x37158086, "JSF IOAT Ch5" },
    { 0x37168086, "JSF IOAT Ch6" },
    { 0x37178086, "JSF IOAT Ch7" },
    { 0x37188086, "JSF IOAT Ch0 (RAID)" },
    { 0x37198086, "JSF IOAT Ch1 (RAID)" },

    { 0x3c208086, "SNB IOAT Ch0" },
    { 0x3c218086, "SNB IOAT Ch1" },
    { 0x3c228086, "SNB IOAT Ch2" },
    { 0x3c238086, "SNB IOAT Ch3" },
    { 0x3c248086, "SNB IOAT Ch4" },
    { 0x3c258086, "SNB IOAT Ch5" },
    { 0x3c268086, "SNB IOAT Ch6" },
    { 0x3c278086, "SNB IOAT Ch7" },
    { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
    { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

    { 0x0e208086, "IVB IOAT Ch0" },
    { 0x0e218086, "IVB IOAT Ch1" },
    { 0x0e228086, "IVB IOAT Ch2" },
    { 0x0e238086, "IVB IOAT Ch3" },
    { 0x0e248086, "IVB IOAT Ch4" },
    { 0x0e258086, "IVB IOAT Ch5" },
    { 0x0e268086, "IVB IOAT Ch6" },
    { 0x0e278086, "IVB IOAT Ch7" },
    { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
    { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

    { 0x2f208086, "HSW IOAT Ch0" },
    { 0x2f218086, "HSW IOAT Ch1" },
    { 0x2f228086, "HSW IOAT Ch2" },
    { 0x2f238086, "HSW IOAT Ch3" },
    { 0x2f248086, "HSW IOAT Ch4" },
    { 0x2f258086, "HSW IOAT Ch5" },
    { 0x2f268086, "HSW IOAT Ch6" },
    { 0x2f278086, "HSW IOAT Ch7" },
    { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
    { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

    { 0x0c508086, "BWD IOAT Ch0" },
    { 0x0c518086, "BWD IOAT Ch1" },
    { 0x0c528086, "BWD IOAT Ch2" },
    { 0x0c538086, "BWD IOAT Ch3" },

    { 0x6f508086, "BDXDE IOAT Ch0" },
    { 0x6f518086, "BDXDE IOAT Ch1" },
    { 0x6f528086, "BDXDE IOAT Ch2" },
    { 0x6f538086, "BDXDE IOAT Ch3" },

    { 0x6f208086, "BDX IOAT Ch0" },
    { 0x6f218086, "BDX IOAT Ch1" },
    { 0x6f228086, "BDX IOAT Ch2" },
    { 0x6f238086, "BDX IOAT Ch3" },
    { 0x6f248086, "BDX IOAT Ch4" },
    { 0x6f258086, "BDX IOAT Ch5" },
    { 0x6f268086, "BDX IOAT Ch6" },
    { 0x6f278086, "BDX IOAT Ch7" },
    { 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
    { 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

    { 0x20218086, "SKX IOAT" },
};

MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ioat, pci_ids,
    nitems(pci_ids));

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
    struct _pcsid *ep;
    uint32_t type;

    type = pci_get_devid(device);
    for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
        if (ep->type == type) {
            device_set_desc(device, ep->desc);
            return (0);
        }
    }
    return (ENXIO);
}

static int
ioat_attach(device_t device)
{
    struct ioat_softc *ioat;
    int error, i;

    ioat = DEVICE2SOFTC(device);
    ioat->device = device;

    error = ioat_map_pci_bar(ioat);
    if (error != 0)
        goto err;

    ioat->version = ioat_read_cbver(ioat);
    if (ioat->version < IOAT_VER_3_0) {
        error = ENODEV;
        goto err;
    }

    error = ioat3_attach(device);
    if (error != 0)
        goto err;

    error = pci_enable_busmaster(device);
    if (error != 0)
        goto err;

    error = ioat_setup_intr(ioat);
    if (error != 0)
        goto err;

    error = ioat_reset_hw(ioat);
    if (error != 0)
        goto err;

    ioat_process_events(ioat, FALSE);
    ioat_setup_sysctl(device);

    mtx_lock(&ioat_list_mtx);
    for (i = 0; i < IOAT_MAX_CHANNELS; i++) {
        if (ioat_channel[i] == NULL)
            break;
    }
    if (i >= IOAT_MAX_CHANNELS) {
        mtx_unlock(&ioat_list_mtx);
        device_printf(device, "Too many I/OAT devices in system\n");
        error = ENXIO;
        goto err;
    }
    ioat->chan_idx = i;
    ioat_channel[i] = ioat;
    if (i >= ioat_channel_index)
        ioat_channel_index = i + 1;
    mtx_unlock(&ioat_list_mtx);

err:
    if (error != 0)
        ioat_detach(device);
    return (error);
}

static inline int
ioat_bus_dmamap_destroy(struct ioat_softc *ioat, const char *func,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
    int error;

    error = bus_dmamap_destroy(dmat, map);
    if (error != 0) {
        ioat_log_message(0,
            "%s: bus_dmamap_destroy failed %d\n", func, error);
    }

    return (error);
}

static int
ioat_detach(device_t device)
{
    struct ioat_softc *ioat;
    int i, error;

    ioat = DEVICE2SOFTC(device);

    mtx_lock(&ioat_list_mtx);
    ioat_channel[ioat->chan_idx] = NULL;
    while (ioat_channel_index > 0 &&
        ioat_channel[ioat_channel_index - 1] == NULL)
        ioat_channel_index--;
    mtx_unlock(&ioat_list_mtx);

    taskqueue_drain(taskqueue_thread, &ioat->reset_task);

    mtx_lock(&ioat->submit_lock);
    ioat->quiescing = TRUE;
    ioat->destroying = TRUE;
    wakeup(&ioat->quiescing);
    wakeup(&ioat->resetting);

    ioat_drain_locked(ioat);
    mtx_unlock(&ioat->submit_lock);
    mtx_lock(&ioat->cleanup_lock);
    while (ioat_get_active(ioat) > 0)
        msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
    mtx_unlock(&ioat->cleanup_lock);

    ioat_teardown_intr(ioat);
    callout_drain(&ioat->poll_timer);

    pci_disable_busmaster(device);

    if (ioat->pci_resource != NULL)
        bus_release_resource(device, SYS_RES_MEMORY,
            ioat->pci_resource_id, ioat->pci_resource);

    if (ioat->data_tag != NULL) {
        for (i = 0; i < 1 << ioat->ring_size_order; i++) {
            error = ioat_bus_dmamap_destroy(ioat, __func__,
                ioat->data_tag, ioat->ring[i].src_dmamap);
            if (error != 0)
                return (error);
        }
        for (i = 0; i < 1 << ioat->ring_size_order; i++) {
            error = ioat_bus_dmamap_destroy(ioat, __func__,
                ioat->data_tag, ioat->ring[i].dst_dmamap);
            if (error != 0)
                return (error);
        }
        for (i = 0; i < 1 << ioat->ring_size_order; i++) {
            error = ioat_bus_dmamap_destroy(ioat, __func__,
                ioat->data_tag, ioat->ring[i].src2_dmamap);
            if (error != 0)
                return (error);
        }
        for (i = 0; i < 1 << ioat->ring_size_order; i++) {
            error = ioat_bus_dmamap_destroy(ioat, __func__,
                ioat->data_tag, ioat->ring[i].dst2_dmamap);
            if (error != 0)
                return (error);
        }
        bus_dma_tag_destroy(ioat->data_tag);
    }

    if (ioat->data_crc_tag != NULL) {
        for (i = 0; i < 1 << ioat->ring_size_order; i++) {
            error = ioat_bus_dmamap_destroy(ioat, __func__,
                ioat->data_crc_tag, ioat->ring[i].crc_dmamap);
            if (error != 0)
                return (error);
        }
        bus_dma_tag_destroy(ioat->data_crc_tag);
    }

    if (ioat->ring != NULL)
        ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

    if (ioat->comp_update != NULL) {
        bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
        bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
            ioat->comp_update_map);
        bus_dma_tag_destroy(ioat->comp_update_tag);
    }

    if (ioat->hw_desc_ring != NULL) {
        bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
        bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
            ioat->hw_desc_map);
        bus_dma_tag_destroy(ioat->hw_desc_tag);
    }

    return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

    if (ioat->tag != NULL)
        bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

    if (ioat->res != NULL)
        bus_release_resource(ioat->device, SYS_RES_IRQ,
            rman_get_rid(ioat->res), ioat->res);

    pci_release_msi(ioat->device);
    return (0);
}

static int
ioat_start_channel(struct ioat_softc *ioat)
{
    struct ioat_dma_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct bus_dmadesc *dmadesc;
    uint64_t status;
    uint32_t chanerr;
    int i;

    ioat_acquire(&ioat->dmaengine);

    /* Submit 'NULL' operation manually to avoid quiescing flag */
    desc = ioat_get_ring_entry(ioat, ioat->head);
    hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
    dmadesc = &desc->bus_dmadesc;

    dmadesc->callback_fn = NULL;
    dmadesc->callback_arg = NULL;

    hw_desc->u.control_raw = 0;
    hw_desc->u.control_generic.op = IOAT_OP_COPY;
    hw_desc->u.control_generic.completion_update = 1;

    hw_desc->src_addr = 0;
    hw_desc->dest_addr = 0;
    hw_desc->u.control.null = 1;

    ioat_submit_single(ioat);
    ioat_release(&ioat->dmaengine);

    for (i = 0; i < 100; i++) {
        DELAY(1);
        status = ioat_get_chansts(ioat);
        if (is_ioat_idle(status))
            return (0);
    }

    chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
    ioat_log_message(0, "could not start channel: "
        "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
        IOAT_CHANERR_STR);
    return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
    struct ioat_softc *ioat;
    struct ioat_descriptor *ring;
    struct ioat_dma_hw_descriptor *dma_hw_desc;
    void *hw_desc;
    size_t ringsz;
    int i, num_descriptors;
    int error;
    uint8_t xfercap;

    error = 0;
    ioat = DEVICE2SOFTC(device);
    ioat->capabilities = ioat_read_dmacapability(ioat);

    ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
        IOAT_DMACAP_STR);

    xfercap = ioat_read_xfercap(ioat);
    ioat->max_xfer_size = 1 << xfercap;

    ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
        IOAT_INTRDELAY_SUPPORTED) != 0;
    if (ioat->intrdelay_supported)
        ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

    /* TODO: need to check DCA here if we ever do XOR/PQ */

    mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
    mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
    callout_init(&ioat->poll_timer, 1);
    TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

    /* Establish lock order for Witness */
    mtx_lock(&ioat->cleanup_lock);
    mtx_lock(&ioat->submit_lock);
    mtx_unlock(&ioat->submit_lock);
    mtx_unlock(&ioat->cleanup_lock);

    ioat->is_submitter_processing = FALSE;

    bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
        &ioat->comp_update_tag);

    error = bus_dmamem_alloc(ioat->comp_update_tag,
        (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
    if (ioat->comp_update == NULL)
        return (ENOMEM);

    error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
        ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
        0);
    if (error != 0)
        return (error);

    ioat->ring_size_order = g_ioat_ring_order;
    num_descriptors = 1 << ioat->ring_size_order;
    ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors;

    error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
        2 * 1024 * 1024, 0x0, (bus_addr_t)BUS_SPACE_MAXADDR_40BIT,
        BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL,
        &ioat->hw_desc_tag);
    if (error != 0)
        return (error);

    error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
        BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
    if (error != 0)
        return (error);

    error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
        ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_WAITOK);
    if (error)
        return (error);

    ioat->hw_desc_ring = hw_desc;

    error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
        1, 0, BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        ioat->max_xfer_size, 1, ioat->max_xfer_size, 0, NULL, NULL,
        &ioat->data_crc_tag);
    if (error != 0) {
        ioat_log_message(0, "%s: bus_dma_tag_create failed %d\n",
            __func__, error);
        return (error);
    }

    error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
        1, 0, BUS_SPACE_MAXADDR_48BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        ioat->max_xfer_size, 1, ioat->max_xfer_size, 0, NULL, NULL,
        &ioat->data_tag);
    if (error != 0) {
        ioat_log_message(0, "%s: bus_dma_tag_create failed %d\n",
            __func__, error);
        return (error);
    }

    ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
        M_ZERO | M_WAITOK);

    ring = ioat->ring;
    for (i = 0; i < num_descriptors; i++) {
        memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc));
        ring[i].id = i;
        error = bus_dmamap_create(ioat->data_tag, 0,
            &ring[i].src_dmamap);
        if (error != 0) {
            ioat_log_message(0,
                "%s: bus_dmamap_create failed %d\n", __func__,
                error);
            return (error);
        }
        error = bus_dmamap_create(ioat->data_tag, 0,
            &ring[i].dst_dmamap);
        if (error != 0) {
            ioat_log_message(0,
                "%s: bus_dmamap_create failed %d\n", __func__,
                error);
            return (error);
        }
        error = bus_dmamap_create(ioat->data_tag, 0,
            &ring[i].src2_dmamap);
        if (error != 0) {
            ioat_log_message(0,
                "%s: bus_dmamap_create failed %d\n", __func__,
                error);
            return (error);
        }
        error = bus_dmamap_create(ioat->data_tag, 0,
            &ring[i].dst2_dmamap);
        if (error != 0) {
            ioat_log_message(0,
                "%s: bus_dmamap_create failed %d\n", __func__,
                error);
            return (error);
        }
        error = bus_dmamap_create(ioat->data_crc_tag, 0,
            &ring[i].crc_dmamap);
        if (error != 0) {
            ioat_log_message(0,
                "%s: bus_dmamap_create failed %d\n", __func__,
                error);
            return (error);
        }
    }

    for (i = 0; i < num_descriptors; i++) {
        dma_hw_desc = &ioat->hw_desc_ring[i].dma;
        dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
    }

    ioat->head = 0;
    ioat->tail = 0;
    *ioat->comp_update = 0;
    return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

    ioat->pci_resource_id = PCIR_BAR(0);
    ioat->pci_resource = bus_alloc_resource_any(ioat->device,
        SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

    if (ioat->pci_resource == NULL) {
        ioat_log_message(0, "unable to allocate pci resource\n");
        return (ENODEV);
    }

    ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
    ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
    return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
    struct ioat_softc *ioat = arg;

    KASSERT(error == 0, ("%s: error:%d", __func__, error));
    ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *baddr;

    KASSERT(error == 0, ("%s: error:%d", __func__, error));
    baddr = arg;
    *baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
    uint32_t num_vectors;
    int error;
    boolean_t use_msix;
    boolean_t force_legacy_interrupts;

    use_msix = FALSE;
    force_legacy_interrupts = FALSE;

    if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
        num_vectors = 1;
        pci_alloc_msix(ioat->device, &num_vectors);
        if (num_vectors == 1)
            use_msix = TRUE;
    }

    if (use_msix) {
        ioat->rid = 1;
        ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
            &ioat->rid, RF_ACTIVE);
    } else {
        ioat->rid = 0;
        ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
            &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
    }
    if (ioat->res == NULL) {
        ioat_log_message(0, "bus_alloc_resource failed\n");
        return (ENOMEM);
    }

    error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
        INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
    if (error != 0) {
        ioat_log_message(0, "bus_setup_intr failed\n");
        return (error);
    }

    ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
    return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
    u_int32_t pciid;

    pciid = pci_get_devid(ioat->device);
    switch (pciid) {
        /* BWD: */
    case 0x0c508086:
    case 0x0c518086:
    case 0x0c528086:
    case 0x0c538086:
        /* BDXDE: */
    case 0x6f508086:
    case 0x6f518086:
    case 0x6f528086:
    case 0x6f538086:
        return (TRUE);
    }

    return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
    struct ioat_softc *ioat = arg;

    ioat->stats.interrupts++;
    ioat_process_events(ioat, TRUE);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

    if (chanerr == 0)
        return (0);
    if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
        return (EFAULT);
    if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
        return (EIO);
    /* This one is probably our fault: */
    if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
        return (EIO);
    return (EIO);
}

static void
ioat_process_events(struct ioat_softc *ioat, boolean_t intr)
{
    struct ioat_descriptor *desc;
    struct bus_dmadesc *dmadesc;
    uint64_t comp_update, status;
    uint32_t completed, chanerr;
    int error;

    mtx_lock(&ioat->cleanup_lock);

    /*
     * Don't run while the hardware is being reset.  Reset is responsible
     * for blocking new work and draining & completing existing work, so
     * there is nothing to do until new work is queued after reset anyway.
     */
    if (ioat->resetting_cleanup) {
        mtx_unlock(&ioat->cleanup_lock);
        return;
    }

    completed = 0;
    comp_update = *ioat->comp_update;
    status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

    if (status < ioat->hw_desc_bus_addr ||
        status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
        sizeof(struct ioat_generic_hw_descriptor))
        panic("Bogus completion address %jx (channel %u)",
            (uintmax_t)status, ioat->chan_idx);

    if (status == ioat->last_seen) {
        /*
         * If we landed in process_events and nothing has been
         * completed, check for a timeout due to channel halt.
         */
        goto out;
    }
    CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx",
        __func__, ioat->chan_idx, comp_update, ioat->last_seen);

    while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
        desc = ioat_get_ring_entry(ioat, ioat->tail);
        dmadesc = &desc->bus_dmadesc;
        CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)",
            ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
            dmadesc->callback_arg);

        bus_dmamap_unload(ioat->data_tag, desc->src_dmamap);
        bus_dmamap_unload(ioat->data_tag, desc->dst_dmamap);
        bus_dmamap_unload(ioat->data_tag, desc->src2_dmamap);
        bus_dmamap_unload(ioat->data_tag, desc->dst2_dmamap);
        bus_dmamap_unload(ioat->data_crc_tag, desc->crc_dmamap);

        if (dmadesc->callback_fn != NULL)
            dmadesc->callback_fn(dmadesc->callback_arg, 0);

        completed++;
        ioat->tail++;
    }
    CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
        ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

    if (completed != 0) {
        ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
        ioat->stats.descriptors_processed += completed;
        wakeup(&ioat->tail);
    }

out:
    ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
    mtx_unlock(&ioat->cleanup_lock);

    /*
     * The device doesn't seem to reliably push suspend/halt statuses to
     * the channel completion memory address, so poll the device register
     * here.  For performance reasons skip it on interrupts, do it only
     * on much more rare polling events.
     */
    if (!intr)
        comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
    if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
        return;

    ioat->stats.channel_halts++;

    /*
     * Fatal programming error on this DMA channel.  Flush any outstanding
     * work with error status and restart the engine.
     */
    mtx_lock(&ioat->submit_lock);
    ioat->quiescing = TRUE;
    mtx_unlock(&ioat->submit_lock);

    /*
     * This is safe to do here because the submit queue is quiesced.  We
     * know that we will drain all outstanding events, so ioat_reset_hw
     * can't deadlock.  It is necessary to protect other ioat_process_event
     * threads from racing ioat_reset_hw, reading an indeterminate hw
     * state, and attempting to continue issuing completions.
     */
    mtx_lock(&ioat->cleanup_lock);
    ioat->resetting_cleanup = TRUE;

    chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
    if (1 <= g_ioat_debug_level)
        ioat_halted_debug(ioat, chanerr);
    ioat->stats.last_halt_chanerr = chanerr;

    while (ioat_get_active(ioat) > 0) {
        desc = ioat_get_ring_entry(ioat, ioat->tail);
        dmadesc = &desc->bus_dmadesc;
        CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)",
            ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
            dmadesc->callback_arg);

        if (dmadesc->callback_fn != NULL)
            dmadesc->callback_fn(dmadesc->callback_arg,
                chanerr_to_errno(chanerr));

        ioat->tail++;
        ioat->stats.descriptors_processed++;
        ioat->stats.descriptors_error++;
    }
    CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
        ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

    /* Clear error status */
    ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

    mtx_unlock(&ioat->cleanup_lock);

    ioat_log_message(0, "Resetting channel to recover from error\n");
    error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
    KASSERT(error == 0,
        ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
    struct ioat_softc *ioat;
    int error;

    ioat = ctx;
    ioat_log_message(1, "%s: Resetting channel\n", __func__);

    error = ioat_reset_hw(ioat);
    KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
    (void)error;
}

/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

    return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
    struct ioat_softc *ioat;

    KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
        ("invalid flags: 0x%08x", flags));
    KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
        ("invalid wait | nowait"));

    mtx_lock(&ioat_list_mtx);
    if (index >= ioat_channel_index ||
        (ioat = ioat_channel[index]) == NULL) {
        mtx_unlock(&ioat_list_mtx);
        return (NULL);
    }
    mtx_lock(&ioat->submit_lock);
    mtx_unlock(&ioat_list_mtx);

    if (ioat->destroying) {
        mtx_unlock(&ioat->submit_lock);
        return (NULL);
    }

    ioat_get(ioat);
    if (ioat->quiescing) {
        if ((flags & M_NOWAIT) != 0) {
            ioat_put(ioat);
            mtx_unlock(&ioat->submit_lock);
            return (NULL);
        }

        while (ioat->quiescing && !ioat->destroying)
            msleep(&ioat->quiescing, &ioat->submit_lock, 0, "getdma", 0);

        if (ioat->destroying) {
            ioat_put(ioat);
            mtx_unlock(&ioat->submit_lock);
            return (NULL);
        }
    }
    mtx_unlock(&ioat->submit_lock);
    return (&ioat->dmaengine);
}
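
/*
 * Usage sketch (illustrative, not part of the driver): a consumer would
 * typically look up a channel by index and drop its reference when done.
 * The pairing below is hypothetical and only demonstrates the
 * lookup/release protocol under M_NOWAIT:
 *
 *	bus_dmaengine_t eng;
 *
 *	eng = ioat_get_dmaengine(0, M_NOWAIT);
 *	if (eng == NULL)
 *		return (ENXIO);		// absent, destroyed, or quiescing
 *	// ... issue operations ...
 *	ioat_put_dmaengine(eng);	// drop the reference taken above
 */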

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    mtx_lock(&ioat->submit_lock);
    ioat_put(ioat);
    mtx_unlock(&ioat->submit_lock);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    return (ioat->max_xfer_size);
}

uint32_t
ioat_get_capabilities(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    return (ioat->capabilities);
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    if (!ioat->intrdelay_supported)
        return (ENODEV);
    if (delay > ioat->intrdelay_max)
        return (EINVAL);

    ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
    ioat->cached_intrdelay =
        ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
    return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    return (ioat->intrdelay_max);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    mtx_lock(&ioat->submit_lock);
    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
    ioat->acq_head = ioat->head;
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
    struct ioat_softc *ioat;
    int error;

    ioat = to_ioat_softc(dmaengine);
    ioat_acquire(dmaengine);

    error = ioat_reserve_space(ioat, n, mflags);
    if (error != 0)
        ioat_release(dmaengine);
    return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    CTR3(KTR_IOAT, "%s channel=%u dispatch1 head=%u", __func__,
        ioat->chan_idx, ioat->head);
    KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
    CTR3(KTR_IOAT, "%s channel=%u dispatch2 head=%u", __func__,
        ioat->chan_idx, ioat->head);

    if (ioat->acq_head != ioat->head) {
        ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
            (uint16_t)ioat->head);

        if (!callout_pending(&ioat->poll_timer)) {
            callout_reset(&ioat->poll_timer, 1,
                ioat_poll_timer_callback, ioat);
        }
    }
    mtx_unlock(&ioat->submit_lock);
}
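
/*
 * Usage sketch (illustrative, not from the original source): operations
 * queued between ioat_acquire() and ioat_release() are dispatched to the
 * hardware as one batch when ioat_release() writes DMACOUNT.  A minimal
 * copy submission, assuming 'eng' was obtained via ioat_get_dmaengine()
 * and 'cb' is a caller-supplied bus_dmaengine_callback_t:
 *
 *	ioat_acquire(eng);
 *	if (ioat_copy(eng, dst_pa, src_pa, len, cb, cb_arg,
 *	    DMA_INT_EN) == NULL)
 *		error = ENOMEM;	// ring full or address rejected
 *	ioat_release(eng);	// rings the channel doorbell
 */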

static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
    struct ioat_generic_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    bus_dma_segment_t seg;
    int mflags, nseg, error;

    mtx_assert(&ioat->submit_lock, MA_OWNED);

    KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
        ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
    if ((flags & DMA_NO_WAIT) != 0)
        mflags = M_NOWAIT;
    else
        mflags = M_WAITOK;

    if (size > ioat->max_xfer_size) {
        ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
            __func__, ioat->max_xfer_size, (unsigned)size);
        return (NULL);
    }

    if (ioat_reserve_space(ioat, 1, mflags) != 0)
        return (NULL);

    desc = ioat_get_ring_entry(ioat, ioat->head);
    hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;

    hw_desc->u.control_raw = 0;
    hw_desc->u.control_generic.op = op;
    hw_desc->u.control_generic.completion_update = 1;

    if ((flags & DMA_INT_EN) != 0)
        hw_desc->u.control_generic.int_enable = 1;
    if ((flags & DMA_FENCE) != 0)
        hw_desc->u.control_generic.fence = 1;

    hw_desc->size = size;

    if (src != 0) {
        nseg = -1;
        error = _bus_dmamap_load_phys(ioat->data_tag, desc->src_dmamap,
            src, size, 0, &seg, &nseg);
        if (error != 0) {
            ioat_log_message(0, "%s: _bus_dmamap_load_phys"
                " failed %d\n", __func__, error);
            return (NULL);
        }
        hw_desc->src_addr = seg.ds_addr;
    }

    if (dst != 0) {
        nseg = -1;
        error = _bus_dmamap_load_phys(ioat->data_tag, desc->dst_dmamap,
            dst, size, 0, &seg, &nseg);
        if (error != 0) {
            ioat_log_message(0, "%s: _bus_dmamap_load_phys"
                " failed %d\n", __func__, error);
            return (NULL);
        }
        hw_desc->dest_addr = seg.ds_addr;
    }

    desc->bus_dmadesc.callback_fn = callback_fn;
    desc->bus_dmadesc.callback_arg = callback_arg;
    return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
    struct ioat_dma_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

    desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
        callback_arg, flags);
    if (desc == NULL)
        return (NULL);

    hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
    hw_desc->u.control.null = 1;
    ioat_submit_single(ioat);
    return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
    struct ioat_dma_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);

    if (((src | dst) & (0xffffull << 48)) != 0) {
        ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
            __func__);
        return (NULL);
    }

    desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
        callback_arg, flags);
    if (desc == NULL)
        return (NULL);

    hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
    if (g_ioat_debug_level >= 3)
        dump_descriptor(hw_desc);

    ioat_submit_single(ioat);
    CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
        __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
    return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
    struct ioat_dma_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct ioat_softc *ioat;
    bus_size_t src1_len, dst1_len;
    bus_dma_segment_t seg;
    int error, nseg;

    ioat = to_ioat_softc(dmaengine);
    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

    if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
        ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
            __func__);
        return (NULL);
    }
    if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
        ioat_log_message(0, "%s: Addresses must be page-aligned\n",
            __func__);
        return (NULL);
    }

    desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, 0, 0,
        callback_fn, callback_arg, flags);
    if (desc == NULL)
        return (NULL);

    hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;

    src1_len = (src2 != src1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
    nseg = -1;
    error = _bus_dmamap_load_phys(ioat->data_tag,
        desc->src_dmamap, src1, src1_len, 0, &seg, &nseg);
    if (error != 0) {
        ioat_log_message(0, "%s: _bus_dmamap_load_phys"
            " failed %d\n", __func__, error);
        return (NULL);
    }
    hw_desc->src_addr = seg.ds_addr;
    if (src1_len != 2 * PAGE_SIZE) {
        hw_desc->u.control.src_page_break = 1;
        nseg = -1;
        error = _bus_dmamap_load_phys(ioat->data_tag,
            desc->src2_dmamap, src2, PAGE_SIZE, 0, &seg, &nseg);
        if (error != 0) {
            ioat_log_message(0, "%s: _bus_dmamap_load_phys"
                " failed %d\n", __func__, error);
            return (NULL);
        }
        hw_desc->next_src_addr = seg.ds_addr;
    }

    dst1_len = (dst2 != dst1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
    nseg = -1;
    error = _bus_dmamap_load_phys(ioat->data_tag,
        desc->dst_dmamap, dst1, dst1_len, 0, &seg, &nseg);
    if (error != 0) {
        ioat_log_message(0, "%s: _bus_dmamap_load_phys"
            " failed %d\n", __func__, error);
        return (NULL);
    }
    hw_desc->dest_addr = seg.ds_addr;
    if (dst1_len != 2 * PAGE_SIZE) {
        hw_desc->u.control.dest_page_break = 1;
        nseg = -1;
        error = _bus_dmamap_load_phys(ioat->data_tag,
            desc->dst2_dmamap, dst2, PAGE_SIZE, 0, &seg, &nseg);
        if (error != 0) {
            ioat_log_message(0, "%s: _bus_dmamap_load_phys"
                " failed %d\n", __func__, error);
            return (NULL);
        }
        hw_desc->next_dest_addr = seg.ds_addr;
    }

    if (g_ioat_debug_level >= 3)
        dump_descriptor(hw_desc);

    ioat_submit_single(ioat);
    return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
    struct ioat_crc32_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct ioat_softc *ioat;
    uint32_t teststore;
    uint8_t op;
    bus_dma_segment_t seg;
    int error, nseg;

    ioat = to_ioat_softc(dmaengine);
    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

    if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
        ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
            __func__);
        return (NULL);
    }
    if (((src | dst) & (0xffffffull << 40)) != 0) {
        ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
            __func__);
        return (NULL);
    }
    teststore = (flags & _DMA_CRC_TESTSTORE);
    if (teststore == _DMA_CRC_TESTSTORE) {
        ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
        return (NULL);
    }
    if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
        ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
            __func__);
        return (NULL);
    }

    switch (teststore) {
    case DMA_CRC_STORE:
        op = IOAT_OP_MOVECRC_STORE;
        break;
    case DMA_CRC_TEST:
        op = IOAT_OP_MOVECRC_TEST;
        break;
    default:
        KASSERT(teststore == 0, ("bogus"));
        op = IOAT_OP_MOVECRC;
        break;
    }

    if ((flags & DMA_CRC_INLINE) == 0 &&
        (crcptr & (0xffffffull << 40)) != 0) {
        ioat_log_message(0,
            "%s: High 24 bits of crcptr invalid\n", __func__);
        return (NULL);
    }

    desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
        callback_arg, flags & ~_DMA_CRC_FLAGS);
    if (desc == NULL)
        return (NULL);

    hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

    if ((flags & DMA_CRC_INLINE) == 0) {
        nseg = -1;
        error = _bus_dmamap_load_phys(ioat->data_crc_tag,
            desc->crc_dmamap, crcptr, sizeof(uint32_t), 0,
            &seg, &nseg);
        if (error != 0) {
            ioat_log_message(0, "%s: _bus_dmamap_load_phys"
                " failed %d\n", __func__, error);
            return (NULL);
        }
        hw_desc->crc_address = seg.ds_addr;
    } else
        hw_desc->u.control.crc_location = 1;

    if (initialseed != NULL) {
        hw_desc->u.control.use_seed = 1;
        hw_desc->seed = *initialseed;
    }

    if (g_ioat_debug_level >= 3)
        dump_descriptor(hw_desc);

    ioat_submit_single(ioat);
    return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
    struct ioat_crc32_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct ioat_softc *ioat;
    uint32_t teststore;
    uint8_t op;
    bus_dma_segment_t seg;
    int error, nseg;

    ioat = to_ioat_softc(dmaengine);
    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

    if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
        ioat_log_message(0, "%s: Device lacks CRC capability\n",
            __func__);
        return (NULL);
    }
    if ((src & (0xffffffull << 40)) != 0) {
        ioat_log_message(0, "%s: High 24 bits of src invalid\n",
            __func__);
        return (NULL);
    }
    teststore = (flags & _DMA_CRC_TESTSTORE);
    if (teststore == _DMA_CRC_TESTSTORE) {
        ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
        return (NULL);
    }
    if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
        ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
            __func__);
        return (NULL);
    }

    switch (teststore) {
    case DMA_CRC_STORE:
        op = IOAT_OP_CRC_STORE;
        break;
    case DMA_CRC_TEST:
        op = IOAT_OP_CRC_TEST;
        break;
    default:
        KASSERT(teststore == 0, ("bogus"));
        op = IOAT_OP_CRC;
        break;
    }

    if ((flags & DMA_CRC_INLINE) == 0 &&
        (crcptr & (0xffffffull << 40)) != 0) {
        ioat_log_message(0,
            "%s: High 24 bits of crcptr invalid\n", __func__);
        return (NULL);
    }

    desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
        callback_arg, flags & ~_DMA_CRC_FLAGS);
    if (desc == NULL)
        return (NULL);

    hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

    if ((flags & DMA_CRC_INLINE) == 0) {
        nseg = -1;
        error = _bus_dmamap_load_phys(ioat->data_crc_tag,
            desc->crc_dmamap, crcptr, sizeof(uint32_t), 0,
            &seg, &nseg);
        if (error != 0) {
            ioat_log_message(0, "%s: _bus_dmamap_load_phys"
                " failed %d\n", __func__, error);
            return (NULL);
        }
        hw_desc->crc_address = seg.ds_addr;
    } else
        hw_desc->u.control.crc_location = 1;

    if (initialseed != NULL) {
        hw_desc->u.control.use_seed = 1;
        hw_desc->seed = *initialseed;
    }

    if (g_ioat_debug_level >= 3)
        dump_descriptor(hw_desc);

    ioat_submit_single(ioat);
    return (&desc->bus_dmadesc);
}
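
/*
 * Usage sketch (illustrative, not from the original source): computing a
 * CRC over a physically contiguous buffer and storing the 32-bit result
 * right after it, assuming 'eng' holds an acquired channel and the device
 * advertises IOAT_DMACAP_CRC:
 *
 *	uint32_t seed = 0;
 *
 *	ioat_acquire(eng);
 *	(void)ioat_crc(eng, buf_pa, len, &seed, buf_pa + len,
 *	    cb, cb_arg, DMA_CRC_STORE);
 *	ioat_release(eng);
 */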

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
    struct ioat_fill_hw_descriptor *hw_desc;
    struct ioat_descriptor *desc;
    struct ioat_softc *ioat;

    ioat = to_ioat_softc(dmaengine);
    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

    if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
        ioat_log_message(0, "%s: Device lacks BFILL capability\n",
            __func__);
        return (NULL);
    }

    if ((dst & (0xffffull << 48)) != 0) {
        ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
            __func__);
        return (NULL);
    }

    desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, 0, dst,
        callback_fn, callback_arg, flags);
    if (desc == NULL)
        return (NULL);

    hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
    hw_desc->src_data = fillpattern;
    if (g_ioat_debug_level >= 3)
        dump_descriptor(hw_desc);

    ioat_submit_single(ioat);
    return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

    return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

    return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
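
/*
 * Example (illustrative, not from the original source): with the default
 * ring_size_order of 13 the ring has 8192 slots.  head and tail increment
 * without bound and are masked modulo the ring size, so head == 8194 and
 * tail == 8191 gives (8194 - 8191) & 8191 == 3 active descriptors; one
 * slot is always kept free, so ioat_get_ring_space() returns
 * 8192 - 3 - 1 == 8188.
 */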

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
    boolean_t dug;
    int error;

    mtx_assert(&ioat->submit_lock, MA_OWNED);
    error = 0;
    dug = FALSE;

    if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
        error = EINVAL;
        goto out;
    }

    for (;;) {
        if (ioat->quiescing) {
            error = ENXIO;
            goto out;
        }

        if (ioat_get_ring_space(ioat) >= num_descs)
            goto out;

        CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__,
            ioat->chan_idx, num_descs);

        if (!dug && !ioat->is_submitter_processing) {
            ioat->is_submitter_processing = TRUE;
            mtx_unlock(&ioat->submit_lock);

            CTR2(KTR_IOAT, "%s channel=%u attempting to process events",
                __func__, ioat->chan_idx);
            ioat_process_events(ioat, FALSE);

            mtx_lock(&ioat->submit_lock);
            dug = TRUE;
            KASSERT(ioat->is_submitter_processing == TRUE,
                ("is_submitter_processing"));
            ioat->is_submitter_processing = FALSE;
            wakeup(&ioat->tail);
            continue;
        }

        if ((mflags & M_WAITOK) == 0) {
            error = EAGAIN;
            break;
        }
        CTR2(KTR_IOAT, "%s channel=%u blocking on completions",
            __func__, ioat->chan_idx);
        msleep(&ioat->tail, &ioat->submit_lock, 0,
            "ioat_full", 0);
    }

out:
    mtx_assert(&ioat->submit_lock, MA_OWNED);
    KASSERT(!ioat->quiescing || error == ENXIO,
        ("reserved during quiesce"));
    return (error);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor *ring)
{

    free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

    return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static union ioat_hw_descriptor *
ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
{

    return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
    union ioat_hw_descriptor *desc;

    ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
        IOAT_CHANERR_STR);
    if (chanerr == 0)
        return;

    mtx_assert(&ioat->cleanup_lock, MA_OWNED);

    desc = ioat_get_descriptor(ioat, ioat->tail + 0);
    dump_descriptor(desc);

    desc = ioat_get_descriptor(ioat, ioat->tail + 1);
    dump_descriptor(desc);
}

static void
ioat_poll_timer_callback(void *arg)
{
    struct ioat_softc *ioat;

    ioat = arg;
    ioat_log_message(3, "%s\n", __func__);

    ioat_process_events(ioat, FALSE);

    mtx_lock(&ioat->submit_lock);
    if (ioat_get_active(ioat) > 0)
        callout_schedule(&ioat->poll_timer, 1);
    mtx_unlock(&ioat->submit_lock);
}

/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

    mtx_assert(&ioat->submit_lock, MA_OWNED);

    ioat->head++;
    CTR4(KTR_IOAT, "%s channel=%u head=%u tail=%u", __func__,
        ioat->chan_idx, ioat->head, ioat->tail);

    ioat->stats.descriptors_submitted++;
}

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
    uint64_t status;
    uint32_t chanerr;
    unsigned timeout;
    int error;

    CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

    mtx_lock(&ioat->submit_lock);
    while (ioat->resetting && !ioat->destroying)
        msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0);
    if (ioat->destroying) {
        mtx_unlock(&ioat->submit_lock);
        return (ENXIO);
    }
    ioat->resetting = TRUE;
    ioat->quiescing = TRUE;
    mtx_unlock(&ioat->submit_lock);
    mtx_lock(&ioat->cleanup_lock);
    while (ioat_get_active(ioat) > 0)
        msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);

    /*
     * Suspend ioat_process_events while the hardware and softc are in an
     * indeterminate state.
     */
    ioat->resetting_cleanup = TRUE;
    mtx_unlock(&ioat->cleanup_lock);

    CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
        ioat->chan_idx);

    status = ioat_get_chansts(ioat);
    if (is_ioat_active(status) || is_ioat_idle(status))
        ioat_suspend(ioat);

    /* Wait at most 20 ms */
    for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
        timeout < 20; timeout++) {
        DELAY(1000);
        status = ioat_get_chansts(ioat);
    }
    if (timeout == 20) {
        error = ETIMEDOUT;
        goto out;
    }

    KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

    chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
    ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

    CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
        ioat->chan_idx);

    /*
     * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to masks out errors
     * that can cause stability issues for IOAT v3.
     */
    pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
        4);
    chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
    pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

    /*
     * BDXDE and BWD models reset MSI-X registers on device reset.
     * Save/restore their contents manually.
     */
    if (ioat_model_resets_msix(ioat)) {
        ioat_log_message(1, "device resets MSI-X registers; saving\n");
        pci_save_state(ioat->device);
    }

    ioat_reset(ioat);
    CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
        ioat->chan_idx);

    /* Wait at most 20 ms */
    for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
        DELAY(1000);
    if (timeout == 20) {
        error = ETIMEDOUT;
        goto out;
    }

    if (ioat_model_resets_msix(ioat)) {
        ioat_log_message(1, "device resets registers; restored\n");
        pci_restore_state(ioat->device);
    }

    /* Reset attempts to return the hardware to "halted." */
    status = ioat_get_chansts(ioat);
    if (is_ioat_active(status) || is_ioat_idle(status)) {
        /* So this really shouldn't happen... */
        ioat_log_message(0, "Device is active after a reset?\n");
        ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
        error = 0;
        goto out;
    }

    chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
    if (chanerr != 0) {
        mtx_lock(&ioat->cleanup_lock);
        ioat_halted_debug(ioat, chanerr);
        mtx_unlock(&ioat->cleanup_lock);
        error = ENXIO;
        goto out;
    }

    /*
     * Bring device back online after reset.  Writing CHAINADDR brings the
     * device back to active.
     *
     * The internal ring counter resets to zero, so we have to start over
     * at zero as well.
     */
    ioat->tail = ioat->head = 0;
    ioat->last_seen = 0;
    *ioat->comp_update = 0;

    ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
    ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
    ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
    error = 0;
    CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
        ioat->chan_idx);

out:
    /* Enqueues a null operation and ensures it completes. */
    if (error == 0) {
        error = ioat_start_channel(ioat);
        CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
            ioat->chan_idx);
    }

    /*
     * Resume completions now that ring state is consistent.
     */
    mtx_lock(&ioat->cleanup_lock);
    ioat->resetting_cleanup = FALSE;
    mtx_unlock(&ioat->cleanup_lock);

    /* Unblock submission of new work */
    mtx_lock(&ioat->submit_lock);
    ioat->quiescing = FALSE;
    wakeup(&ioat->quiescing);

    ioat->resetting = FALSE;
    wakeup(&ioat->resetting);

    CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
    mtx_unlock(&ioat->submit_lock);

    return (error);
}

static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
    struct ioat_softc *ioat;
    struct sbuf sb;
    uint64_t status;
    int error;

    ioat = arg1;

    status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

    sbuf_new_for_sysctl(&sb, NULL, 256, req);
    switch (status) {
    case IOAT_CHANSTS_ACTIVE:
        sbuf_printf(&sb, "ACTIVE");
        break;
    case IOAT_CHANSTS_IDLE:
        sbuf_printf(&sb, "IDLE");
        break;
    case IOAT_CHANSTS_SUSPENDED:
        sbuf_printf(&sb, "SUSPENDED");
        break;
    case IOAT_CHANSTS_HALTED:
        sbuf_printf(&sb, "HALTED");
        break;
    case IOAT_CHANSTS_ARMED:
        sbuf_printf(&sb, "ARMED");
        break;
    default:
        sbuf_printf(&sb, "UNKNOWN");
        break;
    }

    error = sbuf_finish(&sb);
    sbuf_delete(&sb);

    if (error != 0 || req->newptr == NULL)
        return (error);
    return (EINVAL);
}

static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
    struct ioat_softc *ioat;
    struct sbuf sb;
#define PRECISION "1"
    const uintmax_t factor = 10;
    uintmax_t rate;
    int error;

    ioat = arg1;
    sbuf_new_for_sysctl(&sb, NULL, 16, req);

    if (ioat->stats.interrupts == 0) {
        sbuf_printf(&sb, "NaN");
        goto out;
    }
    rate = ioat->stats.descriptors_processed * factor /
        ioat->stats.interrupts;
    sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
        rate % factor);
#undef PRECISION
out:
    error = sbuf_finish(&sb);
    sbuf_delete(&sb);
    if (error != 0 || req->newptr == NULL)
        return (error);
    return (EINVAL);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
    struct ioat_softc *ioat;
    int error, arg;

    ioat = arg1;

    arg = 0;
    error = SYSCTL_OUT(req, &arg, sizeof(arg));
    if (error != 0 || req->newptr == NULL)
        return (error);

    error = SYSCTL_IN(req, &arg, sizeof(arg));
    if (error != 0)
        return (error);

    if (arg != 0)
        error = ioat_reset_hw(ioat);

    return (error);
}

static void
dump_descriptor(void *hw_desc)
{
    int i, j;

    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++)
            printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
        printf("\n");
    }
}

static void
ioat_setup_sysctl(device_t device)
{
    struct sysctl_oid_list *par, *statpar, *state, *hammer;
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *tree, *tmp;
    struct ioat_softc *ioat;

    ioat = DEVICE2SOFTC(device);
    ctx = device_get_sysctl_ctx(device);
    tree = device_get_sysctl_tree(device);
    par = SYSCTL_CHILDREN(tree);

    SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
        &ioat->version, 0, "HW version (0xMM form)");
    SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
        &ioat->max_xfer_size, 0, "HW maximum transfer size");
    SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
        &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
    SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
        &ioat->intrdelay_max, 0,
        "Maximum configurable INTRDELAY on this channel (microseconds)");

    tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
        "IOAT channel internal state");
    state = SYSCTL_CHILDREN(tmp);

    SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
        &ioat->ring_size_order, 0, "SW descriptor ring size order");
    SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
        0, "SW descriptor head pointer index");
    SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
        0, "SW descriptor tail pointer index");

    SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
        ioat->comp_update, "HW addr of last completion");

    SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing",
        CTLFLAG_RD, &ioat->is_submitter_processing, 0,
        "submitter processing");

    SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
        CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
        "String of the channel status");

    SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
        &ioat->cached_intrdelay, 0,
        "Current INTRDELAY on this channel (cached, microseconds)");

    tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
        "Big hammers (mostly for testing)");
    hammer = SYSCTL_CHILDREN(tmp);

    SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
        CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
        "Set to non-zero to reset the hardware");

    tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
        "IOAT channel statistics");
    statpar = SYSCTL_CHILDREN(tmp);

    SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
        &ioat->stats.interrupts,
        "Number of interrupts processed on this channel");
    SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
        &ioat->stats.descriptors_processed,
        "Number of descriptors processed on this channel");
    SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
        &ioat->stats.descriptors_submitted,
        "Number of descriptors submitted to this channel");
    SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
        &ioat->stats.descriptors_error,
        "Number of descriptors failed by channel errors");
    SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
        &ioat->stats.channel_halts, 0,
        "Number of times the channel has halted");
    SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
        &ioat->stats.last_halt_chanerr, 0,
        "The raw CHANERR when the channel was last halted");

    SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
        CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
        "Descriptors per interrupt");
}

static void
ioat_get(struct ioat_softc *ioat)
{

    mtx_assert(&ioat->submit_lock, MA_OWNED);
    KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow"));

    ioat->refcnt++;
}

static void
ioat_put(struct ioat_softc *ioat)
{

    mtx_assert(&ioat->submit_lock, MA_OWNED);
    KASSERT(ioat->refcnt >= 1, ("refcnt error"));

    if (--ioat->refcnt == 0)
        wakeup(&ioat->refcnt);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

    mtx_assert(&ioat->submit_lock, MA_OWNED);

    while (ioat->refcnt > 0)
        msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0);
}

#ifdef DDB
#define _db_show_lock(lo)   LOCK_CLASS(lo)->lc_ddb_show(lo)
#define db_show_lock(lk)    _db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
    struct ioat_softc *sc;
    unsigned idx;

    if (!have_addr)
        goto usage;
    idx = (unsigned)addr;
    if (idx >= ioat_channel_index)
        goto usage;

    sc = ioat_channel[idx];
    db_printf("ioat softc at %p\n", sc);
    if (sc == NULL)
        return;

    db_printf(" version: %d\n", sc->version);
    db_printf(" chan_idx: %u\n", sc->chan_idx);
    db_printf(" submit_lock: ");
    db_show_lock(&sc->submit_lock);

    db_printf(" capabilities: %b\n", (int)sc->capabilities,
        IOAT_DMACAP_STR);
    db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
    db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

    db_printf(" poll_timer:\n");
    db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
    db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
    db_printf("  c_func: %p\n", sc->poll_timer.c_func);
    db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
    db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

    db_printf(" quiescing: %d\n", (int)sc->quiescing);
    db_printf(" destroying: %d\n", (int)sc->destroying);
    db_printf(" is_submitter_processing: %d\n",
        (int)sc->is_submitter_processing);
    db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
    db_printf(" resetting: %d\n", (int)sc->resetting);

    db_printf(" head: %u\n", sc->head);
    db_printf(" tail: %u\n", sc->tail);
    db_printf(" ring_size_order: %u\n", sc->ring_size_order);
    db_printf(" last_seen: 0x%lx\n", sc->last_seen);
    db_printf(" ring: %p\n", sc->ring);
    db_printf(" descriptors: %p\n", sc->hw_desc_ring);
    db_printf(" descriptors (phys): 0x%jx\n",
        (uintmax_t)sc->hw_desc_bus_addr);

    db_printf(" ring[%u] (tail):\n", sc->tail %
        (1 << sc->ring_size_order));
    db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
    db_printf("  addr: 0x%lx\n",
        RING_PHYS_ADDR(sc, sc->tail));
    db_printf("  next: 0x%lx\n",
        ioat_get_descriptor(sc, sc->tail)->generic.next);

    db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) %
        (1 << sc->ring_size_order));
    db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
    db_printf("  addr: 0x%lx\n",
        RING_PHYS_ADDR(sc, sc->head - 1));
    db_printf("  next: 0x%lx\n",
        ioat_get_descriptor(sc, sc->head - 1)->generic.next);

    db_printf(" ring[%u] (head):\n", (sc->head) %
        (1 << sc->ring_size_order));
    db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
    db_printf("  addr: 0x%lx\n",
        RING_PHYS_ADDR(sc, sc->head));
    db_printf("  next: 0x%lx\n",
        ioat_get_descriptor(sc, sc->head)->generic.next);

    for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
        if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
            == RING_PHYS_ADDR(sc, idx))
            db_printf(" ring[%u] == hardware tail\n", idx);

    db_printf(" cleanup_lock: ");
    db_show_lock(&sc->cleanup_lock);

    db_printf(" refcnt: %u\n", sc->refcnt);
    db_printf(" stats:\n");
    db_printf("  interrupts: %lu\n", sc->stats.interrupts);
    db_printf("  descriptors_processed: %lu\n", sc->stats.descriptors_processed);
    db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
    db_printf("  descriptors_submitted: %lu\n", sc->stats.descriptors_submitted);

    db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
    db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

    db_printf(" hw status:\n");
    db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
    db_printf("  chanctrl: 0x%x\n",
        (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
    db_printf("  chancmd: 0x%x\n",
        (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
    db_printf("  dmacount: 0x%x\n",
        (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
    db_printf("  chainaddr: 0x%lx\n",
        ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
    db_printf("  chancmp: 0x%lx\n",
        ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
    db_printf("  chanerr: %b\n",
        (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
    return;
usage:
    db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
    return;
}
#endif /* DDB */