/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"

#ifndef BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif
#define	IOAT_REFLK	(&ioat->submit_lock)
#define	IOAT_SHRINK_PERIOD	(10 * hz)
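
/*
 * Note: hz is the kernel tick rate, so IOAT_SHRINK_PERIOD amounts to ten
 * seconds' worth of callout ticks between idle ring-shrink attempts.
 */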

static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor **);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
    int mflags);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
    uint32_t size, boolean_t need_dscr, int mflags);
static int ring_grow(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void ioat_shrink_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);

#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
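
/*
 * Example (sketch): both knobs above are loader tunables, e.g. in
 * /boot/loader.conf:
 *
 *	hw.ioat.force_legacy_interrupts=1
 *	hw.ioat.debug_level=3
 *
 * debug_level is CTLFLAG_RWTUN, so it can also be changed at runtime:
 *
 *	# sysctl hw.ioat.debug_level=3
 */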

/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ioat_probe),
	DEVMETHOD(device_attach, ioat_attach),
	DEVMETHOD(device_detach, ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");

static struct _pcsid
{
	uint32_t   type;
	const char *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x20218086, "SKX IOAT" },

	{ 0x00000000, NULL }
};
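
/*
 * Note: the IDs above are in pci_get_devid() layout: the 16-bit PCI device
 * ID in the upper half and the vendor ID (0x8086, Intel) in the lower half,
 * so 0x34308086 is device 0x3430 from vendor 0x8086.
 */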

/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	uint32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}

static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);
	wakeup(&ioat->resetting);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);
	callout_drain(&ioat->shrink_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	bus_dma_tag_destroy(ioat->hw_desc_tag);

	return (0);
}

static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);

	return (0);
}

static int
ioat_start_channel(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);

	/* Submit 'NULL' operation manually to avoid quiescing flag */
	desc = ioat_get_ring_entry(ioat, ioat->head);
	dmadesc = &desc->bus_dmadesc;
	hw_desc = desc->u.dma;

	dmadesc->callback_fn = NULL;
	dmadesc->callback_arg = NULL;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = IOAT_OP_COPY;
	hw_desc->u.control_generic.completion_update = 1;

	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}

/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	callout_init(&ioat->shrink_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);
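
	/*
	 * Acquiring both mutexes once in this order (and never the reverse)
	 * records the submit_lock -> cleanup_lock ordering with WITNESS(4),
	 * so a later lock-order violation panics a debug kernel instead of
	 * deadlocking silently.
	 */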

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);
		if (ring[i] == NULL)
			return (ENOMEM);
		ring[i]->id = i;
	}

	for (i = 0; i < num_descriptors - 1; i++) {
		next = ring[i + 1];
		dma_hw_desc = ring[i]->u.dma;
		dma_hw_desc->next = next->hw_desc_bus_addr;
	}

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	return (0);
}

static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}

static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}

/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}

static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
	/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
	/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}

static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}

static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	boolean_t pending;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_lock(&ioat->cleanup_lock);

	/*
	 * Don't run while the hardware is being reset.  Reset is responsible
	 * for blocking new work and draining & completing existing work, so
	 * there is nothing to do until new work is queued after reset anyway.
	 */
	if (ioat->resetting_cleanup) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		comp_update = ioat_get_chansts(ioat);
		goto out;
	}

	while (1) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
		if (desc->hw_desc_bus_addr == status)
			break;
	}

	ioat->last_seen = desc->hw_desc_bus_addr;
	ioat->stats.descriptors_processed += completed;

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);

	/* Perform a racy check first; only take the locks if it passes. */
	pending = (ioat_get_active(ioat) != 0);
	if (!pending && ioat->is_completion_pending) {
		mtx_unlock(&ioat->cleanup_lock);
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		pending = (ioat_get_active(ioat) != 0);
		if (!pending && ioat->is_completion_pending) {
			ioat->is_completion_pending = FALSE;
			callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD,
			    ioat_shrink_timer_callback, ioat);
			callout_stop(&ioat->poll_timer);
		}
		mtx_unlock(&ioat->submit_lock);
	}
	mtx_unlock(&ioat->cleanup_lock);

	if (pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
		wakeup(&ioat->tail);
	}

	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}

static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}

/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)
		return (NULL);

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)
		return (NULL);

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)
			return (NULL);

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)
			return (NULL);
	}

	/*
	 * There's a race here between the quiescing check and HW reset or
	 * module destroy.
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}

int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}
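
/*
 * Example (hypothetical consumer; sketch only): clamp a requested
 * interrupt-coalescing delay to the channel maximum before applying it.
 *
 *	uint16_t max = ioat_get_max_coalesce_period(dmaengine);
 *	error = ioat_set_interrupt_coalesce(dmaengine, MIN(delay, max));
 */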

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}

void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR0(KTR_IOAT, __func__);
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR0(KTR_IOAT, __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
	mtx_unlock(&ioat->submit_lock);
}
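
/*
 * Typical submission flow (sketch; error handling omitted).  'done_cb' and
 * 'done_arg' are hypothetical caller-supplied names:
 *
 *	bus_dmaengine_t eng = ioat_get_dmaengine(0, M_WAITOK);
 *
 *	ioat_acquire(eng);
 *	(void)ioat_copy(eng, dst, src, len, done_cb, done_arg, DMA_INT_EN);
 *	ioat_release(eng);	(writes DMACOUNT, kicking the channel)
 *	...
 *	ioat_put_dmaengine(eng);
 */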

static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);
		return (NULL);
	}

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}

struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
		    __func__);
		return (NULL);
	}
	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",
		    __func__);
		return (NULL);
	}
	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = desc->u.fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}

/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
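
/*
 * Worked example: with ring_size_order = 7 (128 entries), head = 130 and
 * tail = 126 give ioat_get_active() = (130 - 126) & 127 = 4 and
 * ioat_get_ring_space() = 128 - 4 - 1 = 123.  One slot is always left
 * unused so that head == tail unambiguously means "ring empty".
 */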

static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error, busdmaflag;

	error = ENOMEM;
	hw_desc = NULL;

	if ((mflags & M_WAITOK) != 0)
		busdmaflag = BUS_DMA_WAITOK;
	else
		busdmaflag = BUS_DMA_NOWAIT;

	desc = malloc(sizeof(*desc), M_IOAT, mflags);
	if (desc == NULL)
		goto out;

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
	if (hw_desc == NULL)
		goto out;

	memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
	desc->u.generic = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,
	    busdmaflag);
	if (error)
		goto out;

out:
	if (error) {
		ioat_free_ring_entry(ioat, desc);
		return (NULL);
	}
	return (desc);
}

static void
ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{

	if (desc == NULL)
		return;

	if (desc->u.generic)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic,
		    ioat->hw_desc_map);
	free(desc, M_IOAT);
}

/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
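
/*
 * Caller pattern (sketch): the public ioat_acquire_reserve() above wraps
 * essentially this sequence.
 *
 *	mtx_lock(&ioat->submit_lock);
 *	if (ioat_reserve_space(ioat, n, M_NOWAIT) == 0) {
 *		... fill n descriptors starting at ioat->head ...
 *	}
 *	mtx_unlock(&ioat->submit_lock);
 */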
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;
	uint32_t order;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;

	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {
		error = EINVAL;
		goto out;
	}
	if (ioat->quiescing) {
		error = ENXIO;
		goto out;
	}

	for (;;) {
		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		order = ioat->ring_size_order;
		if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
			if ((mflags & M_WAITOK) != 0) {
				msleep(&ioat->tail, &ioat->submit_lock, 0,
				    "ioat_rsz", 0);
				continue;
			}

			error = EAGAIN;
			break;
		}

		ioat->is_resize_pending = TRUE;

		mtx_unlock(&ioat->submit_lock);

		new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),
		    TRUE, mflags);

		mtx_lock(&ioat->submit_lock);
		KASSERT(ioat->ring_size_order == order,
		    ("is_resize_pending should protect order"));

		if (new_ring == NULL) {
			KASSERT((mflags & M_WAITOK) == 0,
			    ("allocation failed"));
			error = EAGAIN;
		} else
			error = ring_grow(ioat, order, new_ring);

		ioat->is_resize_pending = FALSE;
		wakeup(&ioat->tail);
		if (error)
			break;
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	return (error);
}

static struct ioat_descriptor **
ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr,
    int mflags)
{
	struct ioat_descriptor **ring;
	uint32_t i;
	int error;

	KASSERT(size > 0 && powerof2(size), ("bogus size"));

	ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags);
	if (ring == NULL)
		return (NULL);

	if (need_dscr) {
		error = ENOMEM;
		for (i = size / 2; i < size; i++) {
			ring[i] = ioat_alloc_ring_entry(ioat, mflags);
			if (ring[i] == NULL)
				goto out;
			ring[i]->id = i;
		}
	}
	error = 0;

out:
	if (error != 0 && ring != NULL) {
		ioat_free_ring(ioat, size, ring);
		ring = NULL;
	}
	return (ring);
}

static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor **ring)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		if (ring[i] != NULL)
			ioat_free_ring_entry(ioat, ring[i]);
	}
	free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static int
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {
		error = EINVAL;
		goto out;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
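	/*
	 * Concrete case (sketch): oldsize = 8, ioat->tail = 6, ioat->head =
	 * 10, so tail = 6 and head = 2 (wrapped).  Slots 0..2 are moved up
	 * to 8..10 of the doubled ring, leaving the active span 6..10
	 * contiguous.
	 */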
	if (head < tail) {
		for (i = 0; i <= head; i++) {
			tmp = newring[oldsize + i];

			newring[oldsize + i] = newring[i];
			newring[oldsize + i]->id = oldsize + i;

			newring[i] = tmp;
			newring[i]->id = i;
		}
		head += oldsize;
	}

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {
		i = oldsize - 1;
		end = newsize;
	} else {
		/* Head did wrap; link newhead..newsize and 0..oldhead */
		i = head;
		end = newsize + (head - oldsize) + 1;
	}

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;
	}

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;
	ioat->tail = tail;
	ioat->head = head;
	error = 0;

	mtx_unlock(&ioat->cleanup_lock);
out:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
	return (error);
}

static int
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;
	int error;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {
		error = EINVAL;
		goto out_unlocked;
	}

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;
	}

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);
	}

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;
	error = 0;

out:
	mtx_unlock(&ioat->cleanup_lock);
out_unlocked:
	if (error)
		ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
	return (error);
}

static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
}

static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	ioat_log_message(3, "%s\n", __func__);

	ioat_process_events(ioat);
}

static void
ioat_shrink_timer_callback(void *arg)
{
	struct ioat_descriptor **newring;
	struct ioat_softc *ioat;
	uint32_t order;

	ioat = arg;
	ioat_log_message(1, "%s\n", __func__);

	/* Slowly scale the ring down if idle. */
	mtx_lock(&ioat->submit_lock);

	/* Don't run while the hardware is being reset. */
	if (ioat->resetting) {
		mtx_unlock(&ioat->submit_lock);
		return;
	}

	order = ioat->ring_size_order;
	if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
		mtx_unlock(&ioat->submit_lock);
		goto out;
	}
	ioat->is_resize_pending = TRUE;
	mtx_unlock(&ioat->submit_lock);

	newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,
	    M_NOWAIT);

	mtx_lock(&ioat->submit_lock);
	KASSERT(ioat->ring_size_order == order,
	    ("resize_pending protects order"));

	if (newring != NULL)
		ring_shrink(ioat, order, newring);

	ioat->is_resize_pending = FALSE;
	mtx_unlock(&ioat->submit_lock);

out:
	if (ioat->ring_size_order > IOAT_MIN_ORDER)
		callout_reset(&ioat->shrink_timer, IOAT_SHRINK_PERIOD,
		    ioat_shrink_timer_callback, ioat);
}
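
/*
 * Note: ioat_submit_single() below only advances the software head counters
 * and takes a descriptor reference; the doorbell itself (the DMACOUNT
 * register) is written once in ioat_release(), so a batch of submissions
 * under a single acquire/release pays for one register write.
 */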

static void
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);
		callout_stop(&ioat->shrink_timer);
	}

	ioat->stats.descriptors_submitted++;
}

static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	mtx_lock(IOAT_REFLK);
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, IOAT_REFLK, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(IOAT_REFLK);
		return (ENXIO);
	}
	ioat->resetting = TRUE;

	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = ENXIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);
	error = 0;

out:
	/*
	 * Resume completions now that ring state is consistent.
	 * ioat_start_channel will add a pending completion and if we are still
	 * blocking completions, we may livelock.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Enqueues a null operation and ensures it completes. */
	if (error == 0)
		error = ioat_start_channel(ioat);

	/* Unblock submission of new work */
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);

	ioat->resetting = FALSE;
	wakeup(&ioat->resetting);
	mtx_unlock(IOAT_REFLK);

	return (error);
}

static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef	PRECISION

out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}

static int
sysctl_handle_error(SYSCTL_HANDLER_ARGS)
{
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0) {
		ioat_acquire(&ioat->dmaengine);
		desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1,
		    0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL,
		    0);
		if (desc == NULL)
			error = ENOMEM;
		else
			ioat_submit_single(ioat);
		ioat_release(&ioat->dmaengine);
	}

	return (error);
}

static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}

static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}

static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD,
	    &ioat->is_resize_pending, 0, "resize pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
	    "Set to non-zero to inject a recoverable hardware error");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}

static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

static inline void
ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}
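
/*
 * _ioat_putn() drops n references of the given kind.  The common case
 * (resulting refcnt still > 0) is handled with a lock-free
 * atomic_cmpset_32() loop; IOAT_REFLK is only involved when the count may
 * reach zero, so the final wakeup for ioat_drain_locked() cannot be missed.
 */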
static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	if (!locked)
		mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
}

#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
	struct ioat_softc *sc;
	unsigned idx;

	if (!have_addr)
		goto usage;
	idx = (unsigned)addr;
	if (addr >= ioat_channel_index)
		goto usage;

	sc = ioat_channel[idx];
	db_printf("ioat softc at %p\n", sc);
	if (sc == NULL)
		return;

	db_printf(" version: %d\n", sc->version);
	db_printf(" chan_idx: %u\n", sc->chan_idx);
	db_printf(" submit_lock: ");
	db_show_lock(&sc->submit_lock);

	db_printf(" capabilities: %b\n", (int)sc->capabilities,
	    IOAT_DMACAP_STR);
	db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
	db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

	db_printf(" poll_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
	db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
	db_printf("  c_func: %p\n", sc->poll_timer.c_func);
	db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

	db_printf(" shrink_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->shrink_timer.c_time);
	db_printf("  c_arg: %p\n", sc->shrink_timer.c_arg);
	db_printf("  c_func: %p\n", sc->shrink_timer.c_func);
	db_printf("  c_lock: %p\n", sc->shrink_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->shrink_timer.c_flags);

	db_printf(" quiescing: %d\n", (int)sc->quiescing);
	db_printf(" destroying: %d\n", (int)sc->destroying);
	db_printf(" is_resize_pending: %d\n", (int)sc->is_resize_pending);
	db_printf(" is_completion_pending: %d\n",
	    (int)sc->is_completion_pending);
	db_printf(" is_reset_pending: %d\n", (int)sc->is_reset_pending);
	db_printf(" is_channel_running: %d\n", (int)sc->is_channel_running);
	db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
	db_printf(" resetting: %d\n", (int)sc->resetting);

	db_printf(" head: %u\n", sc->head);
	db_printf(" tail: %u\n", sc->tail);
	db_printf(" hw_head: %u\n", sc->hw_head);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);
	db_printf(" last_seen: 0x%lx\n", sc->last_seen);
	db_printf(" ring: %p\n", sc->ring);

	db_printf(" cleanup_lock: ");
	db_show_lock(&sc->cleanup_lock);

	db_printf(" refcnt: %u\n", sc->refcnt);
#ifdef INVARIANTS
	CTASSERT(IOAT_NUM_REF_KINDS == 2);
	db_printf(" refkinds: [ENG=%u, DESCR=%u]\n", sc->refkinds[0],
	    sc->refkinds[1]);
#endif

	db_printf(" stats:\n");
	db_printf("  interrupts: %lu\n", sc->stats.interrupts);
	db_printf("  descriptors_processed: %lu\n",
	    sc->stats.descriptors_processed);
	db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
	db_printf("  descriptors_submitted: %lu\n",
	    sc->stats.descriptors_submitted);

	db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
	db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

	db_printf(" hw status:\n");
	db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
	db_printf("  chanctrl: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
	db_printf("  chancmd: 0x%x\n",
	    (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
	db_printf("  dmacount: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
	db_printf("  chainaddr: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
	db_printf("  chancmp: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
	db_printf("  chanerr: %b\n",
	    (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
	return;
usage:
	db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
	return;
}
#endif /* DDB */